From aefc5ffb371e8d1cf0cc73f6cc733e5f5f7af8aa Mon Sep 17 00:00:00 2001 From: Kevin Reeuwijk Date: Wed, 6 Aug 2025 14:45:56 +0200 Subject: [PATCH 01/90] Disable hardcoded MaaS curtin scripts --- .../roles/providers/tasks/maas-ubuntu.yml | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml b/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml index 7f67a0fa3c..2a2333f966 100644 --- a/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml +++ b/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml @@ -4,12 +4,13 @@ state: directory mode: "0775" -- name: Copy curtin scripts to /curtin - ansible.builtin.copy: - src: "files/maas/curtin/{{ item }}" - dest: "/curtin/{{ item }}" - mode: "0750" - loop: - - curtin-hooks - - install-custom-packages - - setup-bootloader +# Don't copy in hard-coded scripts as modern MaaS already has builtin tasks that are more flexible and powerful +# - name: Copy curtin scripts to /curtin +# ansible.builtin.copy: +# src: "files/maas/curtin/{{ item }}" +# dest: "/curtin/{{ item }}" +# mode: "0750" +# loop: +# - curtin-hooks +# - install-custom-packages +# - setup-bootloader From 1d8283ced133b3ed04e6a71a4640b0593ecc10d3 Mon Sep 17 00:00:00 2001 From: Kevin Reeuwijk Date: Sun, 10 Aug 2025 09:24:20 +0200 Subject: [PATCH 02/90] Add instructions for re-enabling custom curtin hooks Co-authored-by: Matt Boersma --- images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml b/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml index 2a2333f966..33cbb8ea7f 100644 --- a/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml +++ b/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml @@ -4,7 +4,7 @@ state: directory mode: "0775" -# Don't copy in hard-coded scripts as modern MaaS already has builtin tasks that are more 
flexible and powerful +# Uncomment this ansible task and customize the curtin scripts to replace the builtin MaaS curtin hooks # - name: Copy curtin scripts to /curtin # ansible.builtin.copy: # src: "files/maas/curtin/{{ item }}" From 60e9d7e7f3b56182923323a8f4ddb5978a2bdc09 Mon Sep 17 00:00:00 2001 From: Joe Kratzat Date: Wed, 24 Sep 2025 15:17:43 -0400 Subject: [PATCH 03/90] Add access_cfg_file and access_cfg_file_account for token auth access_cfg_file defaults to `$HOME/.oci/config` access_cfg_file_account defaults to `DEFAULT` more details https://developer.hashicorp.com/packer/integrations/hashicorp/oracle/latest/components/builder/oci#authentication-parameters --- images/capi/packer/oci/packer.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/images/capi/packer/oci/packer.json b/images/capi/packer/oci/packer.json index 29fe45f88f..baf747691f 100644 --- a/images/capi/packer/oci/packer.json +++ b/images/capi/packer/oci/packer.json @@ -1,6 +1,8 @@ { "builders": [ { + "access_cfg_file": "{{user `access_cfg_file`}}", + "access_cfg_file_account": "{{user `access_cfg_file_account`}}", "availability_domain": "{{user `availability_domain`}}", "base_image_filter": { "operating_system": "{{user `operating_system`}}", From 226f6974c526b09a3a10b0653280c74e2104dccb Mon Sep 17 00:00:00 2001 From: Josh French Date: Mon, 20 Oct 2025 11:11:20 -0400 Subject: [PATCH 04/90] call parent method to prevent infinite recursion --- .../dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py b/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py index b0d311bc09..28cebca9f8 100644 --- a/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py +++ 
b/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py @@ -117,7 +117,7 @@ def _get_data(self): class DataSourceEc2KubernetesLocal(DataSourceEc2Kubernetes): def _get_data(self): - return super(DataSourceEc2KubernetesLocal, self).get_data() + return super(DataSourceEc2KubernetesLocal, self)._get_data() # Used to match classes to dependencies From 62ff691294eb5ce6a8f7578f4e89a05801bf6293 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Mon, 20 Oct 2025 10:01:43 -0600 Subject: [PATCH 05/90] Update docs for image-builder v0.1.47 --- RELEASE.md | 4 ++-- docs/book/src/capi/container-image.md | 10 +++++----- docs/book/src/capi/releasing.md | 8 ++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/RELEASE.md b/RELEASE.md index 3bed596d74..c313e41fc6 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,11 +1,11 @@ # Image Builder Releases -The current release of Image Builder is [v0.1.46][] (August 25, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46`. +The current release of Image Builder is [v0.1.47][] (October 20, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47`. ## Release Process For more detail about image-builder project releases, see the [Image Builder Book][]. 
-[v0.1.46]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.46 +[v0.1.47]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.47 [Image Builder Book]: https://image-builder.sigs.k8s.io/capi/releasing.html diff --git a/docs/book/src/capi/container-image.md b/docs/book/src/capi/container-image.md index 75563e1c2b..0449f0aef8 100644 --- a/docs/book/src/capi/container-image.md +++ b/docs/book/src/capi/container-image.md @@ -18,7 +18,7 @@ Run the docker build target of Makefile The latest image-builder container image release is available here: ```commandline -docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46 +docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47 ``` ### Examples @@ -27,7 +27,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v - If the AWS CLI is already installed on your machine, you can simply mount the `~/.aws` folder that stores all the required credentials. ```commandline - docker run -it --rm -v /Users//.aws:/home/imagebuilder/.aws registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46 build-ami-ubuntu-2404 + docker run -it --rm -v /Users//.aws:/home/imagebuilder/.aws registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47 build-ami-ubuntu-2404 ``` - Another alternative is to use an `aws-creds.env` file to load the credentials and pass it during docker run. 
@@ -38,7 +38,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v ``` ```commandline - docker run -it --rm --env-file aws-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46 build-ami-ubuntu-2404 + docker run -it --rm --env-file aws-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47 build-ami-ubuntu-2404 ``` - AZURE @@ -52,7 +52,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v ``` ```commandline - docker run -it --rm --env-file az-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46 build-azure-sig-ubuntu-2404 + docker run -it --rm --env-file az-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47 build-azure-sig-ubuntu-2404 ``` - Proxmox @@ -84,7 +84,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v - Docker's `--net=host` option to ensure http server starts with the host IP and not the Docker container IP. This option is Linux specific and thus implies that it can be run only from a Linux machine. ```commandline - docker run -it --rm --net=host --env PACKER_VAR_FILES=/home/imagebuilder/vsphere.json -v :/home/imagebuilder/vsphere.json registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.46 build-node-ova-vsphere-ubuntu-2404 + docker run -it --rm --net=host --env PACKER_VAR_FILES=/home/imagebuilder/vsphere.json -v :/home/imagebuilder/vsphere.json registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47 build-node-ova-vsphere-ubuntu-2404 ``` In addition to this, further customizations can be done as discussed [here](./capi.md#customization). 
diff --git a/docs/book/src/capi/releasing.md b/docs/book/src/capi/releasing.md index 7a010e94de..5a05194452 100644 --- a/docs/book/src/capi/releasing.md +++ b/docs/book/src/capi/releasing.md @@ -1,6 +1,6 @@ # Image Builder Releases -The current release of Image Builder is [v0.1.47][] (August 25, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47`. +The current release of Image Builder is [v0.1.48][] (October 20, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48`. ## Release Process @@ -22,7 +22,7 @@ Releases in image-builder follow [semantic versioning][semver] conventions. Curr - *If signing tags with GPG, makes your key available to the `git tag` command.* - Create a new tag: - `export IB_VERSION=v0.1.x` - - *Replace `x` with the next patch version. For example: `v0.1.47`.* + - *Replace `x` with the next patch version. For example: `v0.1.48`.* - `git tag -s -m "Image Builder ${IB_VERSION}" ${IB_VERSION}` - `git push upstream ${IB_VERSION}` @@ -77,11 +77,11 @@ Wait for this PR to merge before communicating the release to users, so image-bu In the [#image-builder channel][] on the Kubernetes Slack, post a message announcing the new release. Include a link to the GitHub release and a thanks to the contributors: ``` -Image-builder v0.1.47 is now available: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.47 +Image-builder v0.1.48 is now available: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.48 Thanks to all contributors! 
``` -[v0.1.47]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.47 +[v0.1.48]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.48 [#image-builder channel]: https://kubernetes.slack.com/archives/C01E0Q35A8J [Personal access tokens]: https://github.com/settings/tokens [post-image-builder-push-images]: https://prow.k8s.io/?repo=kubernetes-sigs%2Fimage-builder&type=postsubmit&job=post-image-builder-push-images From 410a45e8f3215342b24fbfd2d89a621c4a99da32 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Mon, 20 Oct 2025 17:01:08 -0600 Subject: [PATCH 06/90] Remove trailing newline from Windows kubelet service definition --- images/capi/ansible/windows/roles/kubernetes/tasks/sc.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/ansible/windows/roles/kubernetes/tasks/sc.yml b/images/capi/ansible/windows/roles/kubernetes/tasks/sc.yml index 3fa42c5785..0c528abfa5 100644 --- a/images/capi/ansible/windows/roles/kubernetes/tasks/sc.yml +++ b/images/capi/ansible/windows/roles/kubernetes/tasks/sc.yml @@ -19,7 +19,7 @@ ansible.windows.win_service: name: kubelet start_mode: auto - path: > + path: >- "{{ kubernetes_install_path }}\kube-log-runner.exe" --log-file={{ systemdrive.stdout | trim }}/var/log/kubelet/kubelet.log {{ kubernetes_install_path }}\kubelet.exe --windows-service --cert-dir={{ systemdrive.stdout | trim }}/var/lib/kubelet/pki From bb55893b126382d55ad1830f8e0f8cc5390a4a49 Mon Sep 17 00:00:00 2001 From: Simon Ostendorf Date: Thu, 16 Oct 2025 11:48:03 +0200 Subject: [PATCH 07/90] feat: flatcar disable usb --- docs/book/src/capi/capi.md | 12 ++++++ .../capi/ansible/roles/node/defaults/main.yml | 4 ++ .../capi/ansible/roles/node/tasks/flatcar.yml | 40 +++++++++++++++++++ images/capi/ansible/roles/node/tasks/main.yml | 7 ++-- 4 files changed, 59 insertions(+), 4 deletions(-) create mode 100644 images/capi/ansible/roles/node/tasks/flatcar.yml diff --git a/docs/book/src/capi/capi.md 
b/docs/book/src/capi/capi.md index 0fdacc148f..d2e06606de 100644 --- a/docs/book/src/capi/capi.md +++ b/docs/book/src/capi/capi.md @@ -231,3 +231,15 @@ Put the Ansible role files in the `ansible/roles` directory. ``` Note, for backwards compatibility reasons, the variable `custom_role_names` is still accepted as an alternative to `node_custom_roles_post`, and they are functionally equivalent. + +##### Reenabling Flatcar USB devices + +Flatcar usb devices are disabled by default for security reasons. +See [flatcar documentation](https://www.flatcar.org/docs/latest/setup/security/hardening-guide/#disable-usb) for more information. +To reenable them, set the following variable: + +```json +{ + "ansible_user_vars": "disable_flatcar_usb=false" +} +``` diff --git a/images/capi/ansible/roles/node/defaults/main.yml b/images/capi/ansible/roles/node/defaults/main.yml index db9d99f458..7865a18d08 100644 --- a/images/capi/ansible/roles/node/defaults/main.yml +++ b/images/capi/ansible/roles/node/defaults/main.yml @@ -131,3 +131,7 @@ external_binary_path: "{{ '/opt/bin' if ansible_os_family == 'Flatcar' else '/us # Enable containerd trace audit in auditd, default: false. enable_containerd_audit: false + +# Disable flatcar usb devices, default: true +# See hardening guide: https://www.flatcar.org/docs/latest/setup/security/hardening-guide/#disable-usb +disable_flatcar_usb: true diff --git a/images/capi/ansible/roles/node/tasks/flatcar.yml b/images/capi/ansible/roles/node/tasks/flatcar.yml new file mode 100644 index 0000000000..1bf73be586 --- /dev/null +++ b/images/capi/ansible/roles/node/tasks/flatcar.yml @@ -0,0 +1,40 @@ +# Copyright 2025 The Kubernetes Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +# See hardening guide: https://www.flatcar.org/docs/latest/setup/security/hardening-guide/#disable-usb +- name: Create /etc/modprobe.d directory + ansible.builtin.file: + path: /etc/modprobe.d + state: directory + owner: root + group: root + mode: '0755' + when: disable_flatcar_usb + +- name: Blacklist usb-storage module + ansible.builtin.copy: + dest: /etc/modprobe.d/blacklist.conf + content: | + blacklist usb-storage + owner: root + group: root + mode: '0644' + when: disable_flatcar_usb + +# sed is used here instead of the ansible.builtin.lineinfile module +# because of the read-only filesystem on Flatcar in /etc. 
+- name: Set default HOME_MODE in login.defs + ansible.builtin.shell: sed -ri "s/^#?HOME_MODE\>.*/HOME_MODE 0700/" /etc/login.defs + tags: + - skip_ansible_lint diff --git a/images/capi/ansible/roles/node/tasks/main.yml b/images/capi/ansible/roles/node/tasks/main.yml index c297eb750e..5a596aa9f5 100644 --- a/images/capi/ansible/roles/node/tasks/main.yml +++ b/images/capi/ansible/roles/node/tasks/main.yml @@ -26,6 +26,9 @@ tags: - facts +- ansible.builtin.import_tasks: flatcar.yml + when: ansible_os_family == "Flatcar" + - name: Ensure overlay module is present community.general.modprobe: name: overlay @@ -124,7 +127,3 @@ src: usr/local/bin/etcd-network-tuning.sh dest: "{{ external_binary_path }}/etcd-network-tuning.sh" mode: "0755" - -- name: Set default HOME_MODE in login.defs (Flatcar) - ansible.builtin.shell: sed -ri "s/^#?HOME_MODE\>.*/HOME_MODE 0700/" /etc/login.defs - when: ansible_os_family == "Flatcar" From 9e85a5c846113b713d4711ee41e08e8d134dc37f Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Mon, 3 Nov 2025 10:36:16 -0700 Subject: [PATCH 08/90] Remove --pod-infra-container-image default argument --- images/capi/ansible/roles/kubernetes/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/ansible/roles/kubernetes/defaults/main.yml b/images/capi/ansible/roles/kubernetes/defaults/main.yml index 8ae5473a0a..6cf40cf148 100644 --- a/images/capi/ansible/roles/kubernetes/defaults/main.yml +++ b/images/capi/ansible/roles/kubernetes/defaults/main.yml @@ -43,6 +43,6 @@ kubernetes_cni_http_checksum: sha256:{{ kubernetes_cni_http_source }}/{{ kuberne kubeadm_template: etc/kubeadm.yml -kubelet_extra_args: --pod-infra-container-image={{ pause_image }} +kubelet_extra_args: "" kubernetes_enable_automatic_resource_sizing: false From 9fecb19daaae7150d9f111184fa623f08ba80392 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Potin?= Date: Wed, 5 Nov 2025 15:32:42 +0100 Subject: [PATCH 09/90] Fix image build in Azure with 
existing vnet The flag public_ip_sku by default is set to Standard by default, but if virtual_network_name is specified, public_ip_sku cannot be specified. So it is better to not specify it. --- images/capi/packer/azure/packer.json | 1 - 1 file changed, 1 deletion(-) diff --git a/images/capi/packer/azure/packer.json b/images/capi/packer/azure/packer.json index ecffb13e2f..f4de234784 100644 --- a/images/capi/packer/azure/packer.json +++ b/images/capi/packer/azure/packer.json @@ -69,7 +69,6 @@ }, "polling_duration_timeout": "60m", "private_virtual_network_with_public_ip": "{{user `private_virtual_network_with_public_ip`}}", - "public_ip_sku": "Standard", "shared_gallery_image_version_exclude_from_latest": "{{ user `exclude_from_latest` }}", "shared_image_gallery": { "community_gallery_image_id": "{{ user `community_gallery_image_id` }}", From 482912666651c0f3005f1b9593956404d4462b69 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Thu, 6 Nov 2025 09:08:25 -0700 Subject: [PATCH 10/90] Skip OVA rockylinux-8 and photon-5 builds in CI --- images/capi/scripts/ci-ova.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/images/capi/scripts/ci-ova.sh b/images/capi/scripts/ci-ova.sh index 6a98a8540a..4d21c590e9 100755 --- a/images/capi/scripts/ci-ova.sh +++ b/images/capi/scripts/ci-ova.sh @@ -29,10 +29,14 @@ export ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}" # The following are currently having issues running in the # test environment so are specifically excluded for now # - Photon-4 +# - Photon-5 +# - RockyLinux-8 TARGETS=( $(make build-node-ova-vsphere-all --recon -d | grep "Must remake" | \ grep -v build-node-ova-vsphere-all | \ grep -E -v 'rhel|windows|efi' | \ grep -v build-node-ova-vsphere-photon-4 | \ + grep -v build-node-ova-vsphere-photon-5 | \ + grep -v build-node-ova-vsphere-rockylinux-8 | \ grep -E -o 'build-node-ova-vsphere-[a-zA-Z0-9\-]+' ) ) export BOSKOS_RESOURCE_OWNER=image-builder From b396b3744310162adcaa624fcf633a7f99f67853 Mon Sep 17 00:00:00 2001 
From: Matt Boersma Date: Wed, 5 Nov 2025 14:41:38 -0700 Subject: [PATCH 11/90] Require promotion approval in Azure image pipeline --- images/capi/packer/azure/.pipelines/promote-sig.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/images/capi/packer/azure/.pipelines/promote-sig.yaml b/images/capi/packer/azure/.pipelines/promote-sig.yaml index d99d7ed1a5..09acea5eda 100644 --- a/images/capi/packer/azure/.pipelines/promote-sig.yaml +++ b/images/capi/packer/azure/.pipelines/promote-sig.yaml @@ -14,7 +14,16 @@ # - SIG_OFFER - the name of the offer to attach to image definitions, defaults to `reference-images` jobs: +- deployment: approve_promotion + displayName: 'Approve Image Promotion' + environment: 'image-promotion-approval' + strategy: + runOnce: + deploy: + steps: + - script: echo "Approved for promotion" - job: publish_to_sig + dependsOn: approve_promotion timeoutInMinutes: 120 strategy: maxParallel: 0 From baca417f4d95bcca1ebecea6b6c3ff6aebd31666 Mon Sep 17 00:00:00 2001 From: Andrea Mazzotti Date: Mon, 10 Nov 2025 10:50:29 +0100 Subject: [PATCH 12/90] Expose network tags for GCE builder Signed-off-by: Andrea Mazzotti --- images/capi/packer/gce/packer.json | 1 + 1 file changed, 1 insertion(+) diff --git a/images/capi/packer/gce/packer.json b/images/capi/packer/gce/packer.json index 341f019f27..6464ee07de 100644 --- a/images/capi/packer/gce/packer.json +++ b/images/capi/packer/gce/packer.json @@ -19,6 +19,7 @@ "service_account_email": "{{ user `service_account_email` }}", "source_image_family": "{{ user `source_image_family` }}", "ssh_username": "{{user `ssh_username`}}", + "tags": "{{ user `tags` }}", "type": "googlecompute", "use_internal_ip": "{{ user `use_internal_ip`}}", "zone": "{{ user `zone` }}" From 27e050b43fc21eee8d71f9b2415d88b48b8f4327 Mon Sep 17 00:00:00 2001 From: Pierre Ozoux Date: Tue, 11 Nov 2025 10:30:48 +0100 Subject: [PATCH 13/90] feat(outscale): update base image for ubuntu 22 --- images/capi/packer/outscale/ubuntu-2204.json 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/packer/outscale/ubuntu-2204.json b/images/capi/packer/outscale/ubuntu-2204.json index 7e67c1df3e..f6e97550a8 100644 --- a/images/capi/packer/outscale/ubuntu-2204.json +++ b/images/capi/packer/outscale/ubuntu-2204.json @@ -3,5 +3,5 @@ "distribution": "ubuntu", "distribution_release": "ubuntu", "distribution_version": "2204", - "image_name": "Ubuntu-22.04-2023.12.04-0" + "image_name": "Ubuntu-22.04-2025-10-15" } From 98dfe0ebe73a6091d4edc18522673ff38ee175a3 Mon Sep 17 00:00:00 2001 From: su-mangale Date: Thu, 13 Nov 2025 00:59:22 +0545 Subject: [PATCH 14/90] nutanix: pass ansible scp extra args to provisioner --- images/capi/packer/nutanix/packer-windows.json | 2 +- images/capi/packer/nutanix/packer.json.tmpl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/images/capi/packer/nutanix/packer-windows.json b/images/capi/packer/nutanix/packer-windows.json index 88ec188636..a6c43475e0 100644 --- a/images/capi/packer/nutanix/packer-windows.json +++ b/images/capi/packer/nutanix/packer-windows.json @@ -161,7 +161,7 @@ "nutanix_port": "{{env `NUTANIX_PORT`}}", "nutanix_subnet_name": "{{env `NUTANIX_SUBNET_NAME`}}", "nutanix_username": "{{env `NUTANIX_USERNAME`}}", - "scp_extra_vars": "", + "scp_extra_vars": "{{user `ansible_scp_extra_args`}}", "vm_force_delete": "false", "windows_admin_password": "{{env `WINDOWS_ADMIN_PASSWORD`}}" } diff --git a/images/capi/packer/nutanix/packer.json.tmpl b/images/capi/packer/nutanix/packer.json.tmpl index 67e0c07e1d..22cc1e4267 100644 --- a/images/capi/packer/nutanix/packer.json.tmpl +++ b/images/capi/packer/nutanix/packer.json.tmpl @@ -155,7 +155,7 @@ "nutanix_subnet_name": "{{env `NUTANIX_SUBNET_NAME`}}", "nutanix_username": "{{env `NUTANIX_USERNAME`}}", "python_path": "", - "scp_extra_vars": "", + "scp_extra_vars": "{{user `ansible_scp_extra_args`}}", "source_image_delete": "false", "source_image_force": "false", "ssh_password": 
"$SSH_PASSWORD", From 081522f7b9d701d8451a3aa474e30cd0a9dae70a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-Fran=C3=A7ois=20Bustarret?= Date: Thu, 13 Nov 2025 16:56:45 +0000 Subject: [PATCH 15/90] feat(outscale): update ubuntu-2404 base image --- images/capi/packer/outscale/ubuntu-2404.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/packer/outscale/ubuntu-2404.json b/images/capi/packer/outscale/ubuntu-2404.json index f5ce2f1a4c..38469bdb18 100644 --- a/images/capi/packer/outscale/ubuntu-2404.json +++ b/images/capi/packer/outscale/ubuntu-2404.json @@ -3,5 +3,5 @@ "distribution": "ubuntu", "distribution_release": "ubuntu", "distribution_version": "2404", - "image_name": "Ubuntu-24.04-2025-07-07" + "image_name": "Ubuntu-24.04-2025-10-15" } From 0e3efca3a531922724400aee846f1c465cbf847c Mon Sep 17 00:00:00 2001 From: Mathieu Grzybek Date: Wed, 19 Nov 2025 17:40:13 +0100 Subject: [PATCH 16/90] fix(flatcar): use newer butane instead of ct Signed-off-by: Mathieu Grzybek --- images/capi/Makefile | 4 ++-- .../capi/hack/{ensure-ct.sh => ensure-butane.sh} | 14 +++++++------- .../packer/files/flatcar/clc/bootstrap-cloud.yaml | 4 +++- .../files/flatcar/clc/bootstrap-pass-auth.yaml | 4 +++- .../files/flatcar/ignition/bootstrap-cloud.json | 12 ++---------- .../flatcar/ignition/bootstrap-pass-auth.json | 11 ++--------- 6 files changed, 19 insertions(+), 30 deletions(-) rename images/capi/hack/{ensure-ct.sh => ensure-butane.sh} (73%) diff --git a/images/capi/Makefile b/images/capi/Makefile index 92711544ee..7dec9c232f 100644 --- a/images/capi/Makefile +++ b/images/capi/Makefile @@ -143,7 +143,7 @@ deps-powervs: deps-common deps-ignition: ## Installs/checks dependencies for generating Ignition files deps-ignition: hack/ensure-jq.sh - hack/ensure-ct.sh + hack/ensure-butane.sh .PHONY: deps-nutanix deps-nutanix: ## Installs/checks dependencies for Nutanix builds @@ -1155,7 +1155,7 @@ json-sort: ## Sort all JSON files alphabetically .PHONY: 
gen-ignition ignition_files = bootstrap-pass-auth bootstrap-cloud gen-ignition: deps-ignition ## Generates Ignition files from CLC - for f in $(ignition_files); do (ct < packer/files/flatcar/clc/$$f.yaml | jq '.' > packer/files/flatcar/ignition/$$f.json) || exit 1; done + for f in $(ignition_files); do (butane --pretty --strict < packer/files/flatcar/clc/$$f.yaml | jq '.' > packer/files/flatcar/ignition/$$f.json) || exit 1; done ## -------------------------------------- ## ISO checksum updates diff --git a/images/capi/hack/ensure-ct.sh b/images/capi/hack/ensure-butane.sh similarity index 73% rename from images/capi/hack/ensure-ct.sh rename to images/capi/hack/ensure-butane.sh index b58474acb1..0b6502696b 100755 --- a/images/capi/hack/ensure-ct.sh +++ b/images/capi/hack/ensure-butane.sh @@ -20,7 +20,7 @@ set -o pipefail [[ -n ${DEBUG:-} ]] && set -o xtrace -_version="v0.9.3" +_version="v0.25.0" # Change directories to the parent directory of the one in which this # script is located. @@ -33,11 +33,11 @@ if command -v ct >/dev/null 2>&1; then exit 0; fi mkdir -p .local/bin && cd .local/bin if [[ ${HOSTOS} == "linux" ]]; then - _binfile="ct-${_version}-x86_64-unknown-linux-gnu" + _binfile="butane-x86_64-unknown-linux-gnu" elif [[ ${HOSTOS} == "darwin" ]]; then - _binfile="ct-${_version}-x86_64-apple-darwin" + _binfile="butane-x86_64-apple-darwin" fi -_bin_url="https://github.com/flatcar/container-linux-config-transpiler/releases/download/${_version}/${_binfile}" -curl -SsL "${_bin_url}" -o ct -chmod 0755 ct -echo "'ct' has been installed to $(pwd), make sure this directory is in your \$PATH" +_bin_url="https://github.com/coreos/butane/releases/download/${_version}/${_binfile}" +curl -SsL "${_bin_url}" -o butane +chmod 0755 butane +echo "'butane' has been installed to $(pwd), make sure this directory is in your \$PATH" diff --git a/images/capi/packer/files/flatcar/clc/bootstrap-cloud.yaml b/images/capi/packer/files/flatcar/clc/bootstrap-cloud.yaml index 
6f04aa040e..8594349eb3 100644 --- a/images/capi/packer/files/flatcar/clc/bootstrap-cloud.yaml +++ b/images/capi/packer/files/flatcar/clc/bootstrap-cloud.yaml @@ -2,10 +2,12 @@ # authorize SSH keys (typically cloud providers such as AWS or Azure). On such platforms, no SSH # configuration needs to be done via Ignition. The actions in this file are performed before Packer # provisioners (e.g. Ansible) are executed. +variant: flatcar +version: 1.1.0 systemd: units: - name: docker.service - enable: true + enabled: true # Mask update-engine and locksmithd to disable automatic updates during image creation. - name: update-engine.service mask: true diff --git a/images/capi/packer/files/flatcar/clc/bootstrap-pass-auth.yaml b/images/capi/packer/files/flatcar/clc/bootstrap-pass-auth.yaml index 13b364452d..3aef9d32f3 100644 --- a/images/capi/packer/files/flatcar/clc/bootstrap-pass-auth.yaml +++ b/images/capi/packer/files/flatcar/clc/bootstrap-pass-auth.yaml @@ -1,6 +1,8 @@ # This file is used for initial provisioning of a Flatcar machine on platforms which use SSH # password authentication during the build process. The actions in this file are performed before # Packer provisioners (e.g. Ansible) are executed. +variant: flatcar +version: 1.1.0 passwd: users: - name: builder @@ -13,7 +15,7 @@ passwd: systemd: units: - name: docker.service - enable: true + enabled: true # Mask update-engine and locksmithd to disable automatic updates during image creation. 
- name: update-engine.service mask: true diff --git a/images/capi/packer/files/flatcar/ignition/bootstrap-cloud.json b/images/capi/packer/files/flatcar/ignition/bootstrap-cloud.json index 58ee4eef34..8a937079d8 100644 --- a/images/capi/packer/files/flatcar/ignition/bootstrap-cloud.json +++ b/images/capi/packer/files/flatcar/ignition/bootstrap-cloud.json @@ -1,19 +1,11 @@ { "ignition": { - "config": {}, - "security": { - "tls": {} - }, - "timeouts": {}, - "version": "2.3.0" + "version": "3.4.0" }, - "networkd": {}, - "passwd": {}, - "storage": {}, "systemd": { "units": [ { - "enable": true, + "enabled": true, "name": "docker.service" }, { diff --git a/images/capi/packer/files/flatcar/ignition/bootstrap-pass-auth.json b/images/capi/packer/files/flatcar/ignition/bootstrap-pass-auth.json index 64b5030d89..418b6187e4 100644 --- a/images/capi/packer/files/flatcar/ignition/bootstrap-pass-auth.json +++ b/images/capi/packer/files/flatcar/ignition/bootstrap-pass-auth.json @@ -1,13 +1,7 @@ { "ignition": { - "config": {}, - "security": { - "tls": {} - }, - "timeouts": {}, - "version": "2.3.0" + "version": "3.4.0" }, - "networkd": {}, "passwd": { "users": [ { @@ -21,11 +15,10 @@ } ] }, - "storage": {}, "systemd": { "units": [ { - "enable": true, + "enabled": true, "name": "docker.service" }, { From d3ad3bfd1ec7ed1e39b3df1e3f55cac5e2407ed5 Mon Sep 17 00:00:00 2001 From: Mathieu Grzybek Date: Thu, 20 Nov 2025 14:31:14 +0100 Subject: [PATCH 17/90] fix(flatcar): set butane version to v0.25.1 Co-authored-by: Andreas Sommer --- images/capi/hack/ensure-butane.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/hack/ensure-butane.sh b/images/capi/hack/ensure-butane.sh index 0b6502696b..40f9a3e67d 100755 --- a/images/capi/hack/ensure-butane.sh +++ b/images/capi/hack/ensure-butane.sh @@ -20,7 +20,7 @@ set -o pipefail [[ -n ${DEBUG:-} ]] && set -o xtrace -_version="v0.25.0" +_version="v0.25.1" # Change directories to the parent directory of the one in which 
this # script is located. From 9125f76efef06dda11f2ffaeeb7c8104a72af996 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Thu, 20 Nov 2025 11:49:23 -0700 Subject: [PATCH 18/90] Pin ansible community general before 12.0.0 --- images/capi/hack/ensure-ansible.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/hack/ensure-ansible.sh b/images/capi/hack/ensure-ansible.sh index 8c2cff4357..64d33a0fcc 100755 --- a/images/capi/hack/ensure-ansible.sh +++ b/images/capi/hack/ensure-ansible.sh @@ -49,7 +49,7 @@ fi echo ${ansible_version[*]} ansible-galaxy collection install \ - community.general \ + 'community.general:<12.0.0' \ ansible.posix \ 'ansible.windows:>=1.7.0' \ community.windows From a272d0c5fb85746f986ebfbd7a618db615027070 Mon Sep 17 00:00:00 2001 From: "Sriraman S." Date: Wed, 26 Nov 2025 00:35:47 +0530 Subject: [PATCH 19/90] =?UTF-8?q?test:=20=F0=9F=92=8D=20Re-enable=20CI=20f?= =?UTF-8?q?or=20photon-5=20OVA=20builds?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- images/capi/scripts/ci-ova.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/images/capi/scripts/ci-ova.sh b/images/capi/scripts/ci-ova.sh index 4d21c590e9..80867cc7f5 100755 --- a/images/capi/scripts/ci-ova.sh +++ b/images/capi/scripts/ci-ova.sh @@ -29,13 +29,11 @@ export ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}" # The following are currently having issues running in the # test environment so are specifically excluded for now # - Photon-4 -# - Photon-5 # - RockyLinux-8 TARGETS=( $(make build-node-ova-vsphere-all --recon -d | grep "Must remake" | \ grep -v build-node-ova-vsphere-all | \ grep -E -v 'rhel|windows|efi' | \ grep -v build-node-ova-vsphere-photon-4 | \ - grep -v build-node-ova-vsphere-photon-5 | \ grep -v build-node-ova-vsphere-rockylinux-8 | \ grep -E -o 'build-node-ova-vsphere-[a-zA-Z0-9\-]+' ) ) From 452f98eed1a1972b10e58862a65af1000477c9a1 Mon Sep 17 00:00:00 2001 From: Arteon Prifti 
<36534667+arteonprifti@users.noreply.github.com> Date: Thu, 27 Nov 2025 00:28:22 +0100 Subject: [PATCH 20/90] Fix: runc_version overwrite and bump containerd and runc (#1880) * Fix: rename containerd_runc_version var to runc_version * chore: bump containerd and runc * chore: bump containerd to v1.7.29 * fix: ignore var-naming for containerd role --- images/capi/.ansible-lint-ignore | 1 + images/capi/ansible/roles/containerd/defaults/main.yml | 6 +++--- images/capi/packer/config/containerd.json | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/images/capi/.ansible-lint-ignore b/images/capi/.ansible-lint-ignore index f2dc791c89..bd93175992 100644 --- a/images/capi/.ansible-lint-ignore +++ b/images/capi/.ansible-lint-ignore @@ -7,6 +7,7 @@ ansible/python.yml name[missing] ansible/python.yml name[play] ansible/roles/containerd/tasks/main.yml name[missing] ansible/roles/containerd/tasks/photon.yml no-changed-when +ansible/roles/containerd/defaults/main.yml var-naming[no-role-prefix] ansible/roles/ecr_credential_provider/tasks/main.yaml no-changed-when ansible/roles/ecr_credential_provider/tasks/main.yaml yaml[line-length] ansible/roles/firstboot/tasks/main.yaml name[missing] diff --git a/images/capi/ansible/roles/containerd/defaults/main.yml b/images/capi/ansible/roles/containerd/defaults/main.yml index 21e309a211..d3642188c2 100644 --- a/images/capi/ansible/roles/containerd/defaults/main.yml +++ b/images/capi/ansible/roles/containerd/defaults/main.yml @@ -18,6 +18,6 @@ containerd_gvisor_version: latest containerd_baseurl: https://github.com/containerd/containerd/releases/download/v{{ containerd_version }} containerd_filename: "containerd-{{ containerd_version }}-{{ system }}-{{ arch }}.tar.gz" containerd_url: "{{ containerd_baseurl }}/{{ containerd_filename }}" -containerd_runc_url: "https://github.com/opencontainers/runc/releases/download/v{{ containerd_runc_version }}/runc.{{ arch }}" -containerd_runc_version: "1.2.3" 
-containerd_runc_checksum_url: "https://github.com/opencontainers/runc/releases/download/v{{ containerd_runc_version }}/runc.sha256sum" +containerd_runc_url: "https://github.com/opencontainers/runc/releases/download/v{{ runc_version }}/runc.{{ arch }}" +runc_version: "1.2.3" +containerd_runc_checksum_url: "https://github.com/opencontainers/runc/releases/download/v{{ runc_version }}/runc.sha256sum" diff --git a/images/capi/packer/config/containerd.json b/images/capi/packer/config/containerd.json index a7b52a56f3..6bdd1ee574 100644 --- a/images/capi/packer/config/containerd.json +++ b/images/capi/packer/config/containerd.json @@ -3,6 +3,6 @@ "containerd_cri_socket": "/var/run/containerd/containerd.sock", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", - "containerd_version": "1.7.25", - "runc_version": "1.2.3" + "containerd_version": "1.7.29", + "runc_version": "1.2.8" } From e97add73473c5f75c67a83ccdb2cd9c893423199 Mon Sep 17 00:00:00 2001 From: Lukas Stehlik Date: Mon, 1 Dec 2025 08:01:14 +0100 Subject: [PATCH 21/90] remove AWS_DEFAULT_REGION from example env file --- docs/book/src/capi/container-image.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/book/src/capi/container-image.md b/docs/book/src/capi/container-image.md index 0449f0aef8..afcee46a59 100644 --- a/docs/book/src/capi/container-image.md +++ b/docs/book/src/capi/container-image.md @@ -34,7 +34,6 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v ```commandline AWS_ACCESS_KEY_ID=xxxxxxx AWS_SECRET_ACCESS_KEY=xxxxxxxx - AWS_DEFAULT_REGION=xxxxxx ``` ```commandline From 82e9fcd29aecdea0d2de61878993f1f9989cc176 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Thu, 4 Dec 2025 14:58:51 -0700 Subject: [PATCH 22/90] Update docs for image-builder v0.1.48 --- RELEASE.md | 4 ++-- docs/book/src/capi/container-image.md | 10 +++++----- docs/book/src/capi/releasing.md | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git 
a/RELEASE.md b/RELEASE.md index c313e41fc6..f41473ae49 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,11 +1,11 @@ # Image Builder Releases -The current release of Image Builder is [v0.1.47][] (October 20, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47`. +The current release of Image Builder is [v0.1.48][] (December 4, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48`. ## Release Process For more detail about image-builder project releases, see the [Image Builder Book][]. -[v0.1.47]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.47 +[v0.1.48]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.48 [Image Builder Book]: https://image-builder.sigs.k8s.io/capi/releasing.html diff --git a/docs/book/src/capi/container-image.md b/docs/book/src/capi/container-image.md index afcee46a59..5ba8971fff 100644 --- a/docs/book/src/capi/container-image.md +++ b/docs/book/src/capi/container-image.md @@ -18,7 +18,7 @@ Run the docker build target of Makefile The latest image-builder container image release is available here: ```commandline -docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47 +docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48 ``` ### Examples @@ -27,7 +27,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v - If the AWS CLI is already installed on your machine, you can simply mount the `~/.aws` folder that stores all the required credentials. 
```commandline - docker run -it --rm -v /Users//.aws:/home/imagebuilder/.aws registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47 build-ami-ubuntu-2404 + docker run -it --rm -v /Users//.aws:/home/imagebuilder/.aws registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48 build-ami-ubuntu-2404 ``` - Another alternative is to use an `aws-creds.env` file to load the credentials and pass it during docker run. @@ -37,7 +37,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v ``` ```commandline - docker run -it --rm --env-file aws-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47 build-ami-ubuntu-2404 + docker run -it --rm --env-file aws-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48 build-ami-ubuntu-2404 ``` - AZURE @@ -51,7 +51,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v ``` ```commandline - docker run -it --rm --env-file az-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47 build-azure-sig-ubuntu-2404 + docker run -it --rm --env-file az-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48 build-azure-sig-ubuntu-2404 ``` - Proxmox @@ -83,7 +83,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v - Docker's `--net=host` option to ensure http server starts with the host IP and not the Docker container IP. This option is Linux specific and thus implies that it can be run only from a Linux machine. 
```commandline - docker run -it --rm --net=host --env PACKER_VAR_FILES=/home/imagebuilder/vsphere.json -v :/home/imagebuilder/vsphere.json registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.47 build-node-ova-vsphere-ubuntu-2404 + docker run -it --rm --net=host --env PACKER_VAR_FILES=/home/imagebuilder/vsphere.json -v :/home/imagebuilder/vsphere.json registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48 build-node-ova-vsphere-ubuntu-2404 ``` In addition to this, further customizations can be done as discussed [here](./capi.md#customization). diff --git a/docs/book/src/capi/releasing.md b/docs/book/src/capi/releasing.md index 5a05194452..4e516f3cdc 100644 --- a/docs/book/src/capi/releasing.md +++ b/docs/book/src/capi/releasing.md @@ -1,6 +1,6 @@ # Image Builder Releases -The current release of Image Builder is [v0.1.48][] (October 20, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48`. +The current release of Image Builder is [v0.1.48][] (December 4, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48`. ## Release Process @@ -22,7 +22,7 @@ Releases in image-builder follow [semantic versioning][semver] conventions. Curr - *If signing tags with GPG, makes your key available to the `git tag` command.* - Create a new tag: - `export IB_VERSION=v0.1.x` - - *Replace `x` with the next patch version. For example: `v0.1.48`.* + - *Replace `x` with the next patch version. For example: `v0.1.49`.* - `git tag -s -m "Image Builder ${IB_VERSION}" ${IB_VERSION}` - `git push upstream ${IB_VERSION}` @@ -77,7 +77,7 @@ Wait for this PR to merge before communicating the release to users, so image-bu In the [#image-builder channel][] on the Kubernetes Slack, post a message announcing the new release. 
Include a link to the GitHub release and a thanks to the contributors: ``` -Image-builder v0.1.48 is now available: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.48 +Image-builder v0.1.49 is now available: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.49 Thanks to all contributors! ``` From a2288e51caf2ab9a1c4130817c8ea6dd707e1144 Mon Sep 17 00:00:00 2001 From: Victor Sartori Date: Fri, 5 Dec 2025 11:34:53 -0300 Subject: [PATCH 23/90] Fix MAAS Deploy "in memory" Machines deployed in memory raise an error, leaving users stuck in "emergency mode" This PR is related to issue: https://github.com/canonical/packer-maas/issues/362 --- images/capi/packer/maas/packer.json.tmpl | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/images/capi/packer/maas/packer.json.tmpl b/images/capi/packer/maas/packer.json.tmpl index 9c7b1cebb1..f51472147f 100644 --- a/images/capi/packer/maas/packer.json.tmpl +++ b/images/capi/packer/maas/packer.json.tmpl @@ -114,6 +114,13 @@ "type": "ansible", "user": "builder" }, + { + "inline": [ + "sudo rm -f /etc/fstab" + ], + "inline_shebang": "/bin/bash -e", + "type": "shell" + }, { "arch": "{{user `goss_arch`}}", "format": "{{user `goss_format`}}", @@ -160,7 +167,7 @@ "containerd_version": null, "cpus": "1", "crictl_version": null, - "crictl_url": "https://github.com/kubernetes-sigs/cri-tools/releases/download/v{{user `crictl_version`}}/crictl-v{{user `crictl_version`}}-linux-amd64.tar.gz", + "crictl_url": "https://github.com/kubernetes-sigs/cri-tools/releases/download/v{{user `crictl_version`}}/crictl-v{{user `crictl_version`}}-linux-amd64.tar.gz", "disk_compression": "false", "disk_discard": "unmap", "disk_image": "false", @@ -188,7 +195,7 @@ "kubernetes_series": null, "kubernetes_source_type": null, "runc_url": "https://github.com/opencontainers/runc/releases/download/v{{user `runc_version`}}/runc.amd64", - "runc_version": null, + "runc_version": null, "machine_id_mode": "444", 
"memory": "2048", "oem_id": "", From f2432327e2b06fb7908e1adecf6e634e4d56a2cd Mon Sep 17 00:00:00 2001 From: Balamurugan Thandayuthapani Date: Thu, 11 Dec 2025 13:01:21 +0800 Subject: [PATCH 24/90] Fix chrony configuration issues --- images/capi/ansible/roles/providers/tasks/openstack.yml | 7 +++++++ images/capi/ansible/roles/providers/tasks/proxmox.yml | 7 +++++++ images/capi/ansible/roles/providers/tasks/qemu.yml | 7 +++++++ .../capi/ansible/roles/providers/tasks/vmware-photon.yml | 6 ++++++ .../capi/ansible/roles/providers/tasks/vmware-ubuntu.yml | 8 +++++++- 5 files changed, 34 insertions(+), 1 deletion(-) diff --git a/images/capi/ansible/roles/providers/tasks/openstack.yml b/images/capi/ansible/roles/providers/tasks/openstack.yml index a5bfb809fe..3985be2e1e 100644 --- a/images/capi/ansible/roles/providers/tasks/openstack.yml +++ b/images/capi/ansible/roles/providers/tasks/openstack.yml @@ -39,6 +39,13 @@ enabled: true when: ansible_os_family == "Debian" +- name: Create directory for DHCP chrony server files + ansible.builtin.file: + path: /var/lib/dhcp + state: directory + mode: '0755' + when: ansible_os_family == "Debian" + - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: src: "{{ item.src }}" diff --git a/images/capi/ansible/roles/providers/tasks/proxmox.yml b/images/capi/ansible/roles/providers/tasks/proxmox.yml index be3e8f15f3..8771aa68f7 100644 --- a/images/capi/ansible/roles/providers/tasks/proxmox.yml +++ b/images/capi/ansible/roles/providers/tasks/proxmox.yml @@ -42,6 +42,13 @@ enabled: false when: ansible_os_family == "Debian" +- name: Create directory for DHCP chrony server files + ansible.builtin.file: + path: /var/lib/dhcp + state: directory + mode: '0755' + when: ansible_os_family == "Debian" + - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: src: "{{ item.src }}" diff --git a/images/capi/ansible/roles/providers/tasks/qemu.yml 
b/images/capi/ansible/roles/providers/tasks/qemu.yml index 54c8404158..2434e07c6c 100644 --- a/images/capi/ansible/roles/providers/tasks/qemu.yml +++ b/images/capi/ansible/roles/providers/tasks/qemu.yml @@ -42,6 +42,13 @@ enabled: false when: ansible_os_family == "Debian" +- name: Create directory for DHCP chrony server files + ansible.builtin.file: + path: /var/lib/dhcp + state: directory + mode: '0755' + when: ansible_os_family == "Debian" + - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: src: "{{ item.src }}" diff --git a/images/capi/ansible/roles/providers/tasks/vmware-photon.yml b/images/capi/ansible/roles/providers/tasks/vmware-photon.yml index fbffb314a1..57ba6871a7 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware-photon.yml +++ b/images/capi/ansible/roles/providers/tasks/vmware-photon.yml @@ -58,6 +58,12 @@ state: started enabled: true +- name: Create directory for DHCP chrony server files + ansible.builtin.file: + path: /var/lib/dhclient + state: directory + mode: '0755' + - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: src: "{{ item.src }}" diff --git a/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml b/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml index 1863619259..c5d38e2051 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml +++ b/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml @@ -42,6 +42,12 @@ state: started enabled: true +- name: Create directory for DHCP chrony server files + ansible.builtin.file: + path: /var/lib/dhcp + state: directory + mode: '0755' + - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: src: "{{ item.src }}" @@ -49,7 +55,7 @@ mode: a+x vars: server_dir: /var/lib/dhcp - chrony_helper_dir: /usr/lib/chrony + chrony_helper_dir: "{{ '/usr/libexec/chrony' if ansible_distribution_version is version('22.04', '>=') 
else '/usr/lib/chrony' }}" loop: - { src: files/etc/networkd-dispatcher/routable.d/20-chrony.j2, dest: /etc/networkd-dispatcher/routable.d/20-chrony } - { src: files/etc/networkd-dispatcher/off.d/20-chrony.j2, dest: /etc/networkd-dispatcher/off.d/20-chrony } From 7f20765e453eac161d3cca6f12c82ce2369ba2c2 Mon Sep 17 00:00:00 2001 From: Aziz Kone Date: Thu, 11 Dec 2025 09:30:05 +0100 Subject: [PATCH 25/90] Fix Ansible linter errors in Outscale provider tasks Fixed Ansible linter violations in outscale.yml: - Capitalized task names (lines 13, 19) - Added newline at end of file This continues work from PR 1851, originally implemented by lde. Related to issue 1897. --- .../capi/ansible/roles/providers/tasks/outscale.yml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/images/capi/ansible/roles/providers/tasks/outscale.yml b/images/capi/ansible/roles/providers/tasks/outscale.yml index f63228eb36..d30be5612a 100644 --- a/images/capi/ansible/roles/providers/tasks/outscale.yml +++ b/images/capi/ansible/roles/providers/tasks/outscale.yml @@ -8,5 +8,16 @@ packages: - cloud-init - cloud-guest-utils - - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf + +- name: Install Debian specific packages + ansible.builtin.apt: + name: cloud-initramfs-dyn-netconf + state: present + when: ansible_distribution == 'Debian' + +- name: Install Ubuntu specific packages + ansible.builtin.apt: + name: cloud-initramfs-copymods + state: present + when: ansible_distribution == 'Ubuntu' From 12fee08a8197d5dee6b9ded3ba1320c3b9a6e6f7 Mon Sep 17 00:00:00 2001 From: Mitchel Haring Date: Mon, 15 Dec 2025 14:52:26 +1000 Subject: [PATCH 26/90] fix: environment path for tc --- .../roles/node/files/usr/local/bin/etcd-network-tuning.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/images/capi/ansible/roles/node/files/usr/local/bin/etcd-network-tuning.sh b/images/capi/ansible/roles/node/files/usr/local/bin/etcd-network-tuning.sh index 03713816a9..365af7b16d 100755 --- 
a/images/capi/ansible/roles/node/files/usr/local/bin/etcd-network-tuning.sh +++ b/images/capi/ansible/roles/node/files/usr/local/bin/etcd-network-tuning.sh @@ -18,6 +18,8 @@ set -o errexit # exits immediately on any unexpected error (does not bypass tra set -o nounset # will error if variables are used without first being defined set -o pipefail # any non-zero exit code in a piped command causes the pipeline to fail with that code +export PATH="${PATH}:/usr/sbin" + trap on_exit ERR on_exit() { echo "Error setting etcd network tuning parameters for interface: ${DEV}" | systemd-cat -p emerg -t etcd-tuning From 8e8ac006729cd90fd08f9bb5e5cfcb8122243b4a Mon Sep 17 00:00:00 2001 From: Mitchel Haring Date: Thu, 18 Dec 2025 13:40:43 +1000 Subject: [PATCH 27/90] rockylinux 9.6 iso --- images/capi/packer/ova/rockylinux-9.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/images/capi/packer/ova/rockylinux-9.json b/images/capi/packer/ova/rockylinux-9.json index 078dd30c83..33dc454e27 100644 --- a/images/capi/packer/ova/rockylinux-9.json +++ b/images/capi/packer/ova/rockylinux-9.json @@ -10,9 +10,9 @@ "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-9", "firmware": "efi", "guest_os_type": "rockylinux-64", - "iso_checksum": "ee3ac97fdffab58652421941599902012179c37535aece76824673105169c4a2", + "iso_checksum": "aed9449cf79eb2d1c365f4f2561f923a80451b3e8fdbf595889b4cf0ac6c58b8", "iso_checksum_type": "sha256", - "iso_url": "https://dl.rockylinux.org/vault/rocky/9.4/isos/x86_64/Rocky-9.4-x86_64-minimal.iso", + "iso_url": "https://dl.rockylinux.org/vault/rocky/9.6/isos/x86_64/Rocky-9.6-x86_64-minimal.iso", "os_display_name": "RockyLinux 9", "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm", "shutdown_command": "/sbin/halt -h -p", From 6492e91428a72efaeef60b3217e736fc8c327775 Mon Sep 17 00:00:00 2001 From: Mitchel Haring Date: Thu, 18 Dec 2025 13:42:06 +1000 Subject: [PATCH 28/90] fix: update 
rockylinux 9 repo urls --- images/capi/packer/ova/linux/rockylinux/http/9/ks.cfg.tmpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/images/capi/packer/ova/linux/rockylinux/http/9/ks.cfg.tmpl b/images/capi/packer/ova/linux/rockylinux/http/9/ks.cfg.tmpl index 8c01692a09..7bbe82c577 100644 --- a/images/capi/packer/ova/linux/rockylinux/http/9/ks.cfg.tmpl +++ b/images/capi/packer/ova/linux/rockylinux/http/9/ks.cfg.tmpl @@ -1,6 +1,6 @@ # Use CDROM installation media -repo --name="AppStream" --baseurl="http://download.rockylinux.org/pub/rocky/9/AppStream/x86_64/os/" -repo --name="kickstart" --baseurl="http://download.rockylinux.org/pub/rocky/9/devel/x86_64/kickstart/" +repo --name="AppStream" --baseurl="https://download.rockylinux.org/pub/rocky/9/AppStream/x86_64/os/" +repo --name="kickstart" --baseurl="https://download.rockylinux.org/pub/rocky/9/BaseOS/x86_64/kickstart/" cdrom # Use text install From ed5e03a4605ff7f0ac2f04859fb4feed51093c8b Mon Sep 17 00:00:00 2001 From: Johan Thomsen Date: Fri, 19 Dec 2025 10:41:11 +0100 Subject: [PATCH 29/90] cleanup: since 615ec65694 crictl_source_type is no longer used, so remove it --- images/capi/packer/ami/flatcar-arm64.json | 1 - images/capi/packer/ami/flatcar.json | 1 - images/capi/packer/azure/flatcar.json | 1 - images/capi/packer/config/ansible-args.json | 2 +- images/capi/packer/config/kubernetes.json | 1 - images/capi/packer/hcloud/flatcar-arm64.json | 1 - images/capi/packer/hcloud/flatcar.json | 1 - images/capi/packer/nutanix/flatcar.json | 1 - images/capi/packer/openstack/flatcar.json | 1 - images/capi/packer/ova/flatcar.json | 1 - images/capi/packer/proxmox/flatcar.json | 1 - images/capi/packer/qemu/qemu-flatcar.json | 1 - images/capi/packer/raw/raw-flatcar.json | 1 - 13 files changed, 1 insertion(+), 13 deletions(-) diff --git a/images/capi/packer/ami/flatcar-arm64.json b/images/capi/packer/ami/flatcar-arm64.json index 0849746a97..1fc11c3c92 100644 --- a/images/capi/packer/ami/flatcar-arm64.json 
+++ b/images/capi/packer/ami/flatcar-arm64.json @@ -6,7 +6,6 @@ "arch": "arm64", "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}", "builder_instance_type": "t4g.small", - "crictl_source_type": "http", "distribution": "flatcar", "goss_arch": "arm64", "kubernetes_cni_source_type": "http", diff --git a/images/capi/packer/ami/flatcar.json b/images/capi/packer/ami/flatcar.json index fb7a1b7688..7a7aa51a9e 100644 --- a/images/capi/packer/ami/flatcar.json +++ b/images/capi/packer/ami/flatcar.json @@ -5,7 +5,6 @@ "ansible_extra_vars": "ansible_python_interpreter=/opt/bin/python", "arch": "amd64", "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}", - "crictl_source_type": "http", "distribution": "flatcar", "kubernetes_cni_source_type": "http", "kubernetes_source_type": "http", diff --git a/images/capi/packer/azure/flatcar.json b/images/capi/packer/azure/flatcar.json index 392e43f647..98b256a1d1 100644 --- a/images/capi/packer/azure/flatcar.json +++ b/images/capi/packer/azure/flatcar.json @@ -1,7 +1,6 @@ { "ansible_extra_vars": "ansible_python_interpreter=/opt/pypy/bin/pypy", "build_name": "flatcar", - "crictl_source_type": "http", "custom_data_file": "packer/files/flatcar/ignition/bootstrap-cloud.json", "distribution": "flatcar", "distribution_release": "{{env `FLATCAR_CHANNEL`}}", diff --git a/images/capi/packer/config/ansible-args.json b/images/capi/packer/config/ansible-args.json index 0ba049e72f..eb2906c1ae 100644 --- a/images/capi/packer/config/ansible-args.json +++ b/images/capi/packer/config/ansible-args.json @@ -1,5 +1,5 @@ { "ansible_common_ssh_args": "-o IdentitiesOnly=yes", - "ansible_common_vars": "containerd_gvisor_runtime={{user `containerd_gvisor_runtime`}} containerd_gvisor_version={{user `containerd_gvisor_version`}} containerd_sha256={{user `containerd_sha256`}} pause_image={{user `pause_image`}} containerd_additional_settings={{user `containerd_additional_settings`}} containerd_cri_socket={{user `containerd_cri_socket`}} containerd_version={{user 
`containerd_version`}} containerd_wasm_shims_url={{user `containerd_wasm_shims_url`}} containerd_wasm_shims_version={{user `containerd_wasm_shims_version`}} containerd_wasm_shims_sha256={{user `containerd_wasm_shims_sha256`}} containerd_wasm_shims_runtimes=\"{{user `containerd_wasm_shims_runtimes`}}\" containerd_wasm_shims_runtime_versions=\"{{user `containerd_wasm_shims_runtime_versions`}}\" crictl_version={{user `crictl_version`}} crictl_source_type={{user `crictl_source_type`}} custom_role_names=\"{{user `custom_role_names`}}\" firstboot_custom_roles_pre=\"{{user `firstboot_custom_roles_pre`}}\" firstboot_custom_roles_post=\"{{user `firstboot_custom_roles_post`}}\" node_custom_roles_pre=\"{{user `node_custom_roles_pre`}}\" node_custom_roles_post=\"{{user `node_custom_roles_post`}}\" node_custom_roles_post_sysprep=\"{{user `node_custom_roles_post_sysprep`}}\" disable_public_repos={{user `disable_public_repos`}} extra_debs=\"{{user `extra_debs`}}\" extra_repos=\"{{user `extra_repos`}}\" extra_rpms=\"{{user `extra_rpms`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} kubeadm_template={{user `kubeadm_template`}} kubernetes_apiserver_port={{user `kubernetes_apiserver_port`}} kubernetes_cni_http_source={{user `kubernetes_cni_http_source`}} kubernetes_http_source={{user `kubernetes_http_source`}} kubernetes_container_registry={{user `kubernetes_container_registry`}} kubernetes_rpm_repo={{user `kubernetes_rpm_repo`}} kubernetes_rpm_gpg_key={{user `kubernetes_rpm_gpg_key`}} kubernetes_rpm_gpg_check={{user `kubernetes_rpm_gpg_check`}} kubernetes_deb_repo={{user `kubernetes_deb_repo`}} kubernetes_deb_gpg_key={{user `kubernetes_deb_gpg_key`}} kubernetes_cni_deb_version={{user `kubernetes_cni_deb_version`}} kubernetes_cni_rpm_version={{user `kubernetes_cni_rpm_version`}} kubernetes_cni_semver={{user `kubernetes_cni_semver`}} kubernetes_cni_source_type={{user `kubernetes_cni_source_type`}} kubernetes_semver={{user `kubernetes_semver`}} 
kubernetes_source_type={{user `kubernetes_source_type`}} kubernetes_load_additional_imgs={{user `kubernetes_load_additional_imgs`}} kubernetes_deb_version={{user `kubernetes_deb_version`}} kubernetes_rpm_version={{user `kubernetes_rpm_version`}} no_proxy={{user `no_proxy`}} pip_conf_file={{user `pip_conf_file`}} python_path={{user `python_path`}} redhat_epel_rpm={{user `redhat_epel_rpm`}} epel_rpm_gpg_key={{user `epel_rpm_gpg_key`}} reenable_public_repos={{user `reenable_public_repos`}} remove_extra_repos={{user `remove_extra_repos`}} systemd_prefix={{user `systemd_prefix`}} sysusr_prefix={{user `sysusr_prefix`}} sysusrlocal_prefix={{user `sysusrlocal_prefix`}} load_additional_components={{ user `load_additional_components`}} additional_registry_images={{ user `additional_registry_images`}} additional_registry_images_list={{ user `additional_registry_images_list`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_url_images={{ user `additional_url_images`}} additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} additional_executables_destination_path={{ user `additional_executables_destination_path`}} additional_s3={{ user `additional_s3`}} build_target={{ user `build_target`}} amazon_ssm_agent_rpm={{ user `amazon_ssm_agent_rpm` }} enable_containerd_audit={{ user `enable_containerd_audit` }} kubernetes_enable_automatic_resource_sizing={{ user `kubernetes_enable_automatic_resource_sizing` }} debug_tools={{user `debug_tools`}} ubuntu_repo={{user `ubuntu_repo`}} ubuntu_security_repo={{user `ubuntu_security_repo`}} gpu_block_nouveau_loading={{user `block_nouveau_loading`}} runc_version={{user `runc_version`}} containerd_service_url={{user `containerd_service_url`}} netplan_removal_excludes=\"{{user `netplan_removal_excludes`}}\"", + "ansible_common_vars": "containerd_gvisor_runtime={{user 
`containerd_gvisor_runtime`}} containerd_gvisor_version={{user `containerd_gvisor_version`}} containerd_sha256={{user `containerd_sha256`}} pause_image={{user `pause_image`}} containerd_additional_settings={{user `containerd_additional_settings`}} containerd_cri_socket={{user `containerd_cri_socket`}} containerd_version={{user `containerd_version`}} containerd_wasm_shims_url={{user `containerd_wasm_shims_url`}} containerd_wasm_shims_version={{user `containerd_wasm_shims_version`}} containerd_wasm_shims_sha256={{user `containerd_wasm_shims_sha256`}} containerd_wasm_shims_runtimes=\"{{user `containerd_wasm_shims_runtimes`}}\" containerd_wasm_shims_runtime_versions=\"{{user `containerd_wasm_shims_runtime_versions`}}\" crictl_version={{user `crictl_version`}} custom_role_names=\"{{user `custom_role_names`}}\" firstboot_custom_roles_pre=\"{{user `firstboot_custom_roles_pre`}}\" firstboot_custom_roles_post=\"{{user `firstboot_custom_roles_post`}}\" node_custom_roles_pre=\"{{user `node_custom_roles_pre`}}\" node_custom_roles_post=\"{{user `node_custom_roles_post`}}\" node_custom_roles_post_sysprep=\"{{user `node_custom_roles_post_sysprep`}}\" disable_public_repos={{user `disable_public_repos`}} extra_debs=\"{{user `extra_debs`}}\" extra_repos=\"{{user `extra_repos`}}\" extra_rpms=\"{{user `extra_rpms`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} kubeadm_template={{user `kubeadm_template`}} kubernetes_apiserver_port={{user `kubernetes_apiserver_port`}} kubernetes_cni_http_source={{user `kubernetes_cni_http_source`}} kubernetes_http_source={{user `kubernetes_http_source`}} kubernetes_container_registry={{user `kubernetes_container_registry`}} kubernetes_rpm_repo={{user `kubernetes_rpm_repo`}} kubernetes_rpm_gpg_key={{user `kubernetes_rpm_gpg_key`}} kubernetes_rpm_gpg_check={{user `kubernetes_rpm_gpg_check`}} kubernetes_deb_repo={{user `kubernetes_deb_repo`}} kubernetes_deb_gpg_key={{user `kubernetes_deb_gpg_key`}} kubernetes_cni_deb_version={{user 
`kubernetes_cni_deb_version`}} kubernetes_cni_rpm_version={{user `kubernetes_cni_rpm_version`}} kubernetes_cni_semver={{user `kubernetes_cni_semver`}} kubernetes_cni_source_type={{user `kubernetes_cni_source_type`}} kubernetes_semver={{user `kubernetes_semver`}} kubernetes_source_type={{user `kubernetes_source_type`}} kubernetes_load_additional_imgs={{user `kubernetes_load_additional_imgs`}} kubernetes_deb_version={{user `kubernetes_deb_version`}} kubernetes_rpm_version={{user `kubernetes_rpm_version`}} no_proxy={{user `no_proxy`}} pip_conf_file={{user `pip_conf_file`}} python_path={{user `python_path`}} redhat_epel_rpm={{user `redhat_epel_rpm`}} epel_rpm_gpg_key={{user `epel_rpm_gpg_key`}} reenable_public_repos={{user `reenable_public_repos`}} remove_extra_repos={{user `remove_extra_repos`}} systemd_prefix={{user `systemd_prefix`}} sysusr_prefix={{user `sysusr_prefix`}} sysusrlocal_prefix={{user `sysusrlocal_prefix`}} load_additional_components={{ user `load_additional_components`}} additional_registry_images={{ user `additional_registry_images`}} additional_registry_images_list={{ user `additional_registry_images_list`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_url_images={{ user `additional_url_images`}} additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} additional_executables_destination_path={{ user `additional_executables_destination_path`}} additional_s3={{ user `additional_s3`}} build_target={{ user `build_target`}} amazon_ssm_agent_rpm={{ user `amazon_ssm_agent_rpm` }} enable_containerd_audit={{ user `enable_containerd_audit` }} kubernetes_enable_automatic_resource_sizing={{ user `kubernetes_enable_automatic_resource_sizing` }} debug_tools={{user `debug_tools`}} ubuntu_repo={{user `ubuntu_repo`}} ubuntu_security_repo={{user `ubuntu_security_repo`}} gpu_block_nouveau_loading={{user 
`block_nouveau_loading`}} runc_version={{user `runc_version`}} containerd_service_url={{user `containerd_service_url`}} netplan_removal_excludes=\"{{user `netplan_removal_excludes`}}\"", "ansible_scp_extra_args": "{{env `ANSIBLE_SCP_EXTRA_ARGS`}}" } diff --git a/images/capi/packer/config/kubernetes.json b/images/capi/packer/config/kubernetes.json index 6e0deb7196..b6ab15580e 100644 --- a/images/capi/packer/config/kubernetes.json +++ b/images/capi/packer/config/kubernetes.json @@ -1,5 +1,4 @@ { - "crictl_source_type": "pkg", "crictl_version": "1.32.0", "kubeadm_template": "etc/kubeadm.yml", "kubernetes_apiserver_port": "6443", diff --git a/images/capi/packer/hcloud/flatcar-arm64.json b/images/capi/packer/hcloud/flatcar-arm64.json index dffddaeaf4..558488a881 100644 --- a/images/capi/packer/hcloud/flatcar-arm64.json +++ b/images/capi/packer/hcloud/flatcar-arm64.json @@ -3,7 +3,6 @@ "arch": "arm64", "build_name": "flatcar", "crictl_arch": "arm64", - "crictl_source_type": "http", "distribution": "flatcar", "distribution_release": "{{env `FLATCAR_CHANNEL`}}", "distribution_version": "{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", diff --git a/images/capi/packer/hcloud/flatcar.json b/images/capi/packer/hcloud/flatcar.json index f7ea8824af..70ca728d8f 100644 --- a/images/capi/packer/hcloud/flatcar.json +++ b/images/capi/packer/hcloud/flatcar.json @@ -2,7 +2,6 @@ "ansible_extra_vars": "ansible_python_interpreter=/opt/bin/python", "arch": "amd64", "build_name": "flatcar", - "crictl_source_type": "http", "distribution": "flatcar", "distribution_release": "{{env `FLATCAR_CHANNEL`}}", "distribution_version": "{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", diff --git a/images/capi/packer/nutanix/flatcar.json b/images/capi/packer/nutanix/flatcar.json index 0f57bd1a43..2a6075a0c7 100644 --- a/images/capi/packer/nutanix/flatcar.json +++ b/images/capi/packer/nutanix/flatcar.json @@ -2,7 +2,6 @@ "ansible_extra_vars": "ansible_python_interpreter=/opt/bin/python3", 
"build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "channel_name": "{{env `FLATCAR_CHANNEL`}}", - "crictl_source_type": "http", "distribution": "flatcar", "distribution_release": "Core", "distribution_version": "{{env `FLATCAR_CHANNEL`}}", diff --git a/images/capi/packer/openstack/flatcar.json b/images/capi/packer/openstack/flatcar.json index 2c63585d1b..5c984db8d0 100644 --- a/images/capi/packer/openstack/flatcar.json +++ b/images/capi/packer/openstack/flatcar.json @@ -1,7 +1,6 @@ { "ansible_extra_vars": "ansible_python_interpreter=/opt/bin/python", "build_name": "flatcar", - "crictl_source_type": "http", "distro_name": "flatcar", "kubernetes_cni_source_type": "http", "kubernetes_source_type": "http", diff --git a/images/capi/packer/ova/flatcar.json b/images/capi/packer/ova/flatcar.json index ebc1eea33e..d135f9550b 100644 --- a/images/capi/packer/ova/flatcar.json +++ b/images/capi/packer/ova/flatcar.json @@ -5,7 +5,6 @@ "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "channel_name": "{{env `FLATCAR_CHANNEL`}}", "containerd_cri_socket": "/run/docker/libcontainerd/docker-containerd.sock", - "crictl_source_type": "http", "distro_name": "flatcar", "guest_os_type": "flatcar-64", "http_directory": "", diff --git a/images/capi/packer/proxmox/flatcar.json b/images/capi/packer/proxmox/flatcar.json index ae1a3e2606..e1d304cff1 100644 --- a/images/capi/packer/proxmox/flatcar.json +++ b/images/capi/packer/proxmox/flatcar.json @@ -6,7 +6,6 @@ "boot_wait": "180s", "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "channel_name": "{{env `FLATCAR_CHANNEL`}}", - "crictl_source_type": "http", "distribution_version": "{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "distro_name": "flatcar", "guest_os_type": "linux-64", diff --git a/images/capi/packer/qemu/qemu-flatcar.json b/images/capi/packer/qemu/qemu-flatcar.json index 786c25d2ef..4c129c983f 100644 --- 
a/images/capi/packer/qemu/qemu-flatcar.json +++ b/images/capi/packer/qemu/qemu-flatcar.json @@ -5,7 +5,6 @@ "boot_wait": "180s", "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "channel_name": "{{env `FLATCAR_CHANNEL`}}", - "crictl_source_type": "http", "distribution_version": "{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "distro_name": "flatcar", "guest_os_type": "linux-64", diff --git a/images/capi/packer/raw/raw-flatcar.json b/images/capi/packer/raw/raw-flatcar.json index c499489e88..c6f8aa6704 100644 --- a/images/capi/packer/raw/raw-flatcar.json +++ b/images/capi/packer/raw/raw-flatcar.json @@ -5,7 +5,6 @@ "boot_wait": "180s", "build_name": "flatcar-{{env `FLATCAR_CHANNEL`}}-{{env `FLATCAR_VERSION`}}", "channel_name": "{{env `FLATCAR_CHANNEL`}}", - "crictl_source_type": "http", "distro_name": "flatcar", "guest_os_type": "linux-64", "http_directory": "./packer/files/flatcar/ignition/", From 0cac7c81ead84dea5a73a02f42062ff9f40d88a8 Mon Sep 17 00:00:00 2001 From: Victor Sartori Date: Mon, 5 Jan 2026 17:31:32 -0300 Subject: [PATCH 30/90] Remove bundled MaaS curtin hook scripts - Add documentation on overriding MaaS curtin scripts via a custom role, with a link to the Image Builder customization docs. - Document how to set a unique iSCSI InitiatorName using KubeadmConfigTemplate preKubeadmCommands, including a YAML example. This is part of PR #1828. 
--- docs/book/src/capi/providers/maas.md | 41 +++++ .../providers/files/maas/curtin/NOTICE.md | 3 - .../providers/files/maas/curtin/curtin-hooks | 145 ------------------ .../files/maas/curtin/install-custom-packages | 3 - .../files/maas/curtin/setup-bootloader | 39 ----- .../roles/providers/tasks/maas-ubuntu.yml | 19 ++- 6 files changed, 50 insertions(+), 200 deletions(-) delete mode 100644 images/capi/ansible/roles/providers/files/maas/curtin/NOTICE.md delete mode 100644 images/capi/ansible/roles/providers/files/maas/curtin/curtin-hooks delete mode 100644 images/capi/ansible/roles/providers/files/maas/curtin/install-custom-packages delete mode 100644 images/capi/ansible/roles/providers/files/maas/curtin/setup-bootloader diff --git a/docs/book/src/capi/providers/maas.md b/docs/book/src/capi/providers/maas.md index 178dbf5751..2db31b0b9a 100644 --- a/docs/book/src/capi/providers/maas.md +++ b/docs/book/src/capi/providers/maas.md @@ -74,3 +74,44 @@ maas admin boot-resources create name=custom/your-image architecture=amd64/gener ``` **Note:** Set `base_image=ubuntu/jammy` for Ubuntu 22.04 or `ubuntu/noble` for 24.04. + +## Custom Curtin Scripts +If you need to override the default MaaS curtin scripts, create a custom role containing the curtin hooks. 
The files must be copied to the `/curtin` directory + +For more information on how to create and use custom roles, refer to the official documentation: https://image-builder.sigs.k8s.io/capi/capi#customization + +## iSCSI configuration note: + +If you need unique names for the iSCSI InitiatorName, add a KubeadmConfigTemplate and include the following command under `spec.template.spec.preKubeadmCommands` + +```bash +echo "InitiatorName=$(iscsi-iname -p iqn.2004-10.com.ubuntu:$(cat /etc/hostname))" > /etc/iscsi/initiatorname.iscsi +``` + +### Example + +```yaml +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: t-cluster-md-0 + namespace: default +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + event-qps: "0" + feature-gates: RotateKubeletServerCertificate=true + read-only-port: "0" + name: '{{ v1.local_hostname }}' + preKubeadmCommands: + - echo "InitiatorName=$(iscsi-iname -p iqn.2004-10.com.ubuntu:$(cat /etc/hostname))" > /etc/iscsi/initiatorname.iscsi + - systemctl restart open-iscsi + - while [ ! -S /var/run/containerd/containerd.sock ]; do echo 'Waiting for containerd...'; + sleep 1; done + - sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab + - swapoff -a + useExperimentalRetryJoin: true +``` diff --git a/images/capi/ansible/roles/providers/files/maas/curtin/NOTICE.md b/images/capi/ansible/roles/providers/files/maas/curtin/NOTICE.md deleted file mode 100644 index 21e4b4f83f..0000000000 --- a/images/capi/ansible/roles/providers/files/maas/curtin/NOTICE.md +++ /dev/null @@ -1,3 +0,0 @@ -# Attribution -All the script in this folder is derived from the original work by Alexsander de Souza (Canonical), -available at https://github.com/canonical/packer-maas. 
\ No newline at end of file diff --git a/images/capi/ansible/roles/providers/files/maas/curtin/curtin-hooks b/images/capi/ansible/roles/providers/files/maas/curtin/curtin-hooks deleted file mode 100644 index 50d8385521..0000000000 --- a/images/capi/ansible/roles/providers/files/maas/curtin/curtin-hooks +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/env python3 -# -# This original script was copied from: -# Source: https://github.com/canonical/packer-maas -# Original Author: Alexsander de Souza -# and modified by the image-builder team - - -import os -import platform -import random -import shutil -import socket -import string -import sys - -from curtin import distro, util -from curtin.commands import apt_config, curthooks -from curtin.config import load_command_config -from curtin.log import DEBUG, LOG, basicConfig -from curtin.paths import target_path -from curtin.util import ChrootableTarget, load_command_environment - - -def run_hook_in_target(target, hook): - """Look for "hook" in "target" and run in a chroot""" - target_hook = target_path(target, "/curtin/" + hook) - if os.path.isfile(target_hook): - LOG.debug("running %s" % target_hook) - with ChrootableTarget(target=target) as in_chroot: - in_chroot.subp(["/curtin/" + hook]) - return True - return False - - -def curthook(cfg, target, state): - """Configure network and bootloader""" - LOG.info("Running curtin builtin curthooks") - state_etcd = os.path.split(state["fstab"])[0] - machine = platform.machine() - - distro_info = distro.get_distroinfo(target=target) - if not distro_info: - raise RuntimeError("Failed to determine target distro") - osfamily = distro_info.family - LOG.info( - "Configuring target system for distro: %s osfamily: %s", - distro_info.variant, - osfamily, - ) - - sources = cfg.get("sources", {}) - dd_image = len(util.get_dd_images(sources)) > 0 - - curthooks.disable_overlayroot(cfg, target) - curthooks.disable_update_initramfs(cfg, target, machine) - curthooks.install_missing_packages(cfg, 
target, osfamily=osfamily) - - if not dd_image: - curthooks.configure_iscsi(cfg, state_etcd, target, osfamily=osfamily) - curthooks.configure_mdadm(cfg, state_etcd, target, osfamily=osfamily) - curthooks.copy_fstab(state.get("fstab"), target) - curthooks.add_swap(cfg, target, state.get("fstab")) - - run_hook_in_target(target, "install-custom-packages") - - if not dd_image: - curthooks.setup_kernel_img_conf(target) - - crypttab_location = os.path.join(os.path.split(state["fstab"])[0], "crypttab") - if os.path.exists(crypttab_location): - curthooks.copy_crypttab(crypttab_location, target) - - udev_rules_d = os.path.join(state["scratch"], "rules.d") - if os.path.isdir(udev_rules_d): - curthooks.copy_dname_rules(udev_rules_d, target) - - apt_config.apply_debconf_selections(cfg, target) - - curthooks.apply_networking(target, state) - curthooks.handle_pollinate_user_agent(cfg, target) - - # re-enable update_initramfs - curthooks.enable_update_initramfs(cfg, target, machine) - curthooks.update_initramfs(target, all_kernels=True) - - run_hook_in_target(target, "setup-bootloader") - generate_unique_iscsi_initiator_name(target) - - -def generate_random_id(length): - return ''.join(random.choices(string.hexdigits.lower(), k=length)) - - -def generate_unique_iscsi_initiator_name(target): - """Generate a unique iSCSI initiator name for the target.""" - base_iqn_name = "iqn.2004-10.com.ubuntu" - random_id = generate_random_id(4) - initiator_name = f"{base_iqn_name}:{random_id}:{socket.gethostname()}" - header = "## This file is automatically generated by curtin ##\n" - LOG.info("Generated unique iSCSI initiator name: %s", initiator_name) - - if not os.path.exists(target + "/etc/iscsi"): - LOG.warning("Target directory " + target + "/etc/iscsi does not exist." - "Skiping iSCSI initiator name generation.") - return - - if not os.path.exists(target + "/etc/iscsi/initiatorname.iscsi"): - LOG.warning("Target file " + target + "/etc/iscsi/initiatorname.iscsi does not exist." 
- "Skiping iSCSI initiator name generation.") - return - - # why 127 characters? https://kb.netapp.com/on-prem/ontap/da/SAN/SAN-KBs/What_is_the_maximum_length_of_a_iSCSI_iqn_name - if len(initiator_name) > 127: - LOG.error("iSCSI initiator name exceeds 127 characters: " + initiator_name) - raise ValueError("iSCSI initiator name exceeds 127 characters") - - try: - with open(target + "/etc/iscsi/initiatorname.iscsi", "w") as f: - f.write(header + "InitiatorName=%s\n" % initiator_name) - LOG.info("Wrote initiator name to " + target + "/etc/iscsi/initiatorname.iscsi") - except IOError as e: - LOG.error("Failed to write iSCSI initiator name: %s", e) - raise RuntimeError("Failed to write iSCSI initiator name") from e - - -def cleanup(): - """Remove curtin-hooks so its as if we were never here.""" - curtin_dir = os.path.dirname(__file__) - shutil.rmtree(curtin_dir) - - -def main(): - state = load_command_environment() - config = load_command_config(None, state) - target = state["target"] - - basicConfig(stream=sys.stderr, verbosity=DEBUG) - - curthook(config, target, state) - cleanup() - - -if __name__ == "__main__": - main() diff --git a/images/capi/ansible/roles/providers/files/maas/curtin/install-custom-packages b/images/capi/ansible/roles/providers/files/maas/curtin/install-custom-packages deleted file mode 100644 index b8262897ce..0000000000 --- a/images/capi/ansible/roles/providers/files/maas/curtin/install-custom-packages +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -ex -# -exit 0 \ No newline at end of file diff --git a/images/capi/ansible/roles/providers/files/maas/curtin/setup-bootloader b/images/capi/ansible/roles/providers/files/maas/curtin/setup-bootloader deleted file mode 100644 index eb1882c70a..0000000000 --- a/images/capi/ansible/roles/providers/files/maas/curtin/setup-bootloader +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -ex -# -# This script was based on: -# Source: https://github.com/canonical/packer-maas -# Original Author: Alexsander de Souza -# and 
modified by the image-builder team - -export DEBIAN_FRONTEND=noninteractive - -# Clean up remnants from packer-maas vm install -rm /var/cache/debconf/config.dat -dpkg --configure -a - -# Update the package lists before attempting to install the kernel -apt-get update - -if [ -d /sys/firmware/efi ]; then - echo "EFI MODE!" - dpkg-reconfigure grub-efi-amd64 - update-grub - - grub-install \ - --target=x86_64-efi \ - --efi-directory=/boot/efi \ - --bootloader-id=ubuntu \ - --recheck - update-initramfs -uk all - efibootmgr -v -else - echo "BIOS MODE!" - apt-get remove -y --allow-change-held-packages --allow-remove-essential grub-efi-amd64 grub-efi-amd64-signed shim-signed - apt-get install -y grub-pc - dpkg-reconfigure grub-pc - update-grub - DEVICE=$(findmnt -no SOURCE "/") - BOOT_DISK=$(lsblk -no PKNAME "$DEVICE") - grub-install /dev/"$BOOT_DISK" - update-initramfs -uk all -fi diff --git a/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml b/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml index 33cbb8ea7f..1d0974e88a 100644 --- a/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml +++ b/images/capi/ansible/roles/providers/tasks/maas-ubuntu.yml @@ -4,13 +4,12 @@ state: directory mode: "0775" -# Uncomment this ansible task and customize the curtin scripts to replace the builtin MaaS curtin hooks -# - name: Copy curtin scripts to /curtin -# ansible.builtin.copy: -# src: "files/maas/curtin/{{ item }}" -# dest: "/curtin/{{ item }}" -# mode: "0750" -# loop: -# - curtin-hooks -# - install-custom-packages -# - setup-bootloader +# Additional instruction: +# If you need to keep a custom curtin script, create a custom role containing the curtin hooks. 
+# For more information on how to create and use custom roles, refer to the official documentation: +# https://image-builder.sigs.k8s.io/capi/capi#customization + +# iSCSI configuration note: +# If you need unique names for the iSCSI InitiatorName, add a KubeadmConfigTemplate and include the following command under spec.preKubeadmCommands: +# +# echo "InitiatorName=$(iscsi-iname -p iqn.2004-10.com.ubuntu:$(cat /etc/hostname))" > /etc/iscsi/initiatorname.iscsi From 1c54e029f155b0a0cb872afe144394f2f8d350f5 Mon Sep 17 00:00:00 2001 From: vishesh92 Date: Tue, 6 Jan 2026 11:35:41 +0530 Subject: [PATCH 31/90] Update CAPC OWNERS --- OWNERS_ALIASES | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index b2bee5f1f0..c3c33cecda 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -18,8 +18,10 @@ aliases: image-builder-openstack-maintainers: - yankcrime image-builder-cloudstack-reviewers: - - rohityadavcloud - davidjumani + - vishesh92 + - weizhouapache + - yadvr image-builder-scaleway-reviewers: - Tomy2e - Mia-Cross @@ -66,9 +68,11 @@ aliases: - seanschneeweiss - tobiasgiese cluster-api-cloudstack-maintainers: - - rohityadavcloud - davidjumani - Pearl1594 + - vishesh92 + - weizhouapache + - yadvr cluster-api-vsphere-maintainers: - chrischdi - gab-satchi From ce0d0b8406c1f2621c7dc3e7a99f139e28e047c1 Mon Sep 17 00:00:00 2001 From: Alex Benn Date: Tue, 6 Jan 2026 13:03:58 -0500 Subject: [PATCH 32/90] fix: update Azure build VM size to a more modern v2 generation --- images/capi/packer/azure/azure-config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/packer/azure/azure-config.json b/images/capi/packer/azure/azure-config.json index 9949048dd9..15dc8d11ac 100644 --- a/images/capi/packer/azure/azure-config.json +++ b/images/capi/packer/azure/azure-config.json @@ -5,5 +5,5 @@ "containerd_wasm_shims_runtimes": "lunatic,slight,spin,wws", "subscription_id": "{{env `AZURE_SUBSCRIPTION_ID`}}", 
"use_azure_cli_auth": "{{env `USE_AZURE_CLI_AUTH`}}", - "vm_size": "Standard_B2ms" + "vm_size": "Standard_B2als_v2" } From be8da68145f8516571ecf7edbe7da16e089a7aa8 Mon Sep 17 00:00:00 2001 From: ffais Date: Fri, 9 Jan 2026 10:10:13 +0100 Subject: [PATCH 33/90] add user specified kernel boot command line parameters to final image Signed-off-by: ffais --- docs/book/src/capi/capi.md | 1 + images/capi/ansible/roles/setup/defaults/main.yml | 1 + images/capi/ansible/roles/sysprep/tasks/debian.yml | 6 +++--- .../etc/default/grub.d/50-cloudimg-settings.cfg | 2 +- images/capi/packer/config/ansible-args.json | 2 +- images/capi/packer/config/common.json | 1 + images/capi/packer/goss/goss-files.yaml | 7 +++++++ images/capi/packer/goss/goss-vars.yaml | 1 + images/capi/packer/raw/packer.json.tmpl | 3 ++- 9 files changed, 18 insertions(+), 6 deletions(-) rename images/capi/ansible/roles/sysprep/{files => templates}/etc/default/grub.d/50-cloudimg-settings.cfg (77%) diff --git a/docs/book/src/capi/capi.md b/docs/book/src/capi/capi.md index d2e06606de..6fd42127c1 100644 --- a/docs/book/src/capi/capi.md +++ b/docs/book/src/capi/capi.md @@ -61,6 +61,7 @@ Several variables can be used to customize the image build. | `firstboot_custom_roles_pre`
`firstboot_custom_roles_post`
`node_custom_roles_pre`
`node_custom_roles_post` | Each of these four variables allows for giving a space delimited string of custom Ansible roles to run at different times. The "pre" roles run as the very first thing in the playbook (useful for setting up environment specifics like networking changes), and the "post" roles as the very last (useful for undoing those changes, custom additions, etc). Note that the "post" role does run before the "sysprep" role in the "node" playbook, as the "sysprep" role seals the image. If the role is placed in the `ansible/roles` directory, it can be referenced by name. Otherwise, it must be a fully qualified path to the role. | `""` | | `disable_public_repos` | If set to `"true"`, this will disable all existing package repositories defined in the OS before doing any package installs. The `extra_repos` variable *must* be set for package installs to succeed. | `"false"` | | `extra_debs` | This can be set to a space delimited string containing the names of additional deb packages to install | `""` | +| `extra_kernel_boot_params` | This can be set to a space delimited string containing the boot kernel parameters (e.g.: cpufreq.default_governor=performance) | `""` | | `extra_repos` | A space delimited string containing the names of files to add to the image containing repository definitions. The files should be given as absolute paths. 
| `""` | | `extra_rpms` | This can be set to a space delimited string containing the names of additional RPM packages to install | `""` | | `http_proxy` | This can be set to URL to use as an HTTP proxy during the Ansible stage of building | `""` | diff --git a/images/capi/ansible/roles/setup/defaults/main.yml b/images/capi/ansible/roles/setup/defaults/main.yml index 8432b7868d..4756226d5d 100644 --- a/images/capi/ansible/roles/setup/defaults/main.yml +++ b/images/capi/ansible/roles/setup/defaults/main.yml @@ -19,6 +19,7 @@ redhat_epel_rpm: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noa epel_rpm_gpg_key: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 rpms: "" extra_rpms: "" +extra_kernel_boot_params: "" disable_public_repos: false external_binary_path: "{{ '/opt/bin' if ansible_os_family == 'Flatcar' else '/usr/local/bin' }}" diff --git a/images/capi/ansible/roles/sysprep/tasks/debian.yml b/images/capi/ansible/roles/sysprep/tasks/debian.yml index 6f5b7e933e..a97472e393 100644 --- a/images/capi/ansible/roles/sysprep/tasks/debian.yml +++ b/images/capi/ansible/roles/sysprep/tasks/debian.yml @@ -119,9 +119,9 @@ path: /usr/sbin/update-grub register: _stat_update_grub -- name: Configure grub for non graphical consoles - ansible.builtin.copy: - src: etc/default/grub.d/50-cloudimg-settings.cfg +- name: Configure grub for non graphical consoles and add user extra kernel boot params + ansible.builtin.template: + src: templates/etc/default/grub.d/50-cloudimg-settings.cfg dest: /etc/default/grub.d/50-cloudimg-settings.cfg group: root owner: root diff --git a/images/capi/ansible/roles/sysprep/files/etc/default/grub.d/50-cloudimg-settings.cfg b/images/capi/ansible/roles/sysprep/templates/etc/default/grub.d/50-cloudimg-settings.cfg similarity index 77% rename from images/capi/ansible/roles/sysprep/files/etc/default/grub.d/50-cloudimg-settings.cfg rename to images/capi/ansible/roles/sysprep/templates/etc/default/grub.d/50-cloudimg-settings.cfg index 
d36c0b180d..9751434d32 100644 --- a/images/capi/ansible/roles/sysprep/files/etc/default/grub.d/50-cloudimg-settings.cfg +++ b/images/capi/ansible/roles/sysprep/templates/etc/default/grub.d/50-cloudimg-settings.cfg @@ -8,7 +8,7 @@ GRUB_RECORDFAIL_TIMEOUT=0 GRUB_TIMEOUT=0 # Set the default commandline -GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0" +GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0 {{ extra_kernel_boot_params }}" # Set the grub console type GRUB_TERMINAL=console diff --git a/images/capi/packer/config/ansible-args.json b/images/capi/packer/config/ansible-args.json index eb2906c1ae..487412c37f 100644 --- a/images/capi/packer/config/ansible-args.json +++ b/images/capi/packer/config/ansible-args.json @@ -1,5 +1,5 @@ { "ansible_common_ssh_args": "-o IdentitiesOnly=yes", - "ansible_common_vars": "containerd_gvisor_runtime={{user `containerd_gvisor_runtime`}} containerd_gvisor_version={{user `containerd_gvisor_version`}} containerd_sha256={{user `containerd_sha256`}} pause_image={{user `pause_image`}} containerd_additional_settings={{user `containerd_additional_settings`}} containerd_cri_socket={{user `containerd_cri_socket`}} containerd_version={{user `containerd_version`}} containerd_wasm_shims_url={{user `containerd_wasm_shims_url`}} containerd_wasm_shims_version={{user `containerd_wasm_shims_version`}} containerd_wasm_shims_sha256={{user `containerd_wasm_shims_sha256`}} containerd_wasm_shims_runtimes=\"{{user `containerd_wasm_shims_runtimes`}}\" containerd_wasm_shims_runtime_versions=\"{{user `containerd_wasm_shims_runtime_versions`}}\" crictl_version={{user `crictl_version`}} custom_role_names=\"{{user `custom_role_names`}}\" firstboot_custom_roles_pre=\"{{user `firstboot_custom_roles_pre`}}\" firstboot_custom_roles_post=\"{{user `firstboot_custom_roles_post`}}\" node_custom_roles_pre=\"{{user `node_custom_roles_pre`}}\" node_custom_roles_post=\"{{user `node_custom_roles_post`}}\" node_custom_roles_post_sysprep=\"{{user 
`node_custom_roles_post_sysprep`}}\" disable_public_repos={{user `disable_public_repos`}} extra_debs=\"{{user `extra_debs`}}\" extra_repos=\"{{user `extra_repos`}}\" extra_rpms=\"{{user `extra_rpms`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} kubeadm_template={{user `kubeadm_template`}} kubernetes_apiserver_port={{user `kubernetes_apiserver_port`}} kubernetes_cni_http_source={{user `kubernetes_cni_http_source`}} kubernetes_http_source={{user `kubernetes_http_source`}} kubernetes_container_registry={{user `kubernetes_container_registry`}} kubernetes_rpm_repo={{user `kubernetes_rpm_repo`}} kubernetes_rpm_gpg_key={{user `kubernetes_rpm_gpg_key`}} kubernetes_rpm_gpg_check={{user `kubernetes_rpm_gpg_check`}} kubernetes_deb_repo={{user `kubernetes_deb_repo`}} kubernetes_deb_gpg_key={{user `kubernetes_deb_gpg_key`}} kubernetes_cni_deb_version={{user `kubernetes_cni_deb_version`}} kubernetes_cni_rpm_version={{user `kubernetes_cni_rpm_version`}} kubernetes_cni_semver={{user `kubernetes_cni_semver`}} kubernetes_cni_source_type={{user `kubernetes_cni_source_type`}} kubernetes_semver={{user `kubernetes_semver`}} kubernetes_source_type={{user `kubernetes_source_type`}} kubernetes_load_additional_imgs={{user `kubernetes_load_additional_imgs`}} kubernetes_deb_version={{user `kubernetes_deb_version`}} kubernetes_rpm_version={{user `kubernetes_rpm_version`}} no_proxy={{user `no_proxy`}} pip_conf_file={{user `pip_conf_file`}} python_path={{user `python_path`}} redhat_epel_rpm={{user `redhat_epel_rpm`}} epel_rpm_gpg_key={{user `epel_rpm_gpg_key`}} reenable_public_repos={{user `reenable_public_repos`}} remove_extra_repos={{user `remove_extra_repos`}} systemd_prefix={{user `systemd_prefix`}} sysusr_prefix={{user `sysusr_prefix`}} sysusrlocal_prefix={{user `sysusrlocal_prefix`}} load_additional_components={{ user `load_additional_components`}} additional_registry_images={{ user `additional_registry_images`}} additional_registry_images_list={{ user 
`additional_registry_images_list`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_url_images={{ user `additional_url_images`}} additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} additional_executables_destination_path={{ user `additional_executables_destination_path`}} additional_s3={{ user `additional_s3`}} build_target={{ user `build_target`}} amazon_ssm_agent_rpm={{ user `amazon_ssm_agent_rpm` }} enable_containerd_audit={{ user `enable_containerd_audit` }} kubernetes_enable_automatic_resource_sizing={{ user `kubernetes_enable_automatic_resource_sizing` }} debug_tools={{user `debug_tools`}} ubuntu_repo={{user `ubuntu_repo`}} ubuntu_security_repo={{user `ubuntu_security_repo`}} gpu_block_nouveau_loading={{user `block_nouveau_loading`}} runc_version={{user `runc_version`}} containerd_service_url={{user `containerd_service_url`}} netplan_removal_excludes=\"{{user `netplan_removal_excludes`}}\"", + "ansible_common_vars": "containerd_gvisor_runtime={{user `containerd_gvisor_runtime`}} containerd_gvisor_version={{user `containerd_gvisor_version`}} containerd_sha256={{user `containerd_sha256`}} pause_image={{user `pause_image`}} containerd_additional_settings={{user `containerd_additional_settings`}} containerd_cri_socket={{user `containerd_cri_socket`}} containerd_version={{user `containerd_version`}} containerd_wasm_shims_url={{user `containerd_wasm_shims_url`}} containerd_wasm_shims_version={{user `containerd_wasm_shims_version`}} containerd_wasm_shims_sha256={{user `containerd_wasm_shims_sha256`}} containerd_wasm_shims_runtimes=\"{{user `containerd_wasm_shims_runtimes`}}\" containerd_wasm_shims_runtime_versions=\"{{user `containerd_wasm_shims_runtime_versions`}}\" crictl_version={{user `crictl_version`}} custom_role_names=\"{{user `custom_role_names`}}\" firstboot_custom_roles_pre=\"{{user 
`firstboot_custom_roles_pre`}}\" firstboot_custom_roles_post=\"{{user `firstboot_custom_roles_post`}}\" node_custom_roles_pre=\"{{user `node_custom_roles_pre`}}\" node_custom_roles_post=\"{{user `node_custom_roles_post`}}\" node_custom_roles_post_sysprep=\"{{user `node_custom_roles_post_sysprep`}}\" disable_public_repos={{user `disable_public_repos`}} extra_debs=\"{{user `extra_debs`}}\" extra_kernel_boot_params=\"{{user `extra_kernel_boot_params`}}\" extra_repos=\"{{user `extra_repos`}}\" extra_rpms=\"{{user `extra_rpms`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} kubeadm_template={{user `kubeadm_template`}} kubernetes_apiserver_port={{user `kubernetes_apiserver_port`}} kubernetes_cni_http_source={{user `kubernetes_cni_http_source`}} kubernetes_http_source={{user `kubernetes_http_source`}} kubernetes_container_registry={{user `kubernetes_container_registry`}} kubernetes_rpm_repo={{user `kubernetes_rpm_repo`}} kubernetes_rpm_gpg_key={{user `kubernetes_rpm_gpg_key`}} kubernetes_rpm_gpg_check={{user `kubernetes_rpm_gpg_check`}} kubernetes_deb_repo={{user `kubernetes_deb_repo`}} kubernetes_deb_gpg_key={{user `kubernetes_deb_gpg_key`}} kubernetes_cni_deb_version={{user `kubernetes_cni_deb_version`}} kubernetes_cni_rpm_version={{user `kubernetes_cni_rpm_version`}} kubernetes_cni_semver={{user `kubernetes_cni_semver`}} kubernetes_cni_source_type={{user `kubernetes_cni_source_type`}} kubernetes_semver={{user `kubernetes_semver`}} kubernetes_source_type={{user `kubernetes_source_type`}} kubernetes_load_additional_imgs={{user `kubernetes_load_additional_imgs`}} kubernetes_deb_version={{user `kubernetes_deb_version`}} kubernetes_rpm_version={{user `kubernetes_rpm_version`}} no_proxy={{user `no_proxy`}} pip_conf_file={{user `pip_conf_file`}} python_path={{user `python_path`}} redhat_epel_rpm={{user `redhat_epel_rpm`}} epel_rpm_gpg_key={{user `epel_rpm_gpg_key`}} reenable_public_repos={{user `reenable_public_repos`}} remove_extra_repos={{user 
`remove_extra_repos`}} systemd_prefix={{user `systemd_prefix`}} sysusr_prefix={{user `sysusr_prefix`}} sysusrlocal_prefix={{user `sysusrlocal_prefix`}} load_additional_components={{ user `load_additional_components`}} additional_registry_images={{ user `additional_registry_images`}} additional_registry_images_list={{ user `additional_registry_images_list`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_url_images={{ user `additional_url_images`}} additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} additional_executables_destination_path={{ user `additional_executables_destination_path`}} additional_s3={{ user `additional_s3`}} build_target={{ user `build_target`}} amazon_ssm_agent_rpm={{ user `amazon_ssm_agent_rpm` }} enable_containerd_audit={{ user `enable_containerd_audit` }} kubernetes_enable_automatic_resource_sizing={{ user `kubernetes_enable_automatic_resource_sizing` }} debug_tools={{user `debug_tools`}} ubuntu_repo={{user `ubuntu_repo`}} ubuntu_security_repo={{user `ubuntu_security_repo`}} gpu_block_nouveau_loading={{user `block_nouveau_loading`}} runc_version={{user `runc_version`}} containerd_service_url={{user `containerd_service_url`}} netplan_removal_excludes=\"{{user `netplan_removal_excludes`}}\"", "ansible_scp_extra_args": "{{env `ANSIBLE_SCP_EXTRA_ARGS`}}" } diff --git a/images/capi/packer/config/common.json b/images/capi/packer/config/common.json index e41c230127..1b2e649b30 100644 --- a/images/capi/packer/config/common.json +++ b/images/capi/packer/config/common.json @@ -3,6 +3,7 @@ "debug_tools": "false", "disable_public_repos": "false", "extra_debs": "", + "extra_kernel_boot_params": "", "extra_repos": "", "extra_rpms": "", "firstboot_custom_roles_post": "", diff --git a/images/capi/packer/goss/goss-files.yaml b/images/capi/packer/goss/goss-files.yaml index 2130871688..d38a634989 
100644 --- a/images/capi/packer/goss/goss-files.yaml +++ b/images/capi/packer/goss/goss-files.yaml @@ -25,3 +25,10 @@ file: {{end}} {{end}} {{end}} +{{if .Vars.extra_kernel_boot_params }} + "/boot/grub/grub.cfg": + exists: true + filetype: file + contains: + - {{ .Vars.extra_kernel_boot_params }} +{{end}} diff --git a/images/capi/packer/goss/goss-vars.yaml b/images/capi/packer/goss/goss-vars.yaml index 08c9bc48d3..9dce084995 100644 --- a/images/capi/packer/goss/goss-vars.yaml +++ b/images/capi/packer/goss/goss-vars.yaml @@ -93,6 +93,7 @@ containerd_gvisor_runtime: "" containerd_gvisor_version: "" containerd_version: "" containerd_wasm_shims_runtimes: "" +extra_kernel_boot_params: "" kubernetes_cni_source_type: "" kubernetes_cni_version: "" kubernetes_source_type: "" diff --git a/images/capi/packer/raw/packer.json.tmpl b/images/capi/packer/raw/packer.json.tmpl index 512f5e0d11..7199703d87 100644 --- a/images/capi/packer/raw/packer.json.tmpl +++ b/images/capi/packer/raw/packer.json.tmpl @@ -142,7 +142,8 @@ "kubernetes_deb_version": "{{ user `kubernetes_deb_version` }}", "kubernetes_rpm_version": "{{ split (user `kubernetes_rpm_version`) \"-\" 0 }}", "kubernetes_source_type": "{{user `kubernetes_source_type`}}", - "kubernetes_version": "{{user `kubernetes_semver` | replace \"v\" \"\" 1}}" + "kubernetes_version": "{{user `kubernetes_semver` | replace \"v\" \"\" 1}}", + "extra_kernel_boot_params": "{{ user `extra_kernel_boot_params` }}" }, "version": "{{user `goss_version`}}" } From a73e5caa7c90dca968fd4afeb278f83cd8f7b265 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20=C5=81oskot?= Date: Sat, 17 Jan 2026 21:39:44 +0100 Subject: [PATCH 34/90] docs: Clarify no binary/application to install A trivial correction of an apparent mistake. 
--- docs/book/src/capi/quickstart.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/book/src/capi/quickstart.md b/docs/book/src/capi/quickstart.md index 78b0d07ef9..4fa176830b 100644 --- a/docs/book/src/capi/quickstart.md +++ b/docs/book/src/capi/quickstart.md @@ -4,7 +4,7 @@ In this tutorial we will cover the basics of how to download and execute the Ima ## Installation -As a set of scripts and Makefiles that rely on Packer and Ansible, there is image builder binary/application to install. Rather we need to download the tooling from the GitHub repo and make sure that the Packer and Ansible are installed. +As a set of scripts and Makefiles that rely on Packer and Ansible, there is no image builder binary/application to install. Rather we need to download the tooling from the GitHub repo and make sure that the Packer and Ansible are installed. To get the latest image-builder source on your machine, choose one of the following methods: From 0191d3cfdac7b9f12ca531ec2022aeea04e47f5e Mon Sep 17 00:00:00 2001 From: Damiano Donati Date: Sun, 25 Jan 2026 18:57:42 +0100 Subject: [PATCH 35/90] capg: add overwrite-1-34 --- images/capi/packer/gce/ci/nightly/overwrite-1-34.json | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 images/capi/packer/gce/ci/nightly/overwrite-1-34.json diff --git a/images/capi/packer/gce/ci/nightly/overwrite-1-34.json b/images/capi/packer/gce/ci/nightly/overwrite-1-34.json new file mode 100644 index 0000000000..8a36fb79e5 --- /dev/null +++ b/images/capi/packer/gce/ci/nightly/overwrite-1-34.json @@ -0,0 +1,8 @@ +{ + "build_timestamp": "nightly", + "kubernetes_deb_version": "1.34.3-1.1", + "kubernetes_rpm_version": "1.34.3", + "kubernetes_semver": "v1.34.3", + "kubernetes_series": "v1.34", + "service_account_email": "gcb-builder-cluster-api-gcp@k8s-staging-cluster-api-gcp.iam.gserviceaccount.com" +} From 72e79b6d6b679ccbc659cefa8d84fb8a3341b30a Mon Sep 17 00:00:00 2001 From: Damiano Donati Date: Mon, 26 Jan 2026 
09:44:28 +0100 Subject: [PATCH 36/90] capg: add overwrite-1-35 --- images/capi/packer/gce/ci/nightly/overwrite-1-35.json | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 images/capi/packer/gce/ci/nightly/overwrite-1-35.json diff --git a/images/capi/packer/gce/ci/nightly/overwrite-1-35.json b/images/capi/packer/gce/ci/nightly/overwrite-1-35.json new file mode 100644 index 0000000000..801e4116f1 --- /dev/null +++ b/images/capi/packer/gce/ci/nightly/overwrite-1-35.json @@ -0,0 +1,8 @@ +{ + "build_timestamp": "nightly", + "kubernetes_deb_version": "1.35.0-1.1", + "kubernetes_rpm_version": "1.35.0", + "kubernetes_semver": "v1.35.0", + "kubernetes_series": "v1.35", + "service_account_email": "gcb-builder-cluster-api-gcp@k8s-staging-cluster-api-gcp.iam.gserviceaccount.com" +} From 72d869a359d32a2258001884a0783dac79e6202f Mon Sep 17 00:00:00 2001 From: Carlos Salas Date: Mon, 26 Jan 2026 10:29:42 +0100 Subject: [PATCH 37/90] create ctr symlink after installation Signed-off-by: Carlos Salas --- images/capi/ansible/roles/containerd/tasks/main.yml | 13 +++++++++++++ images/capi/ansible/roles/kubernetes/tasks/main.yml | 13 ------------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/images/capi/ansible/roles/containerd/tasks/main.yml b/images/capi/ansible/roles/containerd/tasks/main.yml index cd0368f0b8..a2238dab05 100644 --- a/images/capi/ansible/roles/containerd/tasks/main.yml +++ b/images/capi/ansible/roles/containerd/tasks/main.yml @@ -196,6 +196,19 @@ enabled: true state: restarted +- name: Symlink cri-tools + ansible.builtin.file: + src: /usr/local/bin/{{ item }} + dest: /usr/bin/{{ item }} + mode: "0755" + state: link + force: true + loop: + - ctr + - crictl + - critest + when: ansible_os_family != "Flatcar" + - name: Delete containerd tarball ansible.builtin.file: path: /tmp/{{ containerd_filename }} diff --git a/images/capi/ansible/roles/kubernetes/tasks/main.yml b/images/capi/ansible/roles/kubernetes/tasks/main.yml index 
4bfe1e9111..bd14799d35 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/main.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/main.yml @@ -31,19 +31,6 @@ # as the cri-containerd tarball also includes crictl. - ansible.builtin.import_tasks: crictl-url.yml -- name: Symlink cri-tools - ansible.builtin.file: - src: /usr/local/bin/{{ item }} - dest: /usr/bin/{{ item }} - mode: "0755" - state: link - force: true - loop: - - ctr - - crictl - - critest - when: ansible_os_family != "Flatcar" - - name: Create kubelet default config file ansible.builtin.template: src: etc/sysconfig/kubelet From 990737c79abb37fd56faca64f46e5a385dd1a982 Mon Sep 17 00:00:00 2001 From: Andrej Bella Date: Thu, 29 Jan 2026 12:10:31 +0100 Subject: [PATCH 38/90] fix: handle missing secret-userdata.txt for MachinePool/ASG nodes --- .../cloudinit/sources/DataSourceEc2Kubernetes.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py b/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py index 28cebca9f8..97ba291516 100644 --- a/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py +++ b/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py @@ -84,6 +84,15 @@ def _get_data(self): ) LOG.info("User-data before update:[\n%s]", self.userdata_raw) secret_userdata = "/etc/secret-userdata.txt" + # Check if secret-userdata.txt exists (written by boothook for MachineDeployment/ControlPlane nodes) + # For MachinePool (ASG), this file won't exist as userdata is passed directly via EC2 metadata + if not os.path.exists(secret_userdata): + LOG.info( + "Secret userdata file %s not found. Something might have failed or this is a MachinePool/ASG node." 
+ " Using original userdata from EC2 metadata.", + secret_userdata, + ) + return True # Get the boothook output, save it as user-data # TODO: work with upstream to put this somewhere more sensible like: # /var/lib/cloud/instances/{{v1.instance_id}}/ec2-kubernetes-userdata.txt From de8cc098b30f3b5d0a01657c24292039ffb8d588 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-Fran=C3=A7ois=20Bustarret?= Date: Thu, 29 Jan 2026 10:22:42 +0000 Subject: [PATCH 39/90] outscale: update base images to v2026-01-12 --- images/capi/packer/outscale/ubuntu-2204.json | 2 +- images/capi/packer/outscale/ubuntu-2404.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/images/capi/packer/outscale/ubuntu-2204.json b/images/capi/packer/outscale/ubuntu-2204.json index f6e97550a8..ac74b43b25 100644 --- a/images/capi/packer/outscale/ubuntu-2204.json +++ b/images/capi/packer/outscale/ubuntu-2204.json @@ -3,5 +3,5 @@ "distribution": "ubuntu", "distribution_release": "ubuntu", "distribution_version": "2204", - "image_name": "Ubuntu-22.04-2025-10-15" + "image_name": "Ubuntu-22.04-2026-01-12" } diff --git a/images/capi/packer/outscale/ubuntu-2404.json b/images/capi/packer/outscale/ubuntu-2404.json index 38469bdb18..43cd4337cd 100644 --- a/images/capi/packer/outscale/ubuntu-2404.json +++ b/images/capi/packer/outscale/ubuntu-2404.json @@ -3,5 +3,5 @@ "distribution": "ubuntu", "distribution_release": "ubuntu", "distribution_version": "2404", - "image_name": "Ubuntu-24.04-2025-10-15" + "image_name": "Ubuntu-24.04-2026-01-12" } From e669ea00f3546f4f1f853197627d5fef73779f30 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Thu, 29 Jan 2026 14:23:36 -0700 Subject: [PATCH 40/90] Remove windows-2019 from default Azure e2e --- docs/book/src/capi/providers/azure.md | 4 ++-- images/capi/azure_targets.sh | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/book/src/capi/providers/azure.md b/docs/book/src/capi/providers/azure.md index 9c3a736522..7a00ea4a9c 100644 ---
a/docs/book/src/capi/providers/azure.md +++ b/docs/book/src/capi/providers/azure.md @@ -50,8 +50,8 @@ Confidential VMs require specific generation 2 OS images. The naming pattern of # Ubuntu 24.04 LTS for Confidential VMs make build-azure-sig-ubuntu-2404-cvm -# Windows 2019 with containerd for Confindential VMs -make build-azure-sig-windows-2019-containerd-cvm +# Windows 2022 with containerd for Confidential VMs +make build-azure-sig-windows-2022-containerd-cvm ``` ### Configuration diff --git a/images/capi/azure_targets.sh b/images/capi/azure_targets.sh index 88a428dad0..df40a78c2f 100644 --- a/images/capi/azure_targets.sh +++ b/images/capi/azure_targets.sh @@ -1,4 +1,4 @@ -VHD_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 rhel-8 windows-2019-containerd windows-2022-containerd" -SIG_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 rhel-8 windows-2019-containerd windows-2022-containerd windows-2025-containerd flatcar" +VHD_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 rhel-8 windows-2022-containerd" +SIG_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 rhel-8 windows-2022-containerd windows-2025-containerd flatcar" SIG_GEN2_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 flatcar" -SIG_CVM_TARGETS="ubuntu-2204 ubuntu-2404 windows-2019-containerd windows-2022-containerd" +SIG_CVM_TARGETS="ubuntu-2204 ubuntu-2404 windows-2022-containerd" From f15600fde621a3691e9b023836b9ceed4dfa7f23 Mon Sep 17 00:00:00 2001 From: Anshuman Date: Mon, 2 Feb 2026 13:18:58 +0530 Subject: [PATCH 41/90] Fixed arch code for ppc64le --- images/capi/ansible/node.yml | 2 +- images/capi/packer/powervs/packer.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/images/capi/ansible/node.yml b/images/capi/ansible/node.yml index ab79237e14..3018f4aea8 100644 --- a/images/capi/ansible/node.yml +++ b/images/capi/ansible/node.yml @@ -20,7 +20,7 @@ custom_role_names: "" system: linux arch_uname: "{{ ansible_architecture }}" - arch: "{{ 'amd64' if arch_uname in ['x86_64', 'amd64'] else
'arm64' if arch_uname in ['aarch64', 'arm64'] else 'unsupported' }}" + arch: "{{ {'x86_64': 'amd64', 'amd64': 'amd64', 'aarch64': 'arm64', 'arm64': 'arm64', 'ppc64le': 'ppc64le'}.get(arch_uname, 'unsupported') }}" tasks: - ansible.builtin.include_role: diff --git a/images/capi/packer/powervs/packer.json b/images/capi/packer/powervs/packer.json index fb1b779f91..5b87d8c5ac 100644 --- a/images/capi/packer/powervs/packer.json +++ b/images/capi/packer/powervs/packer.json @@ -74,7 +74,7 @@ "capture_cos_secret_key": "", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", - "containerd_service_url": "null", + "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, "dhcp_network": "false", From 2e2c0f3918ca67ae33f37535437c1a691df27e12 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Sat, 31 Jan 2026 00:09:58 +0000 Subject: [PATCH 42/90] Make py3_install more flexible --- images/capi/hack/utils.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/images/capi/hack/utils.sh b/images/capi/hack/utils.sh index df2db68397..05cfe0fb27 100755 --- a/images/capi/hack/utils.sh +++ b/images/capi/hack/utils.sh @@ -107,6 +107,9 @@ pip3_install() { ensure_py3 if output=$(pip3 install --disable-pip-version-check --user "${@}" 2>&1); then echo "$output" + elif [[ $output == *"Can not perform a '--user' install"* ]]; then + >&2 echo "warning: '--user' install failed, retrying pip3 install without --user" + pip3 install --disable-pip-version-check "${@}" elif [[ $output == *"error: externally-managed-environment"* ]]; then >&2 echo "warning: externally-managed-environment, retrying pip3 install with --break-system-packages" pip3 install --disable-pip-version-check --user --break-system-packages "${@}" From 30ac30c5f8dd18c527f184f79412d87128f8f389 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Mon, 2 Feb 2026 20:40:42 +0000 
Subject: [PATCH 43/90] Bump ansible to v2.16.16 --- images/capi/hack/utils.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/images/capi/hack/utils.sh b/images/capi/hack/utils.sh index df2db68397..61b067022b 100755 --- a/images/capi/hack/utils.sh +++ b/images/capi/hack/utils.sh @@ -14,8 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Note: ansible-core v2.16.x requires Python >= 3.10. -_version_ansible_core="2.15.13" +# Note: ansible-core v2.16 supports Python 3.10-3.12. +_version_ansible_core="2.16.16" case "${OSTYPE}" in linux*) From e7b1d4271ef8a6a63c789423426e83ba8f5709c7 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Mon, 2 Feb 2026 20:24:52 +0000 Subject: [PATCH 44/90] Bump default k8s version to v1.34.3 --- images/capi/packer/config/kubernetes.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/images/capi/packer/config/kubernetes.json b/images/capi/packer/config/kubernetes.json index b6ab15580e..886ac748ea 100644 --- a/images/capi/packer/config/kubernetes.json +++ b/images/capi/packer/config/kubernetes.json @@ -1,20 +1,20 @@ { - "crictl_version": "1.32.0", + "crictl_version": "1.34.0", "kubeadm_template": "etc/kubeadm.yml", "kubernetes_apiserver_port": "6443", "kubernetes_container_registry": "registry.k8s.io", "kubernetes_deb_gpg_key": "https://pkgs.k8s.io/core:/stable:/{{ user `kubernetes_series` }}/deb/Release.key", "kubernetes_deb_repo": "https://pkgs.k8s.io/core:/stable:/{{ user `kubernetes_series` }}/deb/", - "kubernetes_deb_version": "1.32.4-1.1", + "kubernetes_deb_version": "1.34.3-1.1", "kubernetes_http_source": "https://dl.k8s.io/release", "kubernetes_load_additional_imgs": "false", "kubernetes_rpm_gpg_check": "True", "kubernetes_rpm_gpg_key": "https://pkgs.k8s.io/core:/stable:/{{ user `kubernetes_series` }}/rpm/repodata/repomd.xml.key", "kubernetes_rpm_repo": "https://pkgs.k8s.io/core:/stable:/{{ user `kubernetes_series` 
}}/rpm/", "kubernetes_rpm_repo_arch": "x86_64", - "kubernetes_rpm_version": "1.32.4", - "kubernetes_semver": "v1.32.4", - "kubernetes_series": "v1.32", + "kubernetes_rpm_version": "1.34.3", + "kubernetes_semver": "v1.34.3", + "kubernetes_series": "v1.34", "kubernetes_source_type": "pkg", "systemd_prefix": "/usr/lib/systemd", "sysusr_prefix": "/usr", From b7ce84f6d16a8c5ae4318acb0e37487f265eaf8f Mon Sep 17 00:00:00 2001 From: ffais Date: Wed, 4 Feb 2026 12:53:39 +0100 Subject: [PATCH 45/90] fix: move extra_kernel_boot_params default variable from setup to sysprep role Signed-off-by: ffais --- images/capi/ansible/roles/setup/defaults/main.yml | 1 - images/capi/ansible/roles/sysprep/defaults/main.yml | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/ansible/roles/setup/defaults/main.yml b/images/capi/ansible/roles/setup/defaults/main.yml index 4756226d5d..8432b7868d 100644 --- a/images/capi/ansible/roles/setup/defaults/main.yml +++ b/images/capi/ansible/roles/setup/defaults/main.yml @@ -19,7 +19,6 @@ redhat_epel_rpm: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noa epel_rpm_gpg_key: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 rpms: "" extra_rpms: "" -extra_kernel_boot_params: "" disable_public_repos: false external_binary_path: "{{ '/opt/bin' if ansible_os_family == 'Flatcar' else '/usr/local/bin' }}" diff --git a/images/capi/ansible/roles/sysprep/defaults/main.yml b/images/capi/ansible/roles/sysprep/defaults/main.yml index 2babaa9807..33ac3fa677 100644 --- a/images/capi/ansible/roles/sysprep/defaults/main.yml +++ b/images/capi/ansible/roles/sysprep/defaults/main.yml @@ -17,3 +17,4 @@ pip_conf_file: "" remove_extra_repos: false flatcar_disable_autologin: false sysprep_require_grub_file: true +extra_kernel_boot_params: "" From 0ed6c7c5b739fe1a5189aab9aedc832e7b195f0f Mon Sep 17 00:00:00 2001 From: zylxjtu Date: Wed, 11 Feb 2026 17:35:08 +0000 Subject: [PATCH 46/90] Apply regkey for windows 2025 Windows 2025 test
grid is not stable, due to some os level network related issue, this is to enable the feature/fix for it --- .../ansible/windows/roles/systemprep/tasks/main.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/images/capi/ansible/windows/roles/systemprep/tasks/main.yml b/images/capi/ansible/windows/roles/systemprep/tasks/main.yml index 4e27026869..0f1b8d924a 100644 --- a/images/capi/ansible/windows/roles/systemprep/tasks/main.yml +++ b/images/capi/ansible/windows/roles/systemprep/tasks/main.yml @@ -133,6 +133,16 @@ type: dword when: distribution_version == "2022" +# VPF changes to reduce lock contention +- name: Apply networking fix for Windows 2025 + ansible.windows.win_regedit: + path: HKLM:\SYSTEM\CurrentControlSet\Policies\Microsoft\FeatureManagement\Overrides + state: present + name: 520997518 + data: 1 + type: dword + when: distribution_version == "2025" + # Apply HNS flags for fixes that need to be enabled via Registry # these eventually get turned on automatically and can be removed in future releases - name: Apply HNS control Flags 0x40 and 0x10 in 2022-11B patches From 08f80d28894369fe5decc636efcf9e1170eac05a Mon Sep 17 00:00:00 2001 From: sivchari Date: Thu, 12 Feb 2026 15:36:31 +0900 Subject: [PATCH 47/90] feat: add SHA256 checksum verification for Kubernetes binaries and images Add checksum verification when downloading Kubernetes binaries (kubeadm, kubectl, kubelet) and container images via URL. This resolves the TODO comment that has been present since 2019. The implementation follows the same pattern used for CNI plugin downloads, using the .sha256 files provided by dl.k8s.io for integrity verification. 
Verified that .sha256 files exist for all supported architectures: - amd64 - arm64 - ppc64le --- images/capi/ansible/roles/kubernetes/tasks/url.yml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/images/capi/ansible/roles/kubernetes/tasks/url.yml b/images/capi/ansible/roles/kubernetes/tasks/url.yml index afcd5f0a19..c8407863ab 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/url.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/url.yml @@ -43,10 +43,7 @@ - name: Download Kubernetes binaries ansible.builtin.get_url: url: "{{ kubernetes_http_source }}/{{ kubernetes_semver }}/bin/linux/{{ kubernetes_resolved_arch }}/{{ item }}" - # TODO(akutz) Write a script to separately download the checksum - # and verify the associated file using the correct - # checksum file format - # checksum: "sha1:{{ kubernetes_http_source }}/bin/linux/amd64/{{ item }}.sha1" + checksum: "sha256:{{ kubernetes_http_source }}/{{ kubernetes_semver }}/bin/linux/{{ kubernetes_resolved_arch }}/{{ item }}.sha256" dest: "{{ sysusr_prefix }}/bin/{{ item }}" mode: "0755" owner: root @@ -56,10 +53,7 @@ - name: Download Kubernetes images ansible.builtin.get_url: url: "{{ kubernetes_http_source }}/{{ kubernetes_semver }}/bin/linux/{{ kubernetes_resolved_arch }}/{{ item }}" - # TODO(akutz) Write a script to separately download the checksum - # and verify the associated file using the correct - # checksum file format - # checksum: "sha1:{{ kubernetes_http_source }}/bin/linux/amd64/{{ item }}.sha1" + checksum: "sha256:{{ kubernetes_http_source }}/{{ kubernetes_semver }}/bin/linux/{{ kubernetes_resolved_arch }}/{{ item }}.sha256" dest: /tmp/{{ item }} mode: "0600" loop: "{{ kubernetes_imgs }}" From 5e8dd766d547d5f812a1d456b2a4ddbdc9ca365f Mon Sep 17 00:00:00 2001 From: sivchari Date: Fri, 13 Feb 2026 11:19:25 +0900 Subject: [PATCH 48/90] fix: validate only configured wasm shims in goss tests Previously, if containerd_wasm_shims_runtimes was set to any non-empty value 
(e.g., "spin,slight"), goss would validate all 4 shims (lunatic, slight, spin, wws), causing failures when only a subset was installed. This change validates each shim individually based on whether it appears in the containerd_wasm_shims_runtimes variable. Fixes #1664 --- images/capi/packer/goss/goss-command.yaml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/images/capi/packer/goss/goss-command.yaml b/images/capi/packer/goss/goss-command.yaml index 1fa9971982..2bd64e54ed 100644 --- a/images/capi/packer/goss/goss-command.yaml +++ b/images/capi/packer/goss/goss-command.yaml @@ -10,27 +10,35 @@ command: stdout: [] stderr: [] timeout: 0 -{{if ne .Vars.containerd_wasm_shims_runtimes ""}} +{{if contains "lunatic" .Vars.containerd_wasm_shims_runtimes}} containerd-shim-lunatic-v1: exit-status: 1 stdout: [ ] stderr: ["io.containerd.lunatic.v1: InvalidArgument(\"Shim namespace cannot be empty\")"] timeout: 0 +{{end}} +{{if contains "slight" .Vars.containerd_wasm_shims_runtimes}} containerd-shim-slight-v1: exit-status: 1 stdout: [ ] stderr: ["io.containerd.slight.v1: InvalidArgument(\"Shim namespace cannot be empty\")"] timeout: 0 +{{end}} +{{if contains "spin" .Vars.containerd_wasm_shims_runtimes}} containerd-shim-spin-v2: exit-status: 1 stdout: [ ] stderr: ["io.containerd.spin.v2: InvalidArgument(\"Shim namespace cannot be empty\")"] timeout: 0 +{{end}} +{{if contains "wws" .Vars.containerd_wasm_shims_runtimes}} containerd-shim-wws-v1: exit-status: 1 stdout: [ ] stderr: ["io.containerd.wws.v1: InvalidArgument(\"Shim namespace cannot be empty\")"] timeout: 0 +{{end}} +{{if ne .Vars.containerd_wasm_shims_runtimes ""}} grep -E 'io\.containerd\.(lunatic|slight|spin|wws)\.v' /etc/containerd/config.toml: exit-status: 0 stdout: [ ] From 82668248c325cfa5ec6a5aaa64c745c3d323d36b Mon Sep 17 00:00:00 2001 From: sivchari Date: Fri, 13 Feb 2026 11:28:59 +0900 Subject: [PATCH 49/90] increase azure disk size Signed-off-by: sivchari --- 
images/capi/packer/azure/azurelinux-3-gen2.json | 3 ++- images/capi/packer/azure/azurelinux-3.json | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/images/capi/packer/azure/azurelinux-3-gen2.json b/images/capi/packer/azure/azurelinux-3-gen2.json index e10bf2671f..217f4d145d 100644 --- a/images/capi/packer/azure/azurelinux-3-gen2.json +++ b/images/capi/packer/azure/azurelinux-3-gen2.json @@ -5,5 +5,6 @@ "distribution_version": "3", "image_offer": "azure-linux-3", "image_publisher": "MicrosoftCBLMariner", - "image_sku": "azure-linux-3-gen2" + "image_sku": "azure-linux-3-gen2", + "os_disk_size_gb": "20" } diff --git a/images/capi/packer/azure/azurelinux-3.json b/images/capi/packer/azure/azurelinux-3.json index bafd90c669..f8dfd3752b 100644 --- a/images/capi/packer/azure/azurelinux-3.json +++ b/images/capi/packer/azure/azurelinux-3.json @@ -5,5 +5,6 @@ "distribution_version": "3", "image_offer": "azure-linux-3", "image_publisher": "MicrosoftCBLMariner", - "image_sku": "azure-linux-3" + "image_sku": "azure-linux-3", + "os_disk_size_gb": "20" } From 34268ade3646a8a6b5afbc372955e2ae0b6cf161 Mon Sep 17 00:00:00 2001 From: Hugo Prudente Date: Fri, 13 Feb 2026 07:10:19 +0000 Subject: [PATCH 50/90] Upgrade CentOS 9 Stream base image on qemu to newer version. 
Old CentOS image is not available anymore on the repository --- images/capi/packer/qemu/qemu-centos-9.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/images/capi/packer/qemu/qemu-centos-9.json b/images/capi/packer/qemu/qemu-centos-9.json index 1a0a0b6b5b..a68e041ae7 100644 --- a/images/capi/packer/qemu/qemu-centos-9.json +++ b/images/capi/packer/qemu/qemu-centos-9.json @@ -10,9 +10,9 @@ "distro_version": "9", "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-9", "guest_os_type": "centos9-64", - "iso_checksum": "01126d2baac31f520e5b6f20ef0a2d8f2de26c8ffdebbe3ddd0eea99f2c7a765", + "iso_checksum": "2282a8ea8b98188d30958b2274548394cc854e0eb64d25abefc65b1d44e2aebf", "iso_checksum_type": "sha256", - "iso_url": "https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/iso/CentOS-Stream-9-20240304.0-x86_64-dvd1.iso", + "iso_url": "https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/iso/CentOS-Stream-9-20260209.0-x86_64-dvd1.iso", "os_display_name": "CentOS 9 Stream", "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm", "shutdown_command": "/sbin/halt -h -p" From 8354613f4e33dfac91e927772df51bbfa4eca485 Mon Sep 17 00:00:00 2001 From: Hamadi Bettahar Date: Thu, 19 Feb 2026 10:15:16 +0100 Subject: [PATCH 51/90] fix: goss validation openstack flatcar --- images/capi/packer/goss/goss-vars.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/images/capi/packer/goss/goss-vars.yaml b/images/capi/packer/goss/goss-vars.yaml index 9dce084995..ad216478e3 100644 --- a/images/capi/packer/goss/goss-vars.yaml +++ b/images/capi/packer/goss/goss-vars.yaml @@ -190,6 +190,8 @@ flatcar: command: hcloud: command: + openstack: + command: photon: common-service: apparmor: From 3bfb49b6e3ee4184a453cac2e3e118a24959a00e Mon Sep 17 00:00:00 2001 From: Drew Hudson-Viles Date: Wed, 25 Feb 2026 17:44:59 +0000 Subject: [PATCH 52/90] chore: adding overrides for cpu and machine type on qemu --- 
images/capi/packer/qemu/packer.json.tmpl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/images/capi/packer/qemu/packer.json.tmpl b/images/capi/packer/qemu/packer.json.tmpl index 7f7dacfaa1..31434039fa 100644 --- a/images/capi/packer/qemu/packer.json.tmpl +++ b/images/capi/packer/qemu/packer.json.tmpl @@ -12,7 +12,7 @@ "{{user `cd_files`}}" ], "cd_label": "cidata", - "cpu_model": "host", + "cpu_model": "{{user `cpu_model`}}", "cpus": "{{user `cpus`}}", "disk_compression": "{{ user `disk_compression`}}", "disk_discard": "{{user `disk_discard`}}", @@ -25,6 +25,7 @@ "http_directory": "{{user `http_directory`}}", "iso_checksum": "{{user `iso_checksum_type`}}:{{user `iso_checksum`}}", "iso_url": "{{user `iso_url`}}", + "machine_type": "{{user `machine_type`}}", "memory": "{{user `memory`}}", "net_device": "virtio-net", "output_directory": "{{user `output_directory`}}", @@ -182,6 +183,7 @@ "build_timestamp": "{{timestamp}}", "cd_files": "linux/base/*.nothing", "containerd_version": null, + "cpu_model": "host", "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "runc_version": null, "cpus": "1", @@ -213,6 +215,7 @@ "kubernetes_series": null, "kubernetes_source_type": null, "machine_id_mode": "444", + "machine_type": "pc", "memory": "2048", "oem_id": "", "output_directory": "./output/{{user `build_name`}}-kube-{{user `kubernetes_semver`}}", From 03f7da0ff7347b7e141fae54c53ebc96f255ee78 Mon Sep 17 00:00:00 2001 From: ffais Date: Thu, 26 Feb 2026 15:21:43 +0100 Subject: [PATCH 53/90] Bump ansible-lint to 25.2.0 Signed-off-by: ffais --- images/capi/Makefile | 8 ++++---- images/capi/hack/ensure-ansible-lint.sh | 2 +- images/capi/hack/ensure-ansible.sh | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/images/capi/Makefile b/images/capi/Makefile index 7dec9c232f..3e0d4b7f5e 100644 --- a/images/capi/Makefile +++ b/images/capi/Makefile @@ -212,7 
+212,7 @@ IMAGE_NAME ?= cluster-node-image-builder CONTROLLER_IMG ?= $(REGISTRY)/$(IMAGE_NAME) TAG ?= dev ARCH ?= amd64 -BASE_IMAGE ?= docker.io/library/ubuntu:jammy +BASE_IMAGE ?= docker.io/library/ubuntu:24.04 BUILDKIT_SYNTAX ?= docker/dockerfile:1.14 ## -------------------------------------- @@ -1055,19 +1055,19 @@ validate-all: ## Validates the Packer config for all build targets lint: ## Runs linters on image-builder code sh_files = $(shell find . -type f -name "*.sh") lint: deps-lint - ansible-lint ansible/ + ansible-lint --project-dir . ansible/ # ignore error code since shellcheck exits with Error 1 if problems are found despite running properly -@for f in $(sh_files); do (shellcheck -x $$f); done .PHONY: lint-fix lint-fix: ## Runs linters on image-builder code and fixes issues lint-fix: deps-lint - ansible-lint --fix=all ansible/ + ansible-lint --fix=all --project-dir . ansible/ .PHONY: lint-ignore lint-ignore: ## Runs linters on image-builder code and creates an "ignore" file lint-ignore: deps-lint - ansible-lint --generate-ignore ansible/ + ansible-lint --generate-ignore --project-dir . ansible/ ## -------------------------------------- ## Clean targets diff --git a/images/capi/hack/ensure-ansible-lint.sh b/images/capi/hack/ensure-ansible-lint.sh index a876a64c37..b194c29e13 100755 --- a/images/capi/hack/ensure-ansible-lint.sh +++ b/images/capi/hack/ensure-ansible-lint.sh @@ -22,7 +22,7 @@ set -o pipefail source hack/utils.sh -_version="6.21.1" +_version="25.2.0" # Change directories to the parent directory of the one in which this # script is located. 
diff --git a/images/capi/hack/ensure-ansible.sh b/images/capi/hack/ensure-ansible.sh index 64d33a0fcc..f767f68019 100755 --- a/images/capi/hack/ensure-ansible.sh +++ b/images/capi/hack/ensure-ansible.sh @@ -49,7 +49,7 @@ fi echo ${ansible_version[*]} ansible-galaxy collection install \ - 'community.general:<12.0.0' \ - ansible.posix \ + 'community.general:<=12.0.0' \ + 'ansible.posix' \ 'ansible.windows:>=1.7.0' \ community.windows From cc3cebf6505c18e31a8f3836e1590bd1a1036c44 Mon Sep 17 00:00:00 2001 From: "Sriraman S." Date: Fri, 27 Feb 2026 12:35:19 +0000 Subject: [PATCH 54/90] =?UTF-8?q?fix:=20=F0=9F=90=9B=20Update=20packer=20v?= =?UTF-8?q?sphere=20plugin=20location=20for=20ova?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- images/capi/packer/ova/config.pkr.hcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/packer/ova/config.pkr.hcl b/images/capi/packer/ova/config.pkr.hcl index 2595b89e71..d1b4dd6c35 100644 --- a/images/capi/packer/ova/config.pkr.hcl +++ b/images/capi/packer/ova/config.pkr.hcl @@ -3,7 +3,7 @@ packer { required_plugins { vsphere = { version = ">= 1.4.2" - source = "github.com/hashicorp/vsphere" + source = "github.com/vmware/vsphere" } } } From 02c1f30c6dc60dd4a77c06733291dbdabf6b4d43 Mon Sep 17 00:00:00 2001 From: Victor Sartori Date: Mon, 9 Mar 2026 09:55:52 -0300 Subject: [PATCH 55/90] Update Ubuntu ISO images from 24.04.3 to 24.04.4 --- images/capi/packer/maas/maas-ubuntu-2404-efi.json | 4 ++-- images/capi/packer/ova/ubuntu-2404-efi.json | 4 ++-- images/capi/packer/ova/ubuntu-2404.json | 4 ++-- images/capi/packer/proxmox/ubuntu-2404-efi.json | 4 ++-- images/capi/packer/proxmox/ubuntu-2404.json | 4 ++-- images/capi/packer/qemu/qemu-ubuntu-2404-efi.json | 4 ++-- images/capi/packer/qemu/qemu-ubuntu-2404.json | 4 ++-- images/capi/packer/raw/raw-ubuntu-2404-efi.json | 4 ++-- images/capi/packer/raw/raw-ubuntu-2404.json | 4 ++-- 9 files changed, 18 insertions(+), 18 
deletions(-) diff --git a/images/capi/packer/maas/maas-ubuntu-2404-efi.json b/images/capi/packer/maas/maas-ubuntu-2404-efi.json index de66ad7ec9..c57b05fecc 100644 --- a/images/capi/packer/maas/maas-ubuntu-2404-efi.json +++ b/images/capi/packer/maas/maas-ubuntu-2404-efi.json @@ -5,9 +5,9 @@ "distro_name": "ubuntu", "firmware": "OVMF.fd", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "unmount_iso": "true" diff --git a/images/capi/packer/ova/ubuntu-2404-efi.json b/images/capi/packer/ova/ubuntu-2404-efi.json index c6ace6cefb..a75d8c322f 100644 --- a/images/capi/packer/ova/ubuntu-2404-efi.json +++ b/images/capi/packer/ova/ubuntu-2404-efi.json @@ -10,9 +10,9 @@ "firmware": "efi", "floppy_dirs": "", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "vsphere_guest_os_type": "ubuntu64Guest" diff --git a/images/capi/packer/ova/ubuntu-2404.json b/images/capi/packer/ova/ubuntu-2404.json index 17c3c9a4c5..1af3a9af96 100644 --- a/images/capi/packer/ova/ubuntu-2404.json +++ b/images/capi/packer/ova/ubuntu-2404.json @@ -9,9 +9,9 @@ "distro_version": "24.04", "floppy_dirs": "", "guest_os_type": "ubuntu-64", - 
"iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "vsphere_guest_os_type": "ubuntu64Guest" diff --git a/images/capi/packer/proxmox/ubuntu-2404-efi.json b/images/capi/packer/proxmox/ubuntu-2404-efi.json index 065623fd6a..c5cac85d92 100644 --- a/images/capi/packer/proxmox/ubuntu-2404-efi.json +++ b/images/capi/packer/proxmox/ubuntu-2404-efi.json @@ -4,10 +4,10 @@ "build_name": "ubuntu-2404-efi", "distribution_version": "2404", "distro_name": "ubuntu", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", "iso_file": "{{env `ISO_FILE`}}", - "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "unmount_iso": "true", "version": "24.04" diff --git a/images/capi/packer/proxmox/ubuntu-2404.json b/images/capi/packer/proxmox/ubuntu-2404.json index 1ca5bca0f4..ed1196d446 100644 --- a/images/capi/packer/proxmox/ubuntu-2404.json +++ b/images/capi/packer/proxmox/ubuntu-2404.json @@ -3,10 +3,10 @@ "build_name": "ubuntu-2404", "distribution_version": "2404", "distro_name": "ubuntu", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", "iso_file": "{{env `ISO_FILE`}}", - "iso_url": 
"https://releases.ubuntu.com/noble/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/noble/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "source_image": "ubuntu-20-04-x64", "unmount_iso": "true", diff --git a/images/capi/packer/qemu/qemu-ubuntu-2404-efi.json b/images/capi/packer/qemu/qemu-ubuntu-2404-efi.json index de66ad7ec9..c57b05fecc 100644 --- a/images/capi/packer/qemu/qemu-ubuntu-2404-efi.json +++ b/images/capi/packer/qemu/qemu-ubuntu-2404-efi.json @@ -5,9 +5,9 @@ "distro_name": "ubuntu", "firmware": "OVMF.fd", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "unmount_iso": "true" diff --git a/images/capi/packer/qemu/qemu-ubuntu-2404.json b/images/capi/packer/qemu/qemu-ubuntu-2404.json index e5d24835c5..9ef9e8d198 100644 --- a/images/capi/packer/qemu/qemu-ubuntu-2404.json +++ b/images/capi/packer/qemu/qemu-ubuntu-2404.json @@ -4,9 +4,9 @@ "distribution_version": "2404", "distro_name": "ubuntu", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "unmount_iso": "true" diff --git 
a/images/capi/packer/raw/raw-ubuntu-2404-efi.json b/images/capi/packer/raw/raw-ubuntu-2404-efi.json index 9851777f1f..2c4cdf6d04 100644 --- a/images/capi/packer/raw/raw-ubuntu-2404-efi.json +++ b/images/capi/packer/raw/raw-ubuntu-2404-efi.json @@ -8,9 +8,9 @@ "distro_version_slug": "2404", "firmware": "OVMF.fd", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "unmount_iso": "true" diff --git a/images/capi/packer/raw/raw-ubuntu-2404.json b/images/capi/packer/raw/raw-ubuntu-2404.json index b376441745..529aea8e56 100644 --- a/images/capi/packer/raw/raw-ubuntu-2404.json +++ b/images/capi/packer/raw/raw-ubuntu-2404.json @@ -7,9 +7,9 @@ "distro_version": "24.04", "distro_version_slug": "2404", "guest_os_type": "ubuntu-64", - "iso_checksum": "c3514bf0056180d09376462a7a1b4f213c1d6e8ea67fae5c25099c6fd3d8274b", + "iso_checksum": "e907d92eeec9df64163a7e454cbc8d7755e8ddc7ed42f99dbc80c40f1a138433", "iso_checksum_type": "sha256", - "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.3-live-server-amd64.iso", + "iso_url": "https://releases.ubuntu.com/releases/24.04/ubuntu-24.04.4-live-server-amd64.iso", "os_display_name": "Ubuntu 24.04", "shutdown_command": "shutdown -P now", "unmount_iso": "true" From 2ff67f10b4738c3169087f58a3a6a30f12414edc Mon Sep 17 00:00:00 2001 From: Agustina Barbetta Date: Tue, 10 Mar 2026 07:21:14 -0300 Subject: [PATCH 56/90] Add AlmaLinux 9 OVA build (#1946) * Add AlmaLinux 9 OVA build * Add AlmaLinux 9 to the OS matrix in README * Sort Provider/OS table alphabetically * Fix guest OS type for 
AlmaLinux 9 --- README.md | 1 + images/capi/Makefile | 11 +++ images/capi/packer/goss/goss-vars.yaml | 12 +++ images/capi/packer/ova/almalinux-9.json | 21 ++++ .../ova/linux/almalinux/http/9/ks.cfg.tmpl | 97 +++++++++++++++++++ 5 files changed, 142 insertions(+) create mode 100644 images/capi/packer/ova/almalinux-9.json create mode 100644 images/capi/packer/ova/linux/almalinux/http/9/ks.cfg.tmpl diff --git a/README.md b/README.md index 5493ff670a..e83b26fff8 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,7 @@ The table below shows the currently provided operating systems for each provider | OS | ami | azure | digitalocean | gce | hcloud | huaweicloud | maas | nutanix | oci | openstack | outscale | ova | powervs | proxmox | qemu | raw | scaleway | vultr | |-------------------|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----| +| AlmaLinux 9 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Amazon Linux 2 | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Amazon Linux 2023 | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Azure Linux 3 | ❌ | 💙 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | diff --git a/images/capi/Makefile b/images/capi/Makefile index 7dec9c232f..a87d440b9b 100644 --- a/images/capi/Makefile +++ b/images/capi/Makefile @@ -341,6 +341,7 @@ FLATCAR_VERSIONS := flatcar PHOTON_VERSIONS := photon-4 photon-5 RHEL_VERSIONS := rhel-8 rhel-9 ROCKYLINUX_VERSIONS := rockylinux-8 rockylinux-9 +ALMALINUX_VERSIONS := almalinux-9 UBUNTU_VERSIONS := ubuntu-2204 ubuntu-2204-efi ubuntu-2404 ubuntu-2404-efi WINDOWS_VERSIONS := windows-2019 windows-2019-efi windows-2022 windows-2022-efi @@ -356,6 +357,7 @@ export FLATCAR_CHANNEL FLATCAR_VERSION PLATFORMS_AND_VERSIONS := $(PHOTON_VERSIONS) \ $(RHEL_VERSIONS) \ $(ROCKYLINUX_VERSIONS) \ + $(ALMALINUX_VERSIONS) \ $(UBUNTU_VERSIONS) \ $(FLATCAR_VERSIONS) \ $(WINDOWS_VERSIONS) @@ -750,6 
+752,7 @@ build-node-ova-local-rhel-8: ## Builds RHEL 8 Node OVA w local hypervisor build-node-ova-local-rhel-9: ## Builds RHEL 9 Node OVA w local hypervisor build-node-ova-local-rockylinux-8: ## Builds RockyLinux 8 Node OVA w local hypervisor build-node-ova-local-rockylinux-9: ## Builds RockyLinux 9 Node OVA w local hypervisor +build-node-ova-local-almalinux-9: ## Builds AlmaLinux 9 Node OVA w local hypervisor build-node-ova-local-windows-2019: ## Builds for Windows Server 2019 Node OVA w local hypervisor build-node-ova-local-all: $(NODE_OVA_LOCAL_BUILD_TARGETS) ## Builds all Node OVAs w local hypervisor @@ -760,6 +763,7 @@ build-node-ova-vsphere-rhel-8: ## Builds RHEL 8 Node OVA and template on vSphere build-node-ova-vsphere-rhel-9: ## Builds RHEL 9 Node OVA and template on vSphere build-node-ova-vsphere-rockylinux-8: ## Builds RockyLinux 8 Node OVA and template on vSphere build-node-ova-vsphere-rockylinux-9: ## Builds RockyLinux 9 Node OVA and template on vSphere +build-node-ova-vsphere-almalinux-9: ## Builds AlmaLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-ubuntu-2204: ## Builds Ubuntu 22.04 Node OVA and template on vSphere build-node-ova-vsphere-ubuntu-2204-efi: ## Builds Ubuntu 22.04 Node OVA and template on vSphere that EFI boots build-node-ova-vsphere-ubuntu-2404: ## Builds Ubuntu 24.04 Node OVA and template on vSphere @@ -776,6 +780,7 @@ build-node-ova-vsphere-clone-rhel-8: ## Builds RHEL 8 Node OVA and template on v build-node-ova-vsphere-clone-rhel-9: ## Builds RHEL 9 Node OVA and template on vSphere build-node-ova-vsphere-clone-rockylinux-8: ## Builds RockyLinux 8 Node OVA and template on vSphere build-node-ova-vsphere-clone-rockylinux-9: ## Builds RockyLinux 9 Node OVA and template on vSphere +build-node-ova-vsphere-clone-almalinux-9: ## Builds AlmaLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-clone-ubuntu-2204: ## Builds Ubuntu 22.04 Node OVA and template on vSphere build-node-ova-vsphere-clone-ubuntu-2204-efi: ## 
## Builds Ubuntu 22.04 Node OVA and template on vSphere that EFI boots build-node-ova-vsphere-clone-ubuntu-2404: ## Builds Ubuntu 24.04 Node OVA and template on vSphere @@ -788,6 +793,7 @@ build-node-ova-vsphere-base-rhel-8: ## Builds base RHEL 8 Node OVA and template build-node-ova-vsphere-base-rhel-9: ## Builds base RHEL 9 Node OVA and template on vSphere build-node-ova-vsphere-base-rockylinux-8: ## Builds base RockyLinux 8 Node OVA and template on vSphere build-node-ova-vsphere-base-rockylinux-9: ## Builds base RockyLinux 9 Node OVA and template on vSphere +build-node-ova-vsphere-base-almalinux-9: ## Builds base AlmaLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-base-ubuntu-2204: ## Builds base Ubuntu 22.04 Node OVA and template on vSphere build-node-ova-vsphere-base-ubuntu-2204-efi: ## Builds Ubuntu 22.04 Node OVA and template on vSphere that EFI boots build-node-ova-vsphere-base-ubuntu-2404: ## Builds base Ubuntu 24.04 Node OVA and template on vSphere @@ -800,6 +806,7 @@ build-node-ova-local-vmx-rhel-8: ## Builds RHEL 8 Node OVA from VMX file w local build-node-ova-local-vmx-rhel-9: ## Builds RHEL 9 Node OVA from VMX file w local hypervisor build-node-ova-local-vmx-rockylinux-8: ## Builds RockyLinux 8 Node OVA from VMX file w local hypervisor build-node-ova-local-vmx-rockylinux-9: ## Builds RockyLinux 9 Node OVA from VMX file w local hypervisor +build-node-ova-local-vmx-almalinux-9: ## Builds AlmaLinux 9 Node OVA from VMX file w local hypervisor build-node-ova-local-base-photon-4: ## Builds Photon 4 Base Node OVA w local hypervisor build-node-ova-local-base-photon-5: ## Builds Photon 5 Base Node OVA w local hypervisor @@ -807,6 +814,7 @@ build-node-ova-local-base-rhel-8: ## Builds RHEL 8 Base Node OVA w local hypervi build-node-ova-local-base-rhel-9: ## Builds RHEL 9 Base Node OVA w local hypervisor build-node-ova-local-base-rockylinux-8: ## Builds RockyLinux 8 Base Node OVA w local hypervisor build-node-ova-local-base-rockylinux-9: ## Builds 
RockyLinux 9 Base Node OVA w local hypervisor +build-node-ova-local-base-almalinux-9: ## Builds AlmaLinux 9 Base Node OVA w local hypervisor build-openstack-ubuntu-2204: ## Builds Ubuntu 22.04 OpenStack image build-openstack-ubuntu-2404: ## Builds Ubuntu 24.04 OpenStack image @@ -945,6 +953,7 @@ validate-node-ova-local-rhel-8: ## Validates RHEL 8 Node OVA Packer config w loc validate-node-ova-local-rhel-9: ## Validates RHEL 9 Node OVA Packer config w local hypervisor validate-node-ova-local-rockylinux-8: ## Validates RockyLinux 8 Node OVA Packer config w local hypervisor validate-node-ova-local-rockylinux-9: ## Validates RockyLinux 9 Node OVA Packer config w local hypervisor +validate-node-ova-local-almalinux-9: ## Validates AlmaLinux 9 Node OVA Packer config w local hypervisor validate-node-ova-local-ubuntu-2204: ## Validates Ubuntu 22.04 Node OVA Packer config w local hypervisor validate-node-ova-local-ubuntu-2404: ## Validates Ubuntu 24.04 Node OVA Packer config w local hypervisor validate-node-ova-local-windows-2019: ## Validates Windows Server 2019 Node OVA Packer config w local hypervisor @@ -957,6 +966,7 @@ validate-node-ova-local-vmx-rhel-8: ## Validates RHEL 8 Node OVA from VMX file w validate-node-ova-local-vmx-rhel-9: ## Validates RHEL 9 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-rockylinux-8: ## Validates RockyLinux 8 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-rockylinux-9: ## Validates RockyLinux 9 Node OVA from VMX file w local hypervisor +validate-node-ova-local-vmx-almalinux-9: ## Validates AlmaLinux 9 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-ubuntu-2204: ## Validates Ubuntu 22.04 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-ubuntu-2404: ## Validates Ubuntu 24.04 Node OVA from VMX file w local hypervisor @@ -966,6 +976,7 @@ validate-node-ova-local-base-rhel-8: ## Validates RHEL 8 Base Node OVA w local h validate-node-ova-local-base-rhel-9: ## 
Validates RHEL 9 Base Node OVA w local hypervisor validate-node-ova-local-base-rockylinux-8: ## Validates RockyLinux 8 Base Node OVA w local hypervisor validate-node-ova-local-base-rockylinux-9: ## Validates RockyLinux 9 Base Node OVA w local hypervisor +validate-node-ova-local-base-almalinux-9: ## Validates AlmaLinux 9 Base Node OVA w local hypervisor validate-node-ova-local-base-ubuntu-2204: ## Validates Ubuntu 22.04 Base Node OVA w local hypervisor validate-node-ova-local-base-ubuntu-2404: ## Validates Ubuntu 24.04 Base Node OVA w local hypervisor diff --git a/images/capi/packer/goss/goss-vars.yaml b/images/capi/packer/goss/goss-vars.yaml index ad216478e3..85c406031a 100644 --- a/images/capi/packer/goss/goss-vars.yaml +++ b/images/capi/packer/goss/goss-vars.yaml @@ -171,6 +171,18 @@ centos: cloud-init: cloud-utils-growpart: python2-pip: +almalinux: + common-package: *common_rpms + ova: + package: + open-vm-tools: + os_version: + - distro_version: "8" + package: + <<: *rh8_rpms + - distro_version: "9" + package: + <<: *rh9_rpms flatcar: common-service: systemd-timesyncd: diff --git a/images/capi/packer/ova/almalinux-9.json b/images/capi/packer/ova/almalinux-9.json new file mode 100644 index 0000000000..4787a8765c --- /dev/null +++ b/images/capi/packer/ova/almalinux-9.json @@ -0,0 +1,21 @@ +{ + "boot_command_prefix": " inst.stage2=hd:LABEL=AlmaLinux-9-4-x86_64-dvd inst.repo=cdrom inst.text inst.ks=cdrom:/ks.cfg rd.multipath=0 rd.live.check", + "boot_command_suffix": "", + "build_name": "almalinux-9", + "cd_content_location": "./packer/ova/linux/{{user `distro_name`}}/http/{{user `distro_version`}}/*", + "cd_label": "cidata", + "distro_arch": "amd64", + "distro_name": "almalinux", + "distro_version": "9", + "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-9", + "firmware": "bios", + "guest_os_type": "rhel9-64", + "iso_checksum": "34c4285d524605da6dbd76b0b475338f6ea0a28bb88929bf14b04db68f1e1620", + "iso_checksum_type": "sha256", + 
"iso_url": "https://repo.almalinux.org/vault/9.4/isos/x86_64/AlmaLinux-9.4-x86_64-dvd.iso", + "os_display_name": "AlmaLinux 9", + "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm", + "shutdown_command": "/sbin/halt -h -p", + "vmx_version": "18", + "vsphere_guest_os_type": "rhel9_64Guest" +} diff --git a/images/capi/packer/ova/linux/almalinux/http/9/ks.cfg.tmpl b/images/capi/packer/ova/linux/almalinux/http/9/ks.cfg.tmpl new file mode 100644 index 0000000000..a1d720770a --- /dev/null +++ b/images/capi/packer/ova/linux/almalinux/http/9/ks.cfg.tmpl @@ -0,0 +1,97 @@ +# Use DVD installation media +repo --name="BaseOS" --baseurl=file:///run/install/repo/BaseOS +repo --name="AppStream" --baseurl=file:///run/install/repo/AppStream +cdrom + +# Use text install +text + +# Don't run the Setup Agent on first boot +firstboot --disabled +eula --agreed + +# Keyboard layouts +keyboard --vckeymap=us --xlayouts='us' + +# System language +lang en_US.UTF-8 + +# Network information +network --bootproto=dhcp --onboot=on --ipv6=auto --activate --hostname=capv.vm + +# Lock Root account +rootpw --lock + +# Create builder user +user --name=builder --groups=wheel --password=$SSH_PASSWORD --plaintext --shell=/bin/bash + +# System services +selinux --permissive +firewall --disabled +services --enabled="NetworkManager,sshd,chronyd" + +# System timezone +timezone UTC + +# System bootloader configuration +bootloader --location=mbr --boot-drive=sda +zerombr +clearpart --all --initlabel --drives=sda +autopart --nohome --noswap --nolvm + +skipx + +%packages --ignoremissing --excludedocs +openssh-server +open-vm-tools +sudo +sed +python3 + +# unnecessary firmware +-aic94xx-firmware +-atmel-firmware +-b43-openfwwf +-bfa-firmware +-ipw2100-firmware +-ipw2200-firmware +-ivtv-firmware +-iwl*-firmware +-libertas-usb8388-firmware +-ql*-firmware +-rt61pci-firmware +-rt73usb-firmware +-xorg-x11-drv-ati-firmware +-zd1211-firmware +-cockpit +-quota +-alsa-* 
+-fprintd-pam +-intltool +-microcode_ctl +%end + +%addon com_redhat_kdump --disable +%end + +reboot + +%post + +echo 'builder ALL=(ALL) NOPASSWD: ALL' >/etc/sudoers.d/builder +chmod 440 /etc/sudoers.d/builder + +# Remove the package cache +yum -y clean all + +swapoff -a +rm -f /swapfile +sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab + +systemctl enable vmtoolsd +systemctl start vmtoolsd + +# Ensure on next boot that network devices get assigned unique IDs. +sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* + +%end From 23be41950e8dc18da1c2fdafa4c24f45a77cf603 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Tue, 10 Mar 2026 12:31:12 -0600 Subject: [PATCH 57/90] Publish Azure SIG images via GitHub Action (#1944) * Publish Azure VHDs via GitHub Action * Remove obsolete Azure ADO build pipeline * Make REPLICATED_REGIONS an overridable input variable * Extract duplicate sed commands into a composite action * Quote var references in command statements * Check for Windows kube-proxy image via HEAD request --- .../actions/configure-k8s-version/action.yaml | 25 + .github/workflows/README.md | 165 +++++ .github/workflows/build-azure-sig.yaml | 627 ++++++++++++++++++ .../packer/azure/.pipelines/build-sig.yaml | 80 --- .../packer/azure/.pipelines/clean-sig.yaml | 59 -- .../packer/azure/.pipelines/k8s-config.yaml | 10 - .../packer/azure/.pipelines/promote-sig.yaml | 153 ----- .../azure/.pipelines/sig-publishing-info.yaml | 42 -- .../capi/packer/azure/.pipelines/stages.yaml | 33 - .../packer/azure/.pipelines/test-sig.yaml | 142 ---- 10 files changed, 817 insertions(+), 519 deletions(-) create mode 100644 .github/actions/configure-k8s-version/action.yaml create mode 100644 .github/workflows/README.md create mode 100644 .github/workflows/build-azure-sig.yaml delete mode 100644 images/capi/packer/azure/.pipelines/build-sig.yaml delete mode 100644 images/capi/packer/azure/.pipelines/clean-sig.yaml delete mode 100644 images/capi/packer/azure/.pipelines/k8s-config.yaml 
delete mode 100644 images/capi/packer/azure/.pipelines/promote-sig.yaml delete mode 100644 images/capi/packer/azure/.pipelines/sig-publishing-info.yaml delete mode 100644 images/capi/packer/azure/.pipelines/stages.yaml delete mode 100644 images/capi/packer/azure/.pipelines/test-sig.yaml diff --git a/.github/actions/configure-k8s-version/action.yaml b/.github/actions/configure-k8s-version/action.yaml new file mode 100644 index 0000000000..168bf3a238 --- /dev/null +++ b/.github/actions/configure-k8s-version/action.yaml @@ -0,0 +1,25 @@ +name: Configure Kubernetes version +description: Update kubernetes.json with the specified Kubernetes version + +inputs: + kubernetes_version: + description: 'Kubernetes version (e.g., 1.31.1)' + required: true + +runs: + using: composite + steps: + - name: Configure Kubernetes version + working-directory: images/capi/packer/config + shell: bash + env: + KUBERNETES_VERSION: ${{ inputs.kubernetes_version }} + run: | + set -euo pipefail + KUBERNETES_RELEASE=$(echo "${KUBERNETES_VERSION}" | cut -d "." 
-f -2) + sed -i "s/^ \"kubernetes_series\".*/ \"kubernetes_series\": \"v${KUBERNETES_RELEASE}\",/g" kubernetes.json + sed -i "s/^ \"kubernetes_semver\".*/ \"kubernetes_semver\": \"v${KUBERNETES_VERSION}\",/g" kubernetes.json + sed -i "s/^ \"kubernetes_rpm_version\".*/ \"kubernetes_rpm_version\": \"${KUBERNETES_VERSION}\",/g" kubernetes.json + sed -i "s/^ \"kubernetes_deb_version\".*/ \"kubernetes_deb_version\": \"${KUBERNETES_VERSION}-1.1\",/g" kubernetes.json + grep -q "v${KUBERNETES_VERSION}" kubernetes.json || { echo 'ERROR: kubernetes version not set in kubernetes.json'; exit 1; } + cat kubernetes.json diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 0000000000..4e2f46e05a --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,165 @@ +# Azure SIG Image Builder - GitHub Actions Workflows + +This directory contains GitHub Actions workflows for building and publishing Azure Shared Image Gallery (SIG) images using the image-builder project. These workflows are the GitHub Actions equivalent of the Azure DevOps pipelines in `images/capi/packer/azure/.pipelines/`. + +## Workflow Overview + +The entire pipeline is defined in a single workflow file, `build-azure-sig.yaml`, which contains all stages as separate jobs: + +## Pipeline Stages + +``` +┌─────────┐ ┌──────────┐ ┌─────────────┐ ┌─────────┐ +│ Build │───▶│ Test │───▶│ Promote │───▶│ Clean │ +└─────────┘ └──────────┘ └─────────────┘ └─────────┘ + (optional) (requires approval) (if build succeeded) +``` + +1. **Build**: Builds the Kubernetes node image using Packer and publishes it to a staging Azure Compute Gallery +2. **Test**: (Optional) Creates a test CAPI cluster using the built image to validate it works correctly +3. **Promote**: (Requires approval) Promotes the image from staging to the community gallery for public access +4. 
**Clean**: Cleans up staging resources (managed image and staging gallery version) — only runs if the build succeeded + +## Usage + +### Triggering the Workflow + +1. Go to the **Actions** tab in the GitHub repository +2. Select **Build Azure SIG Image** from the workflows list +3. Click **Run workflow** +4. Fill in the required inputs: + +| Input | Required | Description | Example | +|-------|----------|-------------|---------| +| `kubernetes_version` | Yes | Kubernetes version to build | `1.31.1` | +| `os` | Yes | Operating system | `Ubuntu`, `AzureLinux`, or `Windows` | +| `os_version` | Yes | OS version | `24.04`, `22.04`, `2022-containerd` | +| `resource_group` | No | Azure resource group | `cluster-api-gallery` | +| `staging_gallery_name` | No | Staging gallery name | `staging_gallery` | +| `gallery_name` | No | Community gallery name | `community_gallery` | +| `packer_flags` | No | Additional Packer flags | `--on-error=abort` | +| `tags` | No | Custom tags for the image | `env=prod team=infra` | +| `skip_test` | No | Skip the test stage | `true` (default) | +| `skip_promote` | No | Skip the promote stage | `false` | + +### Supported OS and Version Combinations + +| OS | Versions | +|----|----------| +| Ubuntu | `22.04`, `24.04` | +| AzureLinux | `3` | +| Windows | `2022-containerd`, `2025-containerd` | + +## Setup Requirements + +### 1. Azure OIDC Authentication + +Configure Azure OIDC (OpenID Connect) authentication for passwordless authentication from GitHub Actions: + +1. Create an Azure AD application and service principal +2. Configure federated credentials for the GitHub repository +3. 
Grant the service principal necessary permissions on your Azure subscription + +Add the following secrets to your GitHub repository or organization: + +| Secret | Description | +|--------|-------------| +| `AZURE_CLIENT_ID` | Azure AD application (client) ID | +| `AZURE_TENANT_ID` | Azure AD tenant ID | +| `AZURE_SUBSCRIPTION_ID` | Azure subscription ID | + +For detailed instructions, see: [Azure Login with OIDC](https://github.com/azure/login#login-with-openid-connect-oidc-recommended) + +### 2. GitHub Environment for Approvals + +Create a GitHub Environment for the promotion approval gate: + +1. Go to **Settings** → **Environments** +2. Create a new environment named `image-promotion-approval` +3. Enable **Required reviewers** and add the appropriate team members +4. Optionally configure deployment branches and wait timer + +### 3. Repository/Organization Variables + +Set the following variables in your repository or organization settings for the promotion stage: + +| Variable | Description | Example | +|----------|-------------|---------| +| `EULA_LINK` | URL to the EULA for the image | `https://example.com/eula` | +| `PUBLISHER_EMAIL` | Email for the image publisher | `team@example.com` | +| `PUBLISHER_URI` | URI for the image publisher | `https://example.com` | +| `SIG_PUBLISHER` | Publisher name for image definitions | `MyOrganization` | + +### 4. Azure Resources + +Ensure the following Azure resources are set up: + +- **Resource Group**: A resource group for the compute galleries (default: `cluster-api-gallery`) +- **Staging Gallery**: An Azure Compute Gallery for initial image publishing +- **Community Gallery**: An Azure Compute Gallery with community permissions for public access + +The workflows will create these resources if they don't exist, provided the service principal has sufficient permissions. 
+ +### Required Azure RBAC Permissions + +The service principal needs the following permissions: + +- `Contributor` on the resource group (or subscription) +- `User Access Administrator` if creating new resource groups +- For community galleries: permissions to create and manage Shared Image Galleries + +## Artifacts + +The workflows produce the following artifacts: + +| Artifact | Description | Retention | +|----------|-------------|-----------| +| `publishing-info` | JSON file with image metadata from the build stage | 7 days | +| `sig-publishing` | JSON file with community gallery publishing details | 30 days | + +## Differences from Azure DevOps Pipelines + +| Feature | Azure DevOps | GitHub Actions | +|---------|--------------|----------------| +| Authentication | Service Connection | Azure OIDC via `azure/login@v2` | +| Approvals | ADO Environments | GitHub Environments | +| Artifacts | Pipeline Artifacts | GitHub Actions Artifacts | +| Variables | Pipeline Variables | Workflow Inputs + Repository Variables | +| Templates | YAML Templates | Jobs within a single workflow | + +## Troubleshooting + +### Common Issues + +1. **Authentication failures** + - Verify OIDC credentials are correctly configured + - Check that the federated credential matches the repository and branch + +2. **Permission denied errors** + - Ensure the service principal has sufficient Azure RBAC permissions + - Verify the subscription ID is correct + +3. **Packer build failures** + - Check the Packer output in the build logs + - Verify the OS/version combination is supported + - Ensure the Kubernetes version exists + +4. **Test stage failures** + - The test stage requires the Azure CAPI CLI extension + - Ensure sufficient quota for VMs in the target region + +### Debug Mode + +To enable debug output, add `--on-error=abort` to the `packer_flags` input to preserve the Packer VM on failure for investigation. 
+ +> **Warning:** Do **not** use `--on-error=ask` — it will cause the workflow to hang indefinitely waiting for interactive input, consuming the entire job timeout. + +For more verbose logging, you can enable GitHub Actions debug logging by setting the `ACTIONS_STEP_DEBUG` secret to `true`. + +## Related Documentation + +- [Image Builder Documentation](../../docs/book/src/capi/capi.md) +- [Azure Provider Documentation](../../images/capi/packer/azure/README.md) +- [Azure DevOps Pipelines](../../images/capi/packer/azure/.pipelines/) +- [GitHub Actions Documentation](https://docs.github.com/en/actions) +- [Azure Login Action](https://github.com/azure/login) diff --git a/.github/workflows/build-azure-sig.yaml b/.github/workflows/build-azure-sig.yaml new file mode 100644 index 0000000000..dd2701dffc --- /dev/null +++ b/.github/workflows/build-azure-sig.yaml @@ -0,0 +1,627 @@ +# GitHub Actions workflow for building and publishing Azure Shared Image Gallery (SIG) images +# +# Required secrets: +# - AZURE_CLIENT_ID - Azure service principal client ID (for OIDC authentication) +# - AZURE_TENANT_ID - Azure tenant ID +# - AZURE_SUBSCRIPTION_ID - Azure subscription ID +# +# Required environment variables (set in GitHub repository or organization settings): +# - EULA_LINK - the URL to the EULA for the image (for promote stage) +# - PUBLISHER_EMAIL - the email for the image publisher (for promote stage) +# - PUBLISHER_URI - the URI for the image publisher (for promote stage) +# - SIG_PUBLISHER - the publisher for the image definition (for promote stage) +# +# Required inputs: +# - kubernetes_version - version of Kubernetes to build the image with, e.g. 
`1.31.1` +# - os - operating system distro, such as 'Ubuntu', 'AzureLinux', or 'Windows' +# - os_version - version of distro, such as `24.04` or `2022-containerd` +# +# Optional inputs: +# - resource_group - name of the Azure resource group to use for the compute galleries +# - staging_gallery_name - name of the Azure compute gallery for initial image publishing +# - gallery_name - name of the Azure community gallery for final image publishing +# - packer_flags - additional flags to pass to packer +# - tags - tags to apply to the image +# - replicated_regions - space-separated list of Azure regions to replicate the image to +# - skip_test - skip the test stage +# - skip_promote - skip the promote stage + +name: Build Azure SIG Image + +on: + workflow_dispatch: + inputs: + kubernetes_version: + description: 'Kubernetes version (e.g., 1.31.1)' + required: true + type: string + os: + description: 'Operating system (Ubuntu, AzureLinux, Windows)' + required: true + type: choice + options: + - Ubuntu + - AzureLinux + - Windows + os_version: + description: 'OS version (e.g., 24.04, 2022-containerd)' + required: true + type: string + resource_group: + description: 'Azure resource group name' + required: false + type: string + default: 'cluster-api-gallery' + staging_gallery_name: + description: 'Staging gallery name' + required: false + type: string + default: 'staging_gallery' + gallery_name: + description: 'Community gallery name' + required: false + type: string + default: 'community_gallery' + packer_flags: + description: 'Additional Packer flags' + required: false + type: string + default: '' + tags: + description: 'Tags to apply to the image' + required: false + type: string + default: '' + replicated_regions: + description: 'Space-separated Azure regions to replicate the image to (image build region is always included)' + required: false + type: string + default: 'australiaeast canadacentral eastus eastus2 francecentral germanywestcentral northeurope 
switzerlandnorth uksouth westeurope' + skip_test: + description: 'Skip the test stage' + required: false + type: boolean + default: true + skip_promote: + description: 'Skip the promote stage (requires manual approval)' + required: false + type: boolean + default: false + +permissions: + id-token: write + contents: read + +jobs: + # --------------------------------------------------------------------------- + # Build + # --------------------------------------------------------------------------- + build: + name: Build SIG Image + runs-on: ubuntu-latest + timeout-minutes: 120 + env: + KUBERNETES_VERSION: ${{ inputs.kubernetes_version }} + OS: ${{ inputs.os }} + OS_VERSION: ${{ inputs.os_version }} + RESOURCE_GROUP: ${{ inputs.resource_group }} + STAGING_GALLERY_NAME: ${{ inputs.staging_gallery_name }} + PACKER_FLAGS: ${{ inputs.packer_flags }} + TAGS_INPUT: ${{ inputs.tags }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Configure Kubernetes version + uses: ./.github/actions/configure-k8s-version + with: + kubernetes_version: ${{ inputs.kubernetes_version }} + + - name: Check for Windows kube-proxy image + if: inputs.os == 'Windows' + run: | + set -euo pipefail + IMAGE="sigwindowstools/kube-proxy" + TAG="v${KUBERNETES_VERSION/+/_}-calico-hostprocess" + echo "Checking for Windows kube-proxy image ${IMAGE}:${TAG}" + + # Use the Docker Hub Registry v2 API to verify the tag exists. 
+ TOKEN=$(curl -fsSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:${IMAGE}:pull" | jq -r .token) + HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" \ + -H "Authorization: Bearer ${TOKEN}" \ + -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \ + -H "Accept: application/vnd.docker.distribution.manifest.list.v2+json" \ + "https://registry-1.docker.io/v2/${IMAGE}/manifests/${TAG}") + + if [[ "${HTTP_STATUS}" != "200" ]]; then + echo "kube-proxy image ${IMAGE}:${TAG} not found (HTTP ${HTTP_STATUS})" + exit 1 + fi + echo "kube-proxy image ${IMAGE}:${TAG} exists" + + - name: Azure Login + uses: azure/login@v2 + with: + client-id: ${{ secrets.AZURE_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install dependencies + working-directory: images/capi + run: | + pip install ansible ansible-lint + make deps-azure + + - name: Build SIG Image + id: build + working-directory: images/capi + run: | + set -euo pipefail + + os=$(echo "${OS}" | tr '[:upper:]' '[:lower:]') + version=$(echo "${OS_VERSION}" | tr '[:upper:]' '[:lower:]' | tr -d .) 
+ + export RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" + export RESOURCE_GROUP_NAME="${RESOURCE_GROUP}" + + # timestamp is in RFC-3339 format to match kubetest + export TIMESTAMP="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" + export JOB_NAME="${JOB_NAME:-"image-builder-sig-${os}-${version}"}" + + if [[ -n "${TAGS_INPUT}" ]]; then + export TAGS="${TAGS_INPUT}" + else + export TAGS="creationTimestamp=${TIMESTAMP} jobName=${JOB_NAME} DO-NOT-DELETE=UpstreamInfra" + fi + printf '%s' "${TAGS}" | tee packer/azure/tags.out + + export GALLERY_NAME="${STAGING_GALLERY_NAME:-staging_gallery}" + DISTRO="${os}-${version}" + echo "DISTRO=${DISTRO}" >> $GITHUB_ENV + + export PACKER_FLAGS="${PACKER_FLAGS} --var sig_image_version=${KUBERNETES_VERSION}" + export PATH=$PATH:$HOME/.local/bin + export USE_AZURE_CLI_AUTH="True" + + make build-azure-sig-${os}-${version} | tee packer/azure/packer.out + + - name: Generate SIG publishing info + id: publishing_info + working-directory: images/capi + run: | + set -euo pipefail + + PACKER_OUTPUT=packer/azure/packer.out + OS_TYPE=$(sed -n 's/^OSType: \(.*\)/\1/p' $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(sed -n "s/^ManagedImageResourceGroupName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_NAME=$(sed -n "s/^ManagedImageName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_ID=$(sed -n "s/^ManagedImageId: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_LOCATION=$(sed -n "s/^ManagedImageLocation: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(sed -n "s/^ManagedImageSharedImageGalleryId: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(sed -n "s/^SharedImageGalleryResourceGroup: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + SHARED_IMAGE_GALLERY_NAME=$(sed -n "s/^SharedImageGalleryName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + SHARED_IMAGE_GALLERY_IMAGE_NAME=$(sed -n "s/^SharedImageGalleryImageName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + 
SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(sed -n "s/^SharedImageGalleryImageVersion: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) + TAGS=$(cat packer/azure/tags.out) + + if [[ ${SHARED_IMAGE_GALLERY_IMAGE_NAME} == *gen2 ]]; then + HYPERV_GEN="V2" + else + HYPERV_GEN="V1" + fi + + # Create JSON and output it + PUBLISHING_INFO=$(jq -n \ + --arg distro "${DISTRO}" \ + --arg hyperv_gen "${HYPERV_GEN}" \ + --arg os_type "${OS_TYPE}" \ + --arg managed_image_resource_group_name "${MANAGED_IMAGE_RESOURCE_GROUP_NAME}" \ + --arg managed_image_name "${MANAGED_IMAGE_NAME}" \ + --arg managed_image_id "${MANAGED_IMAGE_ID}" \ + --arg managed_image_location "${MANAGED_IMAGE_LOCATION}" \ + --arg managed_image_shared_image_gallery_id "${MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID}" \ + --arg shared_image_gallery_resource_group "${SHARED_IMAGE_GALLERY_RESOURCE_GROUP}" \ + --arg shared_image_gallery_name "${SHARED_IMAGE_GALLERY_NAME}" \ + --arg shared_image_gallery_image_name "${SHARED_IMAGE_GALLERY_IMAGE_NAME}" \ + --arg shared_image_gallery_image_version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" \ + --arg tags "${TAGS}" \ + '$ARGS.named') + + echo "Publishing info: ${PUBLISHING_INFO}" + + # Save to file for artifact + echo "${PUBLISHING_INFO}" > packer/azure/sig-publishing-info.json + + - name: Upload publishing info artifact + uses: actions/upload-artifact@v4 + with: + name: publishing-info + path: images/capi/packer/azure/sig-publishing-info.json + retention-days: 7 + + # --------------------------------------------------------------------------- + # Test + # --------------------------------------------------------------------------- + test: + name: Test SIG Image + needs: build + if: ${{ !inputs.skip_test }} + runs-on: ubuntu-latest + timeout-minutes: 120 + env: + KUBERNETES_VERSION: ${{ inputs.kubernetes_version }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Download publishing info artifact + uses: actions/download-artifact@v4 + with: + name: publishing-info + 
path: images/capi/packer/azure/sig/ + + - name: Import variables from build + id: vars + run: | + set -euo pipefail + + PUBLISHING_INFO=$(jq -c . images/capi/packer/azure/sig/sig-publishing-info.json) + echo "PUBLISHING_INFO=${PUBLISHING_INFO}" + + echo "OS_TYPE=$(echo "$PUBLISHING_INFO" | jq -r .os_type)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_resource_group_name)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_NAME=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_name)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_ID=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_id)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_LOCATION=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_location)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_shared_image_gallery_id)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_resource_group)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_NAME=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_name)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_IMAGE_NAME=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_name)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_version)" >> $GITHUB_OUTPUT + echo "TAGS=$(echo "$PUBLISHING_INFO" | jq -r .tags)" >> $GITHUB_OUTPUT + + - name: Configure Kubernetes version + uses: ./.github/actions/configure-k8s-version + with: + kubernetes_version: ${{ inputs.kubernetes_version }} + + - name: Setup kustomize + working-directory: images/capi + run: | + set -euo pipefail + export PATH=${PATH}:.local/bin + ./packer/azure/scripts/ensure-kustomize.sh + + - name: Generate cluster template + working-directory: images/capi + env: + OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} + run: | + set -euo pipefail + export PATH=${PATH}:.local/bin + + if [ 
"$OS_TYPE" == "Windows" ]; then + kustomize build --load-restrictor LoadRestrictionsNone packer/azure/scripts/test-templates/windows/ > packer/azure/scripts/test-templates/cluster-template.yaml + else + kustomize build --load-restrictor LoadRestrictionsNone packer/azure/scripts/test-templates/linux/ > packer/azure/scripts/test-templates/cluster-template.yaml + fi + + - name: Azure Login + uses: azure/login@v2 + with: + client-id: ${{ secrets.AZURE_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: Install Azure CAPI extension + run: | + set -euo pipefail + + # Install the Azure CLI Cluster API extension from the official release + az extension add --name capi --yes || az extension add --source "https://github.com/Azure/azure-capi-cli-extension/releases/latest/download/capi-0.0.vnext-py2.py3-none-any.whl" --yes + + # Install required binaries + mkdir -p ~/test-binaries + export PATH=${PATH}:~/test-binaries + az capi install -a -ip ~/test-binaries + + - name: Create test cluster + working-directory: images/capi + env: + OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} + MANAGED_IMAGE_LOCATION: ${{ steps.vars.outputs.MANAGED_IMAGE_LOCATION }} + TAGS: ${{ steps.vars.outputs.TAGS }} + run: | + set -euo pipefail + TEST_TEMPLATE="packer/azure/scripts/test-templates/cluster-template.yaml" + + export PATH=${PATH}:~/test-binaries + + params=() + if [ "$OS_TYPE" == "Windows" ]; then + params+=(--windows) + fi + + # Create a dedicated test resource group (not the build resource group) + TEST_RESOURCE_GROUP="image-builder-test-${GITHUB_RUN_ID}" + echo "TEST_RESOURCE_GROUP=${TEST_RESOURCE_GROUP}" >> $GITHUB_ENV + + AZURE_LOCATION="${MANAGED_IMAGE_LOCATION}" + az group create -n "${TEST_RESOURCE_GROUP}" -l "${AZURE_LOCATION}" --tags ${TAGS:-} + + # Create a cluster + az capi create \ + --yes \ + --debug \ + --name testvm \ + --kubernetes-version="${KUBERNETES_VERSION}" \ + --location="${AZURE_LOCATION}" \ + 
--resource-group="${TEST_RESOURCE_GROUP}" \ + --management-cluster-resource-group-name="${TEST_RESOURCE_GROUP}" \ + --control-plane-machine-count=1 \ + --node-machine-count=1 \ + --template="${TEST_TEMPLATE}" \ + --tags="${TAGS}" \ + --wait-for-nodes=2 \ + "${params[@]}" + + # Test if the VM's provisionState is "Succeeded" otherwise fail + timeout 60s bash -c 'set -o pipefail; while ! az vm list -g "$TEST_RESOURCE_GROUP" | jq -e "length > 0 and all(.provisioningState == \"Succeeded\")"; do sleep 1; done' + + - name: Clean up test resource group + if: always() + run: | + set -euo pipefail + + TEST_RESOURCE_GROUP="${TEST_RESOURCE_GROUP:-}" + if [[ -n "${TEST_RESOURCE_GROUP}" ]]; then + echo "Cleaning up test resource group: ${TEST_RESOURCE_GROUP}" + az group delete -n "${TEST_RESOURCE_GROUP}" --yes --no-wait || true + else + echo "No test resource group to clean up" + fi + + # --------------------------------------------------------------------------- + # Promote + # --------------------------------------------------------------------------- + approve_promotion: + name: "Approve Promotion: ${{ inputs.os }} ${{ inputs.os_version }} (k8s ${{ inputs.kubernetes_version }})" + needs: [build, test] + if: ${{ always() && !inputs.skip_promote && needs.build.result == 'success' && (needs.test.result == 'success' || needs.test.result == 'skipped') }} + runs-on: ubuntu-latest + environment: image-promotion-approval + steps: + - name: Promotion Approved + run: echo "Image promotion approved" + + promote: + name: Promote to Community Gallery + needs: [build, approve_promotion] + if: ${{ always() && needs.build.result == 'success' && needs.approve_promotion.result == 'success' }} + runs-on: ubuntu-latest + timeout-minutes: 120 + env: + RESOURCE_GROUP: ${{ inputs.resource_group }} + GALLERY_NAME: ${{ inputs.gallery_name }} + REPLICATED_REGIONS_INPUT: ${{ inputs.replicated_regions }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Download publishing 
info artifact + uses: actions/download-artifact@v4 + with: + name: publishing-info + path: images/capi/packer/azure/sig/ + + - name: Import variables from build + id: vars + run: | + set -euo pipefail + + PUBLISHING_INFO=$(jq -c . images/capi/packer/azure/sig/sig-publishing-info.json) + echo "PUBLISHING_INFO=${PUBLISHING_INFO}" + + echo "DISTRO=$(echo "$PUBLISHING_INFO" | jq -r .distro)" >> $GITHUB_OUTPUT + echo "HYPERV_GEN=$(echo "$PUBLISHING_INFO" | jq -r .hyperv_gen)" >> $GITHUB_OUTPUT + echo "OS_TYPE=$(echo "$PUBLISHING_INFO" | jq -r .os_type)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_resource_group_name)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_NAME=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_name)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_ID=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_id)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_LOCATION=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_location)" >> $GITHUB_OUTPUT + echo "MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_shared_image_gallery_id)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_resource_group)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_NAME=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_name)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_IMAGE_NAME=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_name)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_version)" >> $GITHUB_OUTPUT + echo "TAGS=$(echo "$PUBLISHING_INFO" | jq -r .tags)" >> $GITHUB_OUTPUT + + - name: Azure Login + uses: azure/login@v2 + with: + client-id: ${{ secrets.AZURE_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: Publish to community gallery + working-directory: 
images/capi + env: + DISTRO: ${{ steps.vars.outputs.DISTRO }} + HYPERV_GEN: ${{ steps.vars.outputs.HYPERV_GEN }} + OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} + MANAGED_IMAGE_ID: ${{ steps.vars.outputs.MANAGED_IMAGE_ID }} + MANAGED_IMAGE_LOCATION: ${{ steps.vars.outputs.MANAGED_IMAGE_LOCATION }} + SHARED_IMAGE_GALLERY_IMAGE_NAME: ${{ steps.vars.outputs.SHARED_IMAGE_GALLERY_IMAGE_NAME }} + SHARED_IMAGE_GALLERY_IMAGE_VERSION: ${{ steps.vars.outputs.SHARED_IMAGE_GALLERY_IMAGE_VERSION }} + TAGS: ${{ steps.vars.outputs.TAGS }} + EULA_LINK: ${{ vars.EULA_LINK }} + PUBLISHER_EMAIL: ${{ vars.PUBLISHER_EMAIL }} + PUBLISHER_URI: ${{ vars.PUBLISHER_URI }} + SIG_PUBLISHER: ${{ vars.SIG_PUBLISHER }} + run: | + set -euo pipefail + + EOL_DATE=$(date --date='+6 months' +"%Y-%m-%dT00:00:00+00:00") + GALLERY_DESCRIPTION="Shared image gallery for Cluster API Provider Azure" + GALLERY_NAME="${GALLERY_NAME}" + PUBLIC_NAME_PREFIX="ClusterAPI" + RESOURCE_GROUP="${RESOURCE_GROUP}" + SIG_OFFER="reference-images" + + # Set replicated regions + REPLICATED_REGIONS="${MANAGED_IMAGE_LOCATION} ${REPLICATED_REGIONS_INPUT}" + + # Create the resource group if needed + if ! az group show -n "${RESOURCE_GROUP}" -o none 2>/dev/null; then + az group create -n "${RESOURCE_GROUP}" -l "${MANAGED_IMAGE_LOCATION}" --tags ${TAGS:-} + fi + + # Create the public community shared image gallery if it doesn't exist + if ! 
az sig show --gallery-name "${GALLERY_NAME}" --resource-group "${RESOURCE_GROUP}" -o none 2>/dev/null; then + sig_create_args=( + --gallery-name "${GALLERY_NAME}" + --resource-group "${RESOURCE_GROUP}" + --description "${GALLERY_DESCRIPTION}" + --eula "${EULA_LINK}" + --location "${MANAGED_IMAGE_LOCATION}" + --public-name-prefix "${PUBLIC_NAME_PREFIX}" + --publisher-email "${PUBLISHER_EMAIL}" + --publisher-uri "${PUBLISHER_URI}" + --permissions Community + ) + if [[ -n "${TAGS:-}" ]]; then + sig_create_args+=(--tags ${TAGS}) + fi + az sig create "${sig_create_args[@]}" + fi + + # Translate prohibited words to alternatives in the image definition name + GALLERY_IMAGE_DEFINITION=${SHARED_IMAGE_GALLERY_IMAGE_NAME//ubuntu/ubun2} + GALLERY_IMAGE_DEFINITION=${GALLERY_IMAGE_DEFINITION//windows/win} + + # Create image definition if it doesn't exist + if ! az sig image-definition show --gallery-name "${GALLERY_NAME}" --gallery-image-definition "${GALLERY_IMAGE_DEFINITION}" --resource-group "${RESOURCE_GROUP}" -o none 2>/dev/null; then + az sig image-definition create \ + --resource-group "${RESOURCE_GROUP}" \ + --gallery-name "${GALLERY_NAME}" \ + --gallery-image-definition "${GALLERY_IMAGE_DEFINITION}" \ + --publisher "${SIG_PUBLISHER}" \ + --offer "${SIG_OFFER}" \ + --sku "${DISTRO}" \ + --hyper-v-generation "${HYPERV_GEN}" \ + --os-type "${OS_TYPE}" \ + | tee -a sig-publishing.json + fi + + # Delete the image version if it exists (always create a new image, overwriting if necessary) + if az sig image-version show --gallery-name "${GALLERY_NAME}" --gallery-image-definition "${GALLERY_IMAGE_DEFINITION}" --gallery-image-version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" --resource-group "${RESOURCE_GROUP}" -o none 2>/dev/null; then + az sig image-version delete \ + --resource-group "${RESOURCE_GROUP}" \ + --gallery-name "${GALLERY_NAME}" \ + --gallery-image-definition "${GALLERY_IMAGE_DEFINITION}" \ + --gallery-image-version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" + fi + + # 
Copy the tags from the managed image to the image version + IMAGE_TAGS=$(az tag list --resource-id "${MANAGED_IMAGE_ID}" | jq -r '.properties.tags | to_entries | map("\(.key)=\(.value)") | join(" ")') + + # Create the image version + az sig image-version create \ + --resource-group "${RESOURCE_GROUP}" \ + --gallery-name "${GALLERY_NAME}" \ + --gallery-image-definition "${GALLERY_IMAGE_DEFINITION}" \ + --gallery-image-version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" \ + --target-regions ${REPLICATED_REGIONS} \ + --managed-image "${MANAGED_IMAGE_ID}" \ + --end-of-life-date "${EOL_DATE}" \ + --tags ${IMAGE_TAGS:-} \ + | tee sig-publishing.json + + - name: Upload publishing artifact + uses: actions/upload-artifact@v4 + with: + name: sig-publishing + path: images/capi/sig-publishing.json + retention-days: 30 + + # --------------------------------------------------------------------------- + # Clean + # --------------------------------------------------------------------------- + clean: + name: Clean Staging Resources + needs: [build, test, approve_promotion, promote] + if: ${{ always() && needs.build.result == 'success' }} + runs-on: ubuntu-latest + timeout-minutes: 30 + env: + RESOURCE_GROUP: ${{ inputs.resource_group }} + STAGING_GALLERY_NAME: ${{ inputs.staging_gallery_name }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Download publishing info artifact + uses: actions/download-artifact@v4 + with: + name: publishing-info + path: images/capi/packer/azure/sig/ + + - name: Import variables from build + id: vars + run: | + set -euo pipefail + + PUBLISHING_INFO=$(jq -c . 
images/capi/packer/azure/sig/sig-publishing-info.json) + echo "PUBLISHING_INFO=${PUBLISHING_INFO}" + + echo "MANAGED_IMAGE_ID=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_id)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_IMAGE_NAME=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_name)" >> $GITHUB_OUTPUT + echo "SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_version)" >> $GITHUB_OUTPUT + + - name: Azure Login + uses: azure/login@v2 + with: + client-id: ${{ secrets.AZURE_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: Clean up staging resources + working-directory: images/capi + env: + MANAGED_IMAGE_ID: ${{ steps.vars.outputs.MANAGED_IMAGE_ID }} + SHARED_IMAGE_GALLERY_IMAGE_NAME: ${{ steps.vars.outputs.SHARED_IMAGE_GALLERY_IMAGE_NAME }} + SHARED_IMAGE_GALLERY_IMAGE_VERSION: ${{ steps.vars.outputs.SHARED_IMAGE_GALLERY_IMAGE_VERSION }} + run: | + set -euo pipefail + + GALLERY_NAME="${STAGING_GALLERY_NAME:-staging_gallery}" + RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" + + # Delete the source managed image if it exists + if az image show --ids "${MANAGED_IMAGE_ID}" -o none 2>/dev/null; then + echo "Deleting managed image: ${MANAGED_IMAGE_ID}" + az image delete --ids "${MANAGED_IMAGE_ID}" + else + echo "Managed image not found, skipping deletion" + fi + + # Delete the staging image version if it exists + if az sig image-version show --resource-group "${RESOURCE_GROUP}" --gallery-name "${GALLERY_NAME}" --gallery-image-definition "${SHARED_IMAGE_GALLERY_IMAGE_NAME}" --gallery-image-version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" -o none 2>/dev/null; then + echo "Deleting staging image version: ${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" + az sig image-version delete \ + --resource-group "${RESOURCE_GROUP}" \ + --gallery-name "${GALLERY_NAME}" \ + --gallery-image-definition "${SHARED_IMAGE_GALLERY_IMAGE_NAME}" \ + 
--gallery-image-version "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}" + else + echo "Staging image version not found, skipping deletion" + fi diff --git a/images/capi/packer/azure/.pipelines/build-sig.yaml b/images/capi/packer/azure/.pipelines/build-sig.yaml deleted file mode 100644 index eb4f744e7c..0000000000 --- a/images/capi/packer/azure/.pipelines/build-sig.yaml +++ /dev/null @@ -1,80 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - KUBERNETES_VERSION - version of Kubernetes to build the image with, e.g. `1.31.1` -# - OS - operating system distro, such as 'Ubuntu', 'AzureLinux', or `Windows` -# - OS_VERSION - version of distro, such as `24.04` or `2022-containerd` -# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI -# Optional pipeline variables: -# - JOB_NAME - name of the job, defaults to `image-builder-sig-${OS}-${OS_VERSION}` -# - PACKER_FLAGS - additional flags to pass to packer -# - RESOURCE_GROUP - name of the Azure resource group to use for the compute galleries -# - STAGING_GALLERY_NAME - name of the Azure compute gallery for initial image publishing -# - TAGS - tags to apply to the image - -jobs: -- job: build_sig - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - template: k8s-config.yaml - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - kube_proxy_url="sigwindowstools/kube-proxy:v${KUBERNETES_VERSION/+/_}-calico-hostprocess" - echo "Checking for Windows kube-proxy image $kube_proxy_url" - if ! 
stderr="$(docker pull $kube_proxy_url 2>&1 > /dev/null)"; then - # It's a Windows image, so expect an error after pulling it on Linux - if [[ $stderr != *"cannot be used on this platform"* ]]; then - echo "Failed to pull kube-proxy image: $stderr" - exit 1 - fi - fi - displayName: Check for Windows kube-proxy image - condition: and(eq(variables['PREFLIGHT_CHECKS'], 'true'), eq(variables['OS'], 'Windows')) - - task: AzureCLI@2 - displayName: Build SIG Image - inputs: - azureSubscription: '$(SERVICE_CONNECTION)' - scriptLocation: inlineScript - scriptType: bash - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - inlineScript: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - # Generate locales properly on Azure Linux or ansible will complain - sudo tdnf -y install glibc-i18n - sudo locale-gen.sh - export LC_ALL=en_US.UTF-8 - - os=$(echo "${OS}" | tr '[:upper:]' '[:lower:]') - version=$(echo "${OS_VERSION}" | tr '[:upper:]' '[:lower:]' | tr -d .) - export RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" - export RESOURCE_GROUP_NAME="${RESOURCE_GROUP}" - - # timestamp is in RFC-3339 format to match kubetest - export TIMESTAMP="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" - export JOB_NAME="${JOB_NAME:-"image-builder-sig-${os}-${version}"}" - export TAGS="${TAGS:-creationTimestamp=${TIMESTAMP} jobName=${JOB_NAME} DO-NOT-DELETE=UpstreamInfra}" - printf "${TAGS}" | tee packer/azure/tags.out - export GALLERY_NAME="${STAGING_GALLERY_NAME:-staging_gallery}" - DISTRO="${os}-${version}" - echo "##vso[task.setvariable variable=DISTRO]$DISTRO" - - # Add build tags in ADO - echo "##vso[build.addbuildtag]$KUBERNETES_VERSION" - echo "##vso[build.addbuildtag]$DISTRO" - - export PACKER_FLAGS="${PACKER_FLAGS} --var sig_image_version=${KUBERNETES_VERSION}" - export PATH=$PATH:$HOME/.local/bin - export USE_AZURE_CLI_AUTH="True" - make build-azure-sig-$os-$version | tee packer/azure/packer.out - - template: sig-publishing-info.yaml - - task: 
PublishPipelineArtifact@1 - inputs: - artifact: 'publishing-info' - path: '$(system.defaultWorkingDirectory)/images/capi/packer/azure/sig-publishing-info.json' diff --git a/images/capi/packer/azure/.pipelines/clean-sig.yaml b/images/capi/packer/azure/.pipelines/clean-sig.yaml deleted file mode 100644 index 04e58d384a..0000000000 --- a/images/capi/packer/azure/.pipelines/clean-sig.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI -# Optional pipeline variables: -# - RESOURCE_GROUP - name of the Azure resource group to use for the compute galleries, defaults to "cluster-api-gallery" -# - STAGING_GALLERY_NAME - name of the Azure compute gallery for initial image publishing, defaults to "staging_gallery" - -jobs: -- job: clean_sig - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - task: DownloadPipelineArtifact@2 - inputs: - source: current - artifact: publishing-info - path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/ - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - MANAGED_IMAGE_ID=$(jq -r .managed_image_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_NAME=$(jq -r .shared_image_gallery_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(jq -r .shared_image_gallery_image_version $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - - echo "##vso[task.setvariable variable=MANAGED_IMAGE_ID]$MANAGED_IMAGE_ID" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_NAME]$SHARED_IMAGE_GALLERY_IMAGE_NAME" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_VERSION]$SHARED_IMAGE_GALLERY_IMAGE_VERSION" - displayName: 
Import variables from build SIG job - - task: AzureCLI@2 - displayName: Clean up staging resources - inputs: - azureSubscription: '$(SERVICE_CONNECTION)' - scriptLocation: inlineScript - scriptType: bash - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - inlineScript: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - GALLERY_NAME="${STAGING_GALLERY_NAME:-staging_gallery}" - RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" - - # Delete the source managed image if it exists - if az image show --ids ${MANAGED_IMAGE_ID} -o none 2>/dev/null; then - az image delete --ids ${MANAGED_IMAGE_ID} - fi - - # Delete the staging image version if it exists - if az sig image-version show --resource-group ${RESOURCE_GROUP} --gallery-name ${GALLERY_NAME} --gallery-image-definition ${SHARED_IMAGE_GALLERY_IMAGE_NAME} --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} -o none 2>/dev/null; then - az sig image-version delete \ - --resource-group ${RESOURCE_GROUP} \ - --gallery-name ${GALLERY_NAME} \ - --gallery-image-definition ${SHARED_IMAGE_GALLERY_IMAGE_NAME} \ - --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} - fi diff --git a/images/capi/packer/azure/.pipelines/k8s-config.yaml b/images/capi/packer/azure/.pipelines/k8s-config.yaml deleted file mode 100644 index b9ded3ffa6..0000000000 --- a/images/capi/packer/azure/.pipelines/k8s-config.yaml +++ /dev/null @@ -1,10 +0,0 @@ -steps: -- script: | - KUBERNETES_RELEASE=$(echo ${KUBERNETES_VERSION} | cut -d "." 
-f -2) - sed -i "s/^ \"kubernetes_series\".*/ \"kubernetes_series\": \"v${KUBERNETES_RELEASE}\",/g" kubernetes.json - sed -i "s/^ \"kubernetes_semver\".*/ \"kubernetes_semver\": \"v${KUBERNETES_VERSION}\",/g" kubernetes.json - sed -i "s/^ \"kubernetes_rpm_version\".*/ \"kubernetes_rpm_version\": \"${KUBERNETES_VERSION}\",/g" kubernetes.json - sed -i "s/^ \"kubernetes_deb_version\".*/ \"kubernetes_deb_version\": \"${KUBERNETES_VERSION}-1.1\",/g" kubernetes.json - cat kubernetes.json - displayName: Write configuration files - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi/packer/config' diff --git a/images/capi/packer/azure/.pipelines/promote-sig.yaml b/images/capi/packer/azure/.pipelines/promote-sig.yaml deleted file mode 100644 index 09acea5eda..0000000000 --- a/images/capi/packer/azure/.pipelines/promote-sig.yaml +++ /dev/null @@ -1,153 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI -# - EULA_LINK - the URL to the EULA for the image -# - PUBLISHER_EMAIL - the email for the image publisher -# - PUBLISHER_URI - the URI for the image publisher -# - SIG_PUBLISHER - the publisher for the image definition -# Optional pipeline variables: -# - GALLERY_DESCRIPTION - the description for the image gallery, defaults to `Shared image gallery for Cluster API Provider Azure` -# - GALLERY_NAME - name of the Azure community gallery for final image publishing, defaults to `community_gallery` -# - PUBLIC_NAME_PREFIX - the prefix for the community gallery name, defaults to `ClusterAPI` -# - REPLICATED_REGIONS - the regions to replicate the image to, defaults to the location of the managed image -# - RESOURCE_GROUP - name of the Azure resource group to use for the compute galleries, defaults to `cluster-api-gallery` -# - SIG_OFFER - the name of the offer to attach to image definitions, defaults to `reference-images` - -jobs: -- deployment: 
approve_promotion - displayName: 'Approve Image Promotion' - environment: 'image-promotion-approval' - strategy: - runOnce: - deploy: - steps: - - script: echo "Approved for promotion" -- job: publish_to_sig - dependsOn: approve_promotion - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - task: DownloadPipelineArtifact@2 - inputs: - source: current - artifact: publishing-info - path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/ - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - DISTRO=$(jq -r .distro $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - HYPERV_GEN=$(jq -r .hyperv_gen $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - OS_TYPE=$(jq -r .os_type $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(jq -r .managed_image_resource_group_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_NAME=$(jq -r .managed_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_ID=$(jq -r .managed_image_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_LOCATION=$(jq -r .managed_image_location $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(jq -r .managed_image_shared_image_gallery_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(jq -r .shared_image_gallery_resource_group $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_NAME=$(jq -r .shared_image_gallery_name 
$(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_NAME=$(jq -r .shared_image_gallery_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(jq -r .shared_image_gallery_image_version $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - TAGS=$(jq -r .tags $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - - set +o xtrace - echo "##vso[task.setvariable variable=DISTRO]$DISTRO" - echo "##vso[task.setvariable variable=HYPERV_GEN]$HYPERV_GEN" - echo "##vso[task.setvariable variable=OS_TYPE]$OS_TYPE" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_RESOURCE_GROUP_NAME]$MANAGED_IMAGE_RESOURCE_GROUP_NAME" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_NAME]$MANAGED_IMAGE_NAME" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_ID]$MANAGED_IMAGE_ID" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_LOCATION]$MANAGED_IMAGE_LOCATION" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID]$MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_RESOURCE_GROUP]$SHARED_IMAGE_GALLERY_RESOURCE_GROUP" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_NAME]$SHARED_IMAGE_GALLERY_NAME" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_NAME]$SHARED_IMAGE_GALLERY_IMAGE_NAME" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_VERSION]$SHARED_IMAGE_GALLERY_IMAGE_VERSION" - echo "##vso[task.setvariable variable=TAGS]$TAGS" - displayName: Import variables from build SIG job - - task: AzureCLI@2 - displayName: Publish to community gallery - inputs: - azureSubscription: '$(SERVICE_CONNECTION)' - scriptLocation: inlineScript - scriptType: bash - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - inlineScript: | - set -euo 
pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - EOL_DATE=$(date --date='+6 months' +"%Y-%m-%dT00:00:00+00:00") - GALLERY_DESCRIPTION=${GALLERY_DESCRIPTION:-"Shared image gallery for Cluster API Provider Azure"} - GALLERY_NAME=${GALLERY_NAME:-community_gallery} - PUBLIC_NAME_PREFIX=${PUBLIC_NAME_PREFIX:-ClusterAPI} - REPLICATED_REGIONS="${REPLICATED_REGIONS:-${MANAGED_IMAGE_LOCATION} australiaeast canadacentral eastus eastus2 eastus2euap francecentral germanywestcentral northeurope switzerlandnorth uksouth westeurope}" - RESOURCE_GROUP="${RESOURCE_GROUP:-cluster-api-gallery}" - SIG_OFFER="${SIG_OFFER:-reference-images}" - - # Create the resource group if needed - if ! az group show -n ${RESOURCE_GROUP} -o none 2>/dev/null; then - az group create -n ${RESOURCE_GROUP} -l ${MANAGED_IMAGE_LOCATION} --tags ${TAGS:-} - fi - - # Create the public community shared image gallery if it doesn't exist - if ! az sig show --gallery-name ${GALLERY_NAME} --resource-group ${RESOURCE_GROUP} -o none 2>/dev/null; then - az sig create \ - --gallery-name ${GALLERY_NAME} \ - --resource-group ${RESOURCE_GROUP} \ - --description ${GALLERY_DESCRIPTION} \ - --eula ${EULA_LINK} \ - --location ${MANAGED_IMAGE_LOCATION} \ - --public-name-prefix ${PUBLIC_NAME_PREFIX} \ - --publisher-email ${PUBLISHER_EMAIL} \ - --publisher-uri ${PUBLISHER_URI} \ - --tags ${TAGS} \ - --permissions Community - fi - - # translate prohibited words to alternatives in the image definition name - GALLERY_IMAGE_DEFINITION=${SHARED_IMAGE_GALLERY_IMAGE_NAME//ubuntu/ubun2} - GALLERY_IMAGE_DEFINITION=${GALLERY_IMAGE_DEFINITION//windows/win} - # Create image definition if it doesn't exist - if ! 
az sig image-definition show --gallery-name ${GALLERY_NAME} --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} --resource-group ${RESOURCE_GROUP} -o none 2>/dev/null; then - az sig image-definition create \ - --resource-group ${RESOURCE_GROUP} \ - --gallery-name ${GALLERY_NAME} \ - --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} \ - --publisher ${SIG_PUBLISHER} \ - --offer ${SIG_OFFER} \ - --sku ${DISTRO} \ - --hyper-v-generation ${HYPERV_GEN} \ - --os-type ${OS_TYPE} \ - | tee -a sig-publishing.json - fi - - # Delete the image version if it exists (always create a new image, overwriting if necessary) - if az sig image-version show --gallery-name ${GALLERY_NAME} --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} --resource-group ${RESOURCE_GROUP} -o none 2>/dev/null; then - az sig image-version delete \ - --resource-group ${RESOURCE_GROUP} \ - --gallery-name ${GALLERY_NAME} \ - --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} \ - --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} - fi - - # Copy the tags from the managed image to the image version - IMAGE_TAGS=$(az tag list --resource-id ${MANAGED_IMAGE_ID} | jq -r '.properties.tags | to_entries | map("\(.key)=\(.value)") | join(" ")') - - # Create the image version - az sig image-version create \ - --resource-group ${RESOURCE_GROUP} \ - --gallery-name ${GALLERY_NAME} \ - --gallery-image-definition ${GALLERY_IMAGE_DEFINITION} \ - --gallery-image-version ${SHARED_IMAGE_GALLERY_IMAGE_VERSION} \ - --target-regions ${REPLICATED_REGIONS} \ - --managed-image "${MANAGED_IMAGE_ID}" \ - --end-of-life-date ${EOL_DATE} \ - --tags ${IMAGE_TAGS} \ - | tee -a sig-publishing.json - - task: PublishPipelineArtifact@1 - inputs: - artifact: 'sig-publishing' - path: '$(system.defaultWorkingDirectory)/images/capi/sig-publishing.json' diff --git a/images/capi/packer/azure/.pipelines/sig-publishing-info.yaml 
b/images/capi/packer/azure/.pipelines/sig-publishing-info.yaml deleted file mode 100644 index 6f805f3fe6..0000000000 --- a/images/capi/packer/azure/.pipelines/sig-publishing-info.yaml +++ /dev/null @@ -1,42 +0,0 @@ -steps: -- script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - PACKER_OUTPUT=packer/azure/packer.out - OS_TYPE=$(sed -n 's/^OSType: \(.*\)/\1/p' $PACKER_OUTPUT | tail -1) - MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(sed -n "s/^ManagedImageResourceGroupName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - MANAGED_IMAGE_NAME=$(sed -n "s/^ManagedImageName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - MANAGED_IMAGE_ID=$(sed -n "s/^ManagedImageId: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - MANAGED_IMAGE_LOCATION=$(sed -n "s/^ManagedImageLocation: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(sed -n "s/^ManagedImageSharedImageGalleryId: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(sed -n "s/^SharedImageGalleryResourceGroup: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - SHARED_IMAGE_GALLERY_NAME=$(sed -n "s/^SharedImageGalleryName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - SHARED_IMAGE_GALLERY_IMAGE_NAME=$(sed -n "s/^SharedImageGalleryImageName: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(sed -n "s/^SharedImageGalleryImageVersion: \(.*\)/\1/p" $PACKER_OUTPUT | tail -1) - TAGS=$(cat packer/azure/tags.out) - if [[ SHARED_IMAGE_GALLERY_IMAGE_NAME == *gen2 ]]; then - HYPERV_GEN="V2" - else - HYPERV_GEN="V1" - fi - - cat < packer/azure/sig-publishing-info.json - { - "distro": "${DISTRO}", - "hyperv_gen": "${HYPERV_GEN}", - "os_type": "${OS_TYPE}", - "managed_image_resource_group_name": "${MANAGED_IMAGE_RESOURCE_GROUP_NAME}", - "managed_image_name": "${MANAGED_IMAGE_NAME}", - "managed_image_id": "${MANAGED_IMAGE_ID}", - "managed_image_location": "${MANAGED_IMAGE_LOCATION}", - "managed_image_shared_image_gallery_id": "${MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID}", - 
"shared_image_gallery_resource_group": "${SHARED_IMAGE_GALLERY_RESOURCE_GROUP}", - "shared_image_gallery_name": "${SHARED_IMAGE_GALLERY_NAME}", - "shared_image_gallery_image_name": "${SHARED_IMAGE_GALLERY_IMAGE_NAME}", - "shared_image_gallery_image_version": "${SHARED_IMAGE_GALLERY_IMAGE_VERSION}", - "tags": "${TAGS}" - } - EOF - displayName: Generate SIG publishing info - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' diff --git a/images/capi/packer/azure/.pipelines/stages.yaml b/images/capi/packer/azure/.pipelines/stages.yaml deleted file mode 100644 index a40cb27dfd..0000000000 --- a/images/capi/packer/azure/.pipelines/stages.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - KUBERNETES_VERSION - version of Kubernetes to build the image with, e.g. `1.31.1` -# - OFFER - the name of the offer to create the sku for -# - OS - target of build, one of `Ubuntu` or `Windows` -# - OS_VERSION - target of build, one of `24.04`, `22.04`, `2022-containerd`, or `2019-containerd` -# - PUBLISHER - the name of the publisher to create the sku for -# - RESOURCE_GROUP - name of the Azure resource group to use for the Compute galleries -# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI -# - STAGING_GALLERY_NAME - name of the Azure Compute Gallery for initial image publishing - -trigger: none -pr: none - -stages: - - stage: build - jobs: - - template: build-sig.yaml - - - stage: test - condition: not(always()) # skip for now - jobs: - - template: test-sig.yaml - - - stage: promote - condition: not(or(failed(), canceled())) - jobs: - - template: promote-sig.yaml - - - stage: clean - condition: always() - jobs: - - template: clean-sig.yaml diff --git a/images/capi/packer/azure/.pipelines/test-sig.yaml b/images/capi/packer/azure/.pipelines/test-sig.yaml deleted file mode 100644 index 2b89276cea..0000000000 --- a/images/capi/packer/azure/.pipelines/test-sig.yaml +++ 
/dev/null @@ -1,142 +0,0 @@ -# Required pipeline variables: -# - BUILD_POOL - Azure DevOps build pool to use -# - AZ_CAPI_EXTENSION_URL - URL to the Azure CAPI extension build. -# - KUBERNETES_VERSION - version of Kubernetes to build the image with, e.g. `1.16.2` -# - OS - target of build e.g. `Ubuntu/Windows` -# - OS_VERSION - target of build e.g. `22.04/2004/2019` -# - SERVICE_CONNECTION - Azure DevOps service connection to use for the Azure CLI - -jobs: -- job: test_sig - timeoutInMinutes: 120 - strategy: - maxParallel: 0 - pool: - name: $(BUILD_POOL) - steps: - - task: DownloadPipelineArtifact@2 - inputs: - source: current - artifact: publishing-info - path: $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/ - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - OS_TYPE=$(jq -r .os_type $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(jq -r .managed_image_resource_group_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_NAME=$(jq -r .managed_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_ID=$(jq -r .managed_image_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_LOCATION=$(jq -r .managed_image_location $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(jq -r .managed_image_shared_image_gallery_id $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(jq -r .shared_image_gallery_resource_group $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_NAME=$(jq -r .shared_image_gallery_name 
$(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_NAME=$(jq -r .shared_image_gallery_image_name $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(jq -r .shared_image_gallery_image_version $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - SHARED_IMAGE_GALLERY_REPLICATED_REGIONS=$(jq -r .shared_image_gallery_replicated_regions $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - TAGS=$(jq -r .tags $(system.defaultWorkingDirectory)/images/capi/packer/azure/sig/sig-publishing-info.json) - - echo "##vso[task.setvariable variable=OS_TYPE;]$OS_TYPE" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_RESOURCE_GROUP_NAME;]$MANAGED_IMAGE_RESOURCE_GROUP_NAME" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_NAME;]$MANAGED_IMAGE_NAME" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_ID;]$MANAGED_IMAGE_ID" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_LOCATION;]$MANAGED_IMAGE_LOCATION" - echo "##vso[task.setvariable variable=MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID;]$MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_RESOURCE_GROUP;]$SHARED_IMAGE_GALLERY_RESOURCE_GROUP" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_NAME;]$SHARED_IMAGE_GALLERY_NAME" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_NAME;]$SHARED_IMAGE_GALLERY_IMAGE_NAME" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_IMAGE_VERSION;]$SHARED_IMAGE_GALLERY_IMAGE_VERSION" - echo "##vso[task.setvariable variable=SHARED_IMAGE_GALLERY_REPLICATED_REGIONS;]$SHARED_IMAGE_GALLERY_REPLICATED_REGIONS" - echo "##vso[task.setvariable variable=TAGS;]$TAGS" - displayName: Import variables from build SIG job - - template: k8s-config.yaml - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && 
set -o xtrace - - export PATH=${PATH}:.local/bin - ./packer/azure/scripts/ensure-kustomize.sh - - # Generate cluster template with kustomize - if [ "$OS_TYPE" == "Windows" ]; then - kustomize build --load-restrictor LoadRestrictionsNone $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/windows/ > $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml - else - kustomize build --load-restrictor LoadRestrictionsNone $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/linux/ > $(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml - fi - TEST_TEMPLATE=$(system.defaultWorkingDirectory)/images/capi/packer/azure/scripts/test-templates/cluster-template.yaml - echo "##vso[task.setvariable variable=TEST_TEMPLATE;]$TEST_TEMPLATE" - displayName: generate cluster template - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - - task: PipAuthenticate@1 - inputs: - artifactFeeds: 'AzureContainerUpstream' - onlyAddExtraIndex: true - - script: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - os=$(echo "$OS_TYPE" | tr '[:upper:]' '[:lower:]') - - # Set up the Azure CLI Cluster API extension - # For example, https://github.com/Azure/azure-capi-cli-extension/releases/download/az-capi-nightly/capi-0.0.vnext-py2.py3-none-any.whl - az extension add --yes --source "${AZ_CAPI_EXTENSION_URL}" - - # Install required binaries - mkdir ~/test-binaries - export PATH=${PATH}:~/test-binaries - az capi install -a -ip ~/test-binaries - - echo "##vso[task.setvariable variable=PATH;]$PATH" - displayName: Install and configure az capi extension - - task: AzureCLI@2 - displayName: Create a cluster - inputs: - azureSubscription: '$(SERVICE_CONNECTION)' - scriptLocation: inlineScript - scriptType: bash - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - inlineScript: | - set -euo pipefail - [[ -n 
${DEBUG:-} ]] && set -o xtrace - - params=() - if [ "$OS_TYPE" == "Windows" ]; then - params+=(--windows) - fi - - RESOURCE_GROUP=${MANAGED_IMAGE_RESOURCE_GROUP_NAME} - AZURE_LOCATION=${MANAGED_IMAGE_LOCATION} - # Create a cluster - az capi create \ - --yes \ - --debug \ - --name testvm \ - --kubernetes-version="${KUBERNETES_VERSION}" \ - --location="${AZURE_LOCATION}" \ - --resource-group="${RESOURCE_GROUP}" \ - --management-cluster-resource-group-name="${RESOURCE_GROUP}" \ - --control-plane-machine-count=1 \ - --node-machine-count=1 \ - --template="${TEST_TEMPLATE}" \ - --tags="${TAGS}" \ - --wait-for-nodes=2 \ - "${params[@]}" - - # test if the vm's provisionState is "Succeeded" otherwise fail - # even though the node is reporting Ready, it still takes a moment for the Azure VM to go to Succeeded - timeout 60s bash -c "while ! az vm list -g ${RESOURCE_GROUP} | jq -e 'all(.provisioningState == \"Succeeded\")'; do sleep 1; done" - - task: AzureCLI@2 - displayName: Clean up test resource group - inputs: - azureSubscription: '$(SERVICE_CONNECTION)' - scriptLocation: inlineScript - scriptType: bash - workingDirectory: '$(system.defaultWorkingDirectory)/images/capi' - inlineScript: | - set -euo pipefail - [[ -n ${DEBUG:-} ]] && set -o xtrace - - # Clean up the test resource group - RESOURCE_GROUP=${MANAGED_IMAGE_RESOURCE_GROUP_NAME} - echo az group delete -n "${RESOURCE_GROUP}" --yes --no-wait - condition: always() From 694f5fc1ad868045bc74feb0fe4d07dc6415b1e1 Mon Sep 17 00:00:00 2001 From: Damiano Donati Date: Sat, 14 Mar 2026 10:08:07 +0100 Subject: [PATCH 58/90] fix: skip IMDS crawl in DataSourceEc2KubernetesLocal init-local phase DataSourceEc2KubernetesLocal runs during cloud-init's init-local stage (pre-network). Its _get_data() delegates to DataSourceEc2._get_data() which attempts to crawl the IMDS at 169.254.169.254, but no network is available yet. The TCP connection retries for ~200s before timing out, adding a boot penalty to every EC2 node. 
This was not visible until cloud-init 25.1.4 changed ds-identify to respect user-configured datasource_list in /etc/cloud/cloud.cfg.d/ [1]. This update was included in Ubuntu 22.04 and 24.04 base images on Feb 17 2026, so any AMI built from a base image after that date is affected. Previously ds-identify wrote its own datasource_list: [ Ec2, None ] to /run/cloud-init/cloud.cfg (highest merge priority), silently overriding the custom Ec2Kubernetes datasource with the standard Ec2Local, which handles init-local correctly by setting up ephemeral DHCP first. Fix: Return False immediately from DataSourceEc2KubernetesLocal._get_data() so cloud-init proceeds to the init-network phase where DataSourceEc2Kubernetes runs with full network access. This matches the existing end-state (init-local always failed) but eliminates the ~200s timeout. [1] https://cloudinit.readthedocs.io/en/latest/reference/breaking_changes.html#strict-datasource-identity-before-network --- .../cloudinit/sources/DataSourceEc2Kubernetes.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py b/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py index 97ba291516..fab384a917 100644 --- a/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py +++ b/images/capi/ansible/roles/providers/files/usr/lib/python3/dist-packages/cloudinit/sources/DataSourceEc2Kubernetes.py @@ -125,8 +125,17 @@ def _get_data(self): class DataSourceEc2KubernetesLocal(DataSourceEc2Kubernetes): + # init-local runs before networking is available. The parent + # DataSourceEc2._get_data() crawls the IMDS, which requires network. + # Without it the TCP connection retries for ~232s before timing out. 
+ # Return False so cloud-init moves quickly to the init-network phase + # where DataSourceEc2Kubernetes runs with full network access. def _get_data(self): - return super(DataSourceEc2KubernetesLocal, self)._get_data() + LOG.debug( + "Skipping metadata crawl in init-local phase (no network). " + "DataSourceEc2Kubernetes will run in init-network phase." + ) + return False # Used to match classes to dependencies From cf07d38bc02e175268d123552fc6f3f4fb4750ec Mon Sep 17 00:00:00 2001 From: sivchari Date: Mon, 16 Mar 2026 17:27:00 +0900 Subject: [PATCH 59/90] Install networkd-dispatcher package before enabling service in openstack provider Signed-off-by: sivchari --- images/capi/ansible/roles/providers/tasks/openstack.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/images/capi/ansible/roles/providers/tasks/openstack.yml b/images/capi/ansible/roles/providers/tasks/openstack.yml index 3985be2e1e..641d053619 100644 --- a/images/capi/ansible/roles/providers/tasks/openstack.yml +++ b/images/capi/ansible/roles/providers/tasks/openstack.yml @@ -32,7 +32,14 @@ enabled: false when: ansible_os_family == "Debian" -- name: Install networkd-dispatcher service (Run networkd-dispatcher) +- name: Ensure networkd-dispatcher is installed + ansible.builtin.apt: + name: networkd-dispatcher + state: present + force_apt_get: true + when: ansible_os_family == "Debian" + +- name: Enable networkd-dispatcher service ansible.builtin.systemd: name: networkd-dispatcher state: started From d761106dcee113c677c64dcb0f4f79fd03ddd750 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Tue, 17 Mar 2026 08:26:49 -0600 Subject: [PATCH 60/90] Update docs for image-builder v0.1.49 --- RELEASE.md | 4 ++-- docs/book/src/capi/container-image.md | 10 +++++----- docs/book/src/capi/releasing.md | 10 +++++----- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/RELEASE.md b/RELEASE.md index f41473ae49..024a006b68 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,11 +1,11 @@ # 
Image Builder Releases -The current release of Image Builder is [v0.1.48][] (December 4, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48`. +The current release of Image Builder is [v0.1.49][] (March 17, 2026). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49`. ## Release Process For more detail about image-builder project releases, see the [Image Builder Book][]. -[v0.1.48]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.48 +[v0.1.49]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.49 [Image Builder Book]: https://image-builder.sigs.k8s.io/capi/releasing.html diff --git a/docs/book/src/capi/container-image.md b/docs/book/src/capi/container-image.md index 5ba8971fff..306ab6b576 100644 --- a/docs/book/src/capi/container-image.md +++ b/docs/book/src/capi/container-image.md @@ -18,7 +18,7 @@ Run the docker build target of Makefile The latest image-builder container image release is available here: ```commandline -docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48 +docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49 ``` ### Examples @@ -27,7 +27,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v - If the AWS CLI is already installed on your machine, you can simply mount the `~/.aws` folder that stores all the required credentials. ```commandline - docker run -it --rm -v /Users//.aws:/home/imagebuilder/.aws registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48 build-ami-ubuntu-2404 + docker run -it --rm -v /Users//.aws:/home/imagebuilder/.aws registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49 build-ami-ubuntu-2404 ``` - Another alternative is to use an `aws-creds.env` file to load the credentials and pass it during docker run. 
@@ -37,7 +37,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v ``` ```commandline - docker run -it --rm --env-file aws-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48 build-ami-ubuntu-2404 + docker run -it --rm --env-file aws-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49 build-ami-ubuntu-2404 ``` - AZURE @@ -51,7 +51,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v ``` ```commandline - docker run -it --rm --env-file az-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48 build-azure-sig-ubuntu-2404 + docker run -it --rm --env-file az-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49 build-azure-sig-ubuntu-2404 ``` - Proxmox @@ -83,7 +83,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v - Docker's `--net=host` option to ensure http server starts with the host IP and not the Docker container IP. This option is Linux specific and thus implies that it can be run only from a Linux machine. ```commandline - docker run -it --rm --net=host --env PACKER_VAR_FILES=/home/imagebuilder/vsphere.json -v :/home/imagebuilder/vsphere.json registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48 build-node-ova-vsphere-ubuntu-2404 + docker run -it --rm --net=host --env PACKER_VAR_FILES=/home/imagebuilder/vsphere.json -v :/home/imagebuilder/vsphere.json registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49 build-node-ova-vsphere-ubuntu-2404 ``` In addition to this, further customizations can be done as discussed [here](./capi.md#customization). 
diff --git a/docs/book/src/capi/releasing.md b/docs/book/src/capi/releasing.md index 4e516f3cdc..30d6647761 100644 --- a/docs/book/src/capi/releasing.md +++ b/docs/book/src/capi/releasing.md @@ -1,6 +1,6 @@ # Image Builder Releases -The current release of Image Builder is [v0.1.48][] (December 4, 2025). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.48`. +The current release of Image Builder is [v0.1.49][] (March 17, 2026). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49`. ## Release Process @@ -22,7 +22,7 @@ Releases in image-builder follow [semantic versioning][semver] conventions. Curr - *If signing tags with GPG, makes your key available to the `git tag` command.* - Create a new tag: - `export IB_VERSION=v0.1.x` - - *Replace `x` with the next patch version. For example: `v0.1.49`.* + - *Replace `x` with the next patch version. For example: `v0.1.50`.* - `git tag -s -m "Image Builder ${IB_VERSION}" ${IB_VERSION}` - `git push upstream ${IB_VERSION}` @@ -77,14 +77,14 @@ Wait for this PR to merge before communicating the release to users, so image-bu In the [#image-builder channel][] on the Kubernetes Slack, post a message announcing the new release. Include a link to the GitHub release and a thanks to the contributors: ``` -Image-builder v0.1.49 is now available: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.49 +Image-builder v0.1.50 is now available: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.50 Thanks to all contributors! 
``` -[v0.1.48]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.48 +[v0.1.49]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.49 [#image-builder channel]: https://kubernetes.slack.com/archives/C01E0Q35A8J [Personal access tokens]: https://github.com/settings/tokens [post-image-builder-push-images]: https://prow.k8s.io/?repo=kubernetes-sigs%2Fimage-builder&type=postsubmit&job=post-image-builder-push-images [releases page]: https://github.com/kubernetes-sigs/image-builder/releases [semver]: https://semver.org/#semantic-versioning-200 -[staging repository]: https://console.cloud.google.com/gcr/images/k8s-staging-scl-image-builder/GLOBAL/cluster-node-image-builder-amd64 +[staging repository]: https://console.cloud.google.com/artifacts/docker/k8s-staging-scl-image-builder/us/gcr.io/cluster-node-image-builder-amd64 From 34a21d437e1b41e2a777cb5f78bfbaf2d5841212 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?K=C3=A9vin=20PEREZ?= Date: Tue, 17 Mar 2026 16:12:11 +0100 Subject: [PATCH 61/90] =?UTF-8?q?=E2=9A=B0=EF=B8=8F=20Remove=20RHEL=208=20?= =?UTF-8?q?and=20related=20unsupported=20OS=20support?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All of these distributions are no longer supported and use GLIBC_2.28. In anticipation of an upgrade to ContainerD version 2.X, GLIBC 2.34 is required. This commit therefore removes support for the unsupported distributions in order to be able to offer a future upgrade of ContainerD. A final release of KubeImageBuilder compatible with these images should be created, and the Release Notes indicate the end of support for these operating systems. 
--- README.md | 4 - docs/book/src/capi/providers/aws.md | 1 - docs/book/src/capi/providers/gcp.md | 1 - docs/book/src/capi/providers/hcloud.md | 1 - docs/book/src/capi/providers/ibmcloud.md | 11 +- docs/book/src/capi/providers/nutanix.md | 2 - docs/book/src/capi/providers/oci.md | 3 +- docs/book/src/capi/providers/vsphere.md | 1 - images/capi/Makefile | 108 ++++++------------ .../capi/ansible/roles/node/defaults/main.yml | 2 +- .../ansible/roles/sysprep/tasks/redhat.yml | 14 --- images/capi/azure_targets.sh | 4 +- images/capi/hack/image-build-ova.py | 3 - images/capi/packer/ami/rhel-8.json | 17 --- images/capi/packer/ami/rockylinux-8.json | 16 --- images/capi/packer/azure/rhel-8.json | 11 -- images/capi/packer/azure/scripts/init-sig.sh | 3 - images/capi/packer/gce/rhel-8.json | 11 -- images/capi/packer/hcloud/rockylinux-8.json | 10 -- images/capi/packer/nutanix/rhel-8.json | 12 -- images/capi/packer/nutanix/rockylinux-8.json | 15 --- images/capi/packer/oci/oracle-linux-8.json | 8 -- .../ova/linux/centos/http/8/ks.cfg.tmpl | 75 ------------ .../packer/ova/linux/rhel/http/8/ks.cfg.tmpl | 75 ------------ .../ova/linux/rockylinux/http/8/ks.cfg.tmpl | 96 ---------------- images/capi/packer/ova/rhel-8.json | 19 --- images/capi/packer/ova/rockylinux-8.json | 20 ---- images/capi/packer/powervs/centos-8.json | 9 -- .../qemu/linux/centos/http/8/ks.cfg.tmpl | 74 ------------ .../packer/qemu/linux/rhel/http/8/ks.cfg.tmpl | 74 ------------ .../qemu/linux/rockylinux/http/8/ks.cfg.tmpl | 95 --------------- images/capi/packer/qemu/qemu-rhel-8.json | 16 --- .../qemu/qemu-rockylinux-8-cloudimg.json | 17 --- .../capi/packer/qemu/qemu-rockylinux-8.json | 17 --- .../packer/raw/linux/rhel/http/8/ks.cfg.tmpl | 74 ------------ images/capi/packer/raw/raw-rhel-8.json | 16 --- images/capi/scripts/ci-ova.sh | 2 - 37 files changed, 45 insertions(+), 892 deletions(-) delete mode 100644 images/capi/packer/ami/rhel-8.json delete mode 100644 images/capi/packer/ami/rockylinux-8.json delete mode 
100644 images/capi/packer/azure/rhel-8.json delete mode 100644 images/capi/packer/gce/rhel-8.json delete mode 100644 images/capi/packer/hcloud/rockylinux-8.json delete mode 100644 images/capi/packer/nutanix/rhel-8.json delete mode 100644 images/capi/packer/nutanix/rockylinux-8.json delete mode 100644 images/capi/packer/oci/oracle-linux-8.json delete mode 100644 images/capi/packer/ova/linux/centos/http/8/ks.cfg.tmpl delete mode 100644 images/capi/packer/ova/linux/rhel/http/8/ks.cfg.tmpl delete mode 100644 images/capi/packer/ova/linux/rockylinux/http/8/ks.cfg.tmpl delete mode 100644 images/capi/packer/ova/rhel-8.json delete mode 100644 images/capi/packer/ova/rockylinux-8.json delete mode 100644 images/capi/packer/powervs/centos-8.json delete mode 100644 images/capi/packer/qemu/linux/centos/http/8/ks.cfg.tmpl delete mode 100644 images/capi/packer/qemu/linux/rhel/http/8/ks.cfg.tmpl delete mode 100644 images/capi/packer/qemu/linux/rockylinux/http/8/ks.cfg.tmpl delete mode 100644 images/capi/packer/qemu/qemu-rhel-8.json delete mode 100644 images/capi/packer/qemu/qemu-rockylinux-8-cloudimg.json delete mode 100644 images/capi/packer/qemu/qemu-rockylinux-8.json delete mode 100644 images/capi/packer/raw/linux/rhel/http/8/ks.cfg.tmpl delete mode 100644 images/capi/packer/raw/raw-rhel-8.json diff --git a/README.md b/README.md index e83b26fff8..5c257f1a2d 100644 --- a/README.md +++ b/README.md @@ -26,16 +26,12 @@ The table below shows the currently provided operating systems for each provider | Amazon Linux 2 | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Amazon Linux 2023 | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Azure Linux 3 | ❌ | 💙 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | -| CentOS 8 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | | CentOS 9 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ | | Flatcar | ✅ | 💙 | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | 
✅ | ❌ | 💙 | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | -| Oracle Linux 8 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Oracle Linux 9 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Photon 4 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | Photon 5 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | 💙 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | -| RHEL 8 | ✅ | ✅ | ❌ | 💙 | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | | RHEL 9 | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | -| Rocky Linux 8 | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | | Rocky Linux 9 | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | 💙 | ❌ | ✅ | ✅ | ❌ | ✅ | ❌ | | Ubuntu 22.04 | ✅ | 💙 | ✅ | 💙 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 💙 | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | | Ubuntu 24.04 | ✅ | 💙 | ✅ | 💙 | ✅ | ❌ | ✅ | ✅ | ❌ | ✅ | ✅ | 💙 | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | diff --git a/docs/book/src/capi/providers/aws.md b/docs/book/src/capi/providers/aws.md index 17d6889223..f6f11c527e 100644 --- a/docs/book/src/capi/providers/aws.md +++ b/docs/book/src/capi/providers/aws.md @@ -43,7 +43,6 @@ the different operating systems. 
| `amazon-2.json` | The settings for the Amazon 2 Linux image | | `flatcar.json` | The settings for the Flatcar image | | `flatcar-arm64.json` | The settings for the Flatcar arm64 image | -| `rhel-8.json` | The settings for the RHEL 8 image | | `rockylinux.json` | The settings for the Rocky Linux image | | `ubuntu-2204.json` | The settings for the Ubuntu 22.04 image | | `ubuntu-2404.json` | The settings for the Ubuntu 24.04 image | diff --git a/docs/book/src/capi/providers/gcp.md b/docs/book/src/capi/providers/gcp.md index f84852d0a0..0fec5d46c2 100644 --- a/docs/book/src/capi/providers/gcp.md +++ b/docs/book/src/capi/providers/gcp.md @@ -50,7 +50,6 @@ The `gce` sub-directory inside `images/capi/packer` stores JSON configuration fi | -------- | -------- | `ubuntu-2204.json` | Settings for Ubuntu 22.04 image | | `ubuntu-2404.json` | Settings for Ubuntu 24.04 image | -| `rhel-8.json` | Settings for RHEL 8 image | #### Common GCP options diff --git a/docs/book/src/capi/providers/hcloud.md b/docs/book/src/capi/providers/hcloud.md index 97dbae2c9e..e35759075b 100644 --- a/docs/book/src/capi/providers/hcloud.md +++ b/docs/book/src/capi/providers/hcloud.md @@ -26,7 +26,6 @@ the different operating systems. 
|----------------------|------------------------------------------| | `flatcar.json` | The settings for the Flatcar image | | `flatcar-arm64.json` | The settings for the Flatcar arm64 image | -| `rockylinux-8.json` | The settings for the RockyLinux 8 image | | `rockylinux-9.json` | The settings for the RockyLinux 9 image | | `ubuntu-2204.json` | The settings for the Ubuntu 22.04 image | | `ubuntu-2404.json` | The settings for the Ubuntu 24.04 image | diff --git a/docs/book/src/capi/providers/ibmcloud.md b/docs/book/src/capi/providers/ibmcloud.md index d700c6877b..6ef9225bf0 100644 --- a/docs/book/src/capi/providers/ibmcloud.md +++ b/docs/book/src/capi/providers/ibmcloud.md @@ -18,14 +18,14 @@ $ cd image-builder/images/capi/ $ make deps-powervs ``` -From the `images/capi` directory, run `make build-powervs-centos-8`. The image is built and uploaded to your bucket capibm-powervs-{BUILD_NAME}-{KUBERNETES_VERSION}-{BUILD_TIMESTAMP}. +From the `images/capi` directory, run `make build-powervs-centos-9`. The image is built and uploaded to your bucket capibm-powervs-{BUILD_NAME}-{KUBERNETES_VERSION}-{BUILD_TIMESTAMP}. > **Note:** Fill the required fields which are listed [here](#common-powervs-options) in a json file and pass it to the `PACKER_VAR_FILES` environment variable while building the image. 
-For building a centos-streams8 based CAPI image, run the following commands - +For building a centos-streams9 based CAPI image, run the following commands - ```bash -$ ANSIBLE_SSH_ARGS="-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedAlgorithms=+ssh-rsa" PACKER_VAR_FILES=variables.json make build-powervs-centos-8 +$ ANSIBLE_SSH_ARGS="-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedAlgorithms=+ssh-rsa" PACKER_VAR_FILES=variables.json make build-powervs-centos-9 ``` ### Configuration @@ -34,8 +34,7 @@ In addition to the configuration found in `images/capi/packer/config`, the `powe | File | Description | |------|-------------| -| `centos-8.json` | The settings for the CentOS 8 image | -| `centos-9.json` | The settings for the CentOS 8 image | +| `centos-9.json` | The settings for the CentOS 9 image | #### Common PowerVS options @@ -60,7 +59,7 @@ The parameters can be set via a variable file and passed via `PACKER_VAR_FILES`. > **Note:** -> 1. When setting `dhcp_network: true`, you need to build an OS image with certain network settings using [pvsadm tool](https://github.com/ppc64le-cloud/pvsadm/blob/main/docs/Build%20DHCP%20enabled%20Centos%20Images.md) and replace [the fields](https://github.com/kubernetes-sigs/image-builder/blob/cb925047f388090a0db3430ca3172da63eff952c/images/capi/packer/powervs/centos-8.json#L6) with the custom image details. +> 1. When setting `dhcp_network: true`, you need to build an OS image with certain network settings using [pvsadm tool](https://github.com/ppc64le-cloud/pvsadm/blob/main/docs/Build%20DHCP%20enabled%20Centos%20Images.md) and replace [the fields](https://github.com/kubernetes-sigs/image-builder/blob/main/images/capi/packer/powervs/centos-9.json#L6) with the custom image details. > 2. Clone the image-builder repo and run `make build` commands from a system where the DHCP private IP can be reached and SSH able. 
## CAPIBM - VPC diff --git a/docs/book/src/capi/providers/nutanix.md b/docs/book/src/capi/providers/nutanix.md index 888f8b6e74..79d11776ce 100644 --- a/docs/book/src/capi/providers/nutanix.md +++ b/docs/book/src/capi/providers/nutanix.md @@ -89,9 +89,7 @@ The `nutanix` sub-directory inside `images/capi/packer` stores JSON configuratio | File | Description | |---------------------|-----------------------------------------------| | `ubuntu-2204.json` | Settings for Ubuntu 22.04 image | -| `rockylinux-8.json` | Settings for Rocky Linux 8 image (UEFI) | | `rockylinux-9.json` | Settings for Rocky Linux 9 image | -| `rhel-8.json` | Settings for RedHat Enterprise Linux 8 image | | `rhel-9.json` | Settings for RedHat Enterprise Linux 9 image | | `flatcar.json` | Settings for Flatcar Linux image (beta) | | `windows-2022.json` | Settings for Windows Server 2022 image (beta) | diff --git a/docs/book/src/capi/providers/oci.md b/docs/book/src/capi/providers/oci.md index 722adcff42..290c12d47a 100644 --- a/docs/book/src/capi/providers/oci.md +++ b/docs/book/src/capi/providers/oci.md @@ -26,7 +26,6 @@ the different operating systems. 
| File | Description | |------|-------------| -| `oracle-linux-8.json` | The settings for the Oracle Linux 8 image | | `oracle-linux-9.json` | The settings for the Oracle Linux 9 image | | `ubuntu-2204.json` | The settings for the Ubuntu 22.04 image | | `windows-2019.json` | The settings for the Windows Server 2019 image | @@ -62,7 +61,7 @@ Create a file with the following contents and name it as `oci.json` #### Example make command with Packer VAR file ```bash -PACKER_VAR_FILES=oci.json make build-oci-oracle-linux-8 +PACKER_VAR_FILES=oci.json make build-oci-oracle-linux-9 ``` #### Build an Arm based image diff --git a/docs/book/src/capi/providers/vsphere.md b/docs/book/src/capi/providers/vsphere.md index b17e7e290d..f774f4cff5 100644 --- a/docs/book/src/capi/providers/vsphere.md +++ b/docs/book/src/capi/providers/vsphere.md @@ -79,7 +79,6 @@ In addition to the configuration found in `images/capi/packer/config`, the `ova` |--------------------|--------------------------------------------------------------| | `flatcar.json` | The settings for the Flatcar image | | `photon-4.json` | The settings for the Photon 4 image | -| `rhel-8.json` | The settings for the RHEL 8 image | | `rhel-9.json` | The settings for the RHEL 9 image | | `ubuntu-2204.json` | The settings for the Ubuntu 22.04 image | | `ubuntu-2204-efi.json` | The settings for the Ubuntu 22.04 EFI image | diff --git a/images/capi/Makefile b/images/capi/Makefile index a87d440b9b..37ba448bac 100644 --- a/images/capi/Makefile +++ b/images/capi/Makefile @@ -339,8 +339,8 @@ PACKER_POWERVS_NODE_FLAGS := $(foreach f,$(abspath $(COMMON_POWERVS_VAR_FILES)), ## -------------------------------------- FLATCAR_VERSIONS := flatcar PHOTON_VERSIONS := photon-4 photon-5 -RHEL_VERSIONS := rhel-8 rhel-9 -ROCKYLINUX_VERSIONS := rockylinux-8 rockylinux-9 +RHEL_VERSIONS := rhel-9 +ROCKYLINUX_VERSIONS := rockylinux-9 ALMALINUX_VERSIONS := almalinux-9 UBUNTU_VERSIONS := ubuntu-2204 ubuntu-2204-efi ubuntu-2404 ubuntu-2404-efi 
WINDOWS_VERSIONS := windows-2019 windows-2019-efi windows-2022 windows-2022-efi @@ -369,9 +369,9 @@ NODE_OVA_VSPHERE_BUILD_NAMES := $(addprefix node-ova-vsphere-,$(PLATFORMS_AND_V NODE_OVA_VSPHERE_BASE_BUILD_NAMES := $(addprefix node-ova-vsphere-base-,$(PLATFORMS_AND_VERSIONS)) NODE_OVA_VSPHERE_CLONE_BUILD_NAMES := $(addprefix node-ova-vsphere-clone-,$(PLATFORMS_AND_VERSIONS)) -AMI_BUILD_NAMES ?= ami-ubuntu-2204 ami-ubuntu-2404 ami-amazon-2 ami-amazon-2023 ami-flatcar ami-flatcar-arm64 ami-windows-2019 ami-rockylinux-8 ami-rhel-8 +AMI_BUILD_NAMES ?= ami-ubuntu-2204 ami-ubuntu-2404 ami-amazon-2 ami-amazon-2023 ami-flatcar ami-flatcar-arm64 ami-windows-2019 HUAWEICLOUD_BUILD_NAMES ?= huaweicloud-ubuntu-2204 -GCE_BUILD_NAMES ?= gce-ubuntu-2204 gce-ubuntu-2404 gce-rhel-8 +GCE_BUILD_NAMES ?= gce-ubuntu-2204 gce-ubuntu-2404 # Make needs these lists to be space delimited, no quotes VHD_TARGETS := $(shell grep VHD_TARGETS azure_targets.sh | sed 's/VHD_TARGETS=//' | tr -d \") @@ -383,7 +383,7 @@ AZURE_BUILD_SIG_NAMES ?= $(addprefix azure-sig-,$(SIG_TARGETS)) AZURE_BUILD_SIG_GEN2_NAMES ?= $(addsuffix -gen2,$(addprefix azure-sig-,$(SIG_GEN2_TARGETS))) AZURE_BUILD_SIG_CVM_NAMES ?= $(addsuffix -cvm,$(addprefix azure-sig-,$(SIG_CVM_TARGETS))) -OCI_BUILD_NAMES ?= oci-ubuntu-2204 oci-oracle-linux-8 oci-oracle-linux-9 oci-windows-2019 oci-windows-2022 +OCI_BUILD_NAMES ?= oci-ubuntu-2204 oci-oracle-linux-9 oci-windows-2019 oci-windows-2022 DO_BUILD_NAMES ?= do-ubuntu-2204 do-ubuntu-2404 @@ -391,17 +391,17 @@ OPENSTACK_BUILD_NAMES ?= openstack-ubuntu-2204 openstack-ubuntu-2404 openstack- OSC_BUILD_NAMES ?= osc-ubuntu-2204 osc-ubuntu-2404 -QEMU_BUILD_NAMES ?= qemu-ubuntu-2204 qemu-ubuntu-2204-cloudimg qemu-ubuntu-2404 qemu-ubuntu-2404-efi qemu-ubuntu-2204-efi qemu-centos-9 qemu-rhel-8 qemu-rhel-9 qemu-rockylinux-8 qemu-rockylinux-8-cloudimg qemu-rockylinux-9 qemu-rockylinux-9-cloudimg qemu-flatcar +QEMU_BUILD_NAMES ?= qemu-ubuntu-2204 qemu-ubuntu-2204-cloudimg qemu-ubuntu-2404 
qemu-ubuntu-2404-efi qemu-ubuntu-2204-efi qemu-centos-9 qemu-rhel-9 qemu-rockylinux-9 qemu-rockylinux-9-cloudimg qemu-flatcar QEMU_KUBEVIRT_BUILD_NAMES := $(addprefix kubevirt-,$(QEMU_BUILD_NAMES)) -RAW_BUILD_NAMES ?= raw-ubuntu-2204 raw-ubuntu-2204-efi raw-ubuntu-2404 raw-ubuntu-2404-efi raw-flatcar raw-rhel-8 raw-rhel-9 raw-rhel-9-efi +RAW_BUILD_NAMES ?= raw-ubuntu-2204 raw-ubuntu-2204-efi raw-ubuntu-2404 raw-ubuntu-2404-efi raw-flatcar raw-rhel-9 raw-rhel-9-efi -POWERVS_BUILD_NAMES ?= powervs-centos-8 powervs-centos-9 +POWERVS_BUILD_NAMES ?= powervs-centos-9 -NUTANIX_BUILD_NAMES ?= nutanix-ubuntu-2204 nutanix-ubuntu-2404 nutanix-rhel-8 nutanix-rhel-9 nutanix-rockylinux-8 nutanix-rockylinux-9 nutanix-flatcar nutanix-windows-2022 +NUTANIX_BUILD_NAMES ?= nutanix-ubuntu-2204 nutanix-ubuntu-2404 nutanix-rhel-9 nutanix-rockylinux-9 nutanix-flatcar nutanix-windows-2022 -HCLOUD_BUILD_NAMES ?= hcloud-ubuntu-2204 hcloud-ubuntu-2404 hcloud-rockylinux-8 hcloud-rockylinux-9 hcloud-flatcar hcloud-flatcar-arm64 +HCLOUD_BUILD_NAMES ?= hcloud-ubuntu-2204 hcloud-ubuntu-2404 hcloud-rockylinux-9 hcloud-flatcar hcloud-flatcar-arm64 PROXMOX_BUILD_NAMES ?= proxmox-ubuntu-2204 proxmox-ubuntu-2404 proxmox-ubuntu-2404-efi proxmox-rockylinux-9 proxmox-flatcar @@ -700,8 +700,6 @@ build-ami-amazon-2: ## Builds Amazon-2 Linux AMI build-ami-amazon-2023: ## Builds Amazon-2023 Linux AMI build-ami-ubuntu-2204: ## Builds Ubuntu 22.04 AMI build-ami-ubuntu-2404: ## Builds Ubuntu 24.04 AMI -build-ami-rockylinux-8: ## Builds RockyLinux 8 AMI -build-ami-rhel-8: ## Builds RHEL-8 AMI build-ami-flatcar: ## Builds Flatcar build-ami-flatcar-arm64: ## Builds Flatcar arm64 build-ami-windows-2019: ## Build Windows Server 2019 AMI Packer config @@ -710,7 +708,6 @@ build-ami-all: $(AMI_BUILD_TARGETS) ## Builds all AMIs build-azure-sig-ubuntu-2204: ## Builds Ubuntu 22.04 Azure managed image in Shared Image Gallery build-azure-sig-ubuntu-2404: ## Builds Ubuntu 24.04 Azure managed image in Shared Image Gallery 
build-azure-sig-azurelinux-3: ## Builds Azure Linux 3 Azure managed image in Shared Image Gallery -build-azure-sig-rhel-8: ## Builds RHEL 8 Azure managed image in Shared Image Gallery build-azure-sig-windows-2019-containerd: ## Builds Windows Server 2019 with containerd Azure managed image in Shared Image Gallery build-azure-sig-windows-2022-containerd: ## Builds Windows Server 2022 with containerd Azure managed image in Shared Image Gallery build-azure-sig-windows-2025-containerd: ## Builds Windows Server 2025 with containerd Azure managed image in Shared Image Gallery @@ -719,7 +716,6 @@ build-azure-sig-windows-2022-containerd-cvm: ## Builds Windows Server 2022 with build-azure-vhd-ubuntu-2204: ## Builds Ubuntu 22.04 VHD image for Azure build-azure-vhd-ubuntu-2404: ## Builds Ubuntu 24.04 VHD image for Azure build-azure-vhd-azurelinux-3: ## Builds Azure Linux 3 VHD image for Azure -build-azure-vhd-rhel-8: ## Builds RHEL 8 VHD image for Azure build-azure-vhd-windows-2019-containerd: ## Builds for Windows Server 2019 with containerd build-azure-vhd-windows-2022-containerd: ## Builds for Windows Server 2022 with containerd build-azure-sig-windows-annual-containerd: ## Builds for Windows Server Annual Channel with containerd @@ -739,7 +735,6 @@ build-do-all: $(DO_BUILD_TARGETS) ## Builds all DigitalOcean Snapshot build-gce-ubuntu-2204: ## Builds the GCE ubuntu-2204 image build-gce-ubuntu-2404: ## Builds the GCE ubuntu-2404 image -build-gce-rhel-8: ## Builds the GCE rhel-8 image build-gce-all: $(GCE_BUILD_TARGETS) ## Builds all GCE image build-huaweicloud-ubuntu-2204: ## Builds Ubuntu 22.04 HuaweiCloud image @@ -748,9 +743,7 @@ build-huaweicloud-all: $(HUAWEICLOUD_BUILD_TARGETS) ## Builds all HuaweiCloud im build-node-ova-local-flatcar: ## Builds Flatcar stable Node OVA w local hypervisor build-node-ova-local-photon-4: ## Builds Photon 4 Node OVA w local hypervisor build-node-ova-local-photon-5: ## Builds Photon 5 Node OVA w local hypervisor 
-build-node-ova-local-rhel-8: ## Builds RHEL 8 Node OVA w local hypervisor build-node-ova-local-rhel-9: ## Builds RHEL 9 Node OVA w local hypervisor -build-node-ova-local-rockylinux-8: ## Builds RockyLinux 8 Node OVA w local hypervisor build-node-ova-local-rockylinux-9: ## Builds RockyLinux 9 Node OVA w local hypervisor build-node-ova-local-almalinux-9: ## Builds AlmaLinux 9 Node OVA w local hypervisor build-node-ova-local-windows-2019: ## Builds for Windows Server 2019 Node OVA w local hypervisor @@ -759,9 +752,7 @@ build-node-ova-local-all: $(NODE_OVA_LOCAL_BUILD_TARGETS) ## Builds all Node OVA build-node-ova-vsphere-flatcar: ## Builds Flatcar stable Node OVA and template on vSphere build-node-ova-vsphere-photon-4: ## Builds Photon 4 Node OVA and template on vSphere build-node-ova-vsphere-photon-5: ## Builds Photon 5 Node OVA and template on vSphere -build-node-ova-vsphere-rhel-8: ## Builds RHEL 8 Node OVA and template on vSphere build-node-ova-vsphere-rhel-9: ## Builds RHEL 9 Node OVA and template on vSphere -build-node-ova-vsphere-rockylinux-8: ## Builds RockyLinux 8 Node OVA and template on vSphere build-node-ova-vsphere-rockylinux-9: ## Builds RockyLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-almalinux-9: ## Builds AlmaLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-ubuntu-2204: ## Builds Ubuntu 22.04 Node OVA and template on vSphere @@ -776,9 +767,7 @@ build-node-ova-vsphere-all: $(NODE_OVA_VSPHERE_BUILD_TARGETS) ## Builds all Node build-node-ova-vsphere-clone-photon-4: ## Builds Photon 4 Node OVA and template on vSphere build-node-ova-vsphere-clone-photon-5: ## Builds Photon 5 Node OVA and template on vSphere -build-node-ova-vsphere-clone-rhel-8: ## Builds RHEL 8 Node OVA and template on vSphere build-node-ova-vsphere-clone-rhel-9: ## Builds RHEL 9 Node OVA and template on vSphere -build-node-ova-vsphere-clone-rockylinux-8: ## Builds RockyLinux 8 Node OVA and template on vSphere build-node-ova-vsphere-clone-rockylinux-9: 
## Builds RockyLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-clone-almalinux-9: ## Builds AlmaLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-clone-ubuntu-2204: ## Builds Ubuntu 22.04 Node OVA and template on vSphere @@ -789,9 +778,7 @@ build-node-ova-vsphere-clone-all: $(NODE_OVA_VSPHERE_CLONE_BUILD_TARGETS) ## Bui build-node-ova-vsphere-base-photon-4: ## Builds base Photon 4 Node OVA and template on vSphere build-node-ova-vsphere-base-photon-5: ## Builds base Photon 5 Node OVA and template on vSphere -build-node-ova-vsphere-base-rhel-8: ## Builds base RHEL 8 Node OVA and template on vSphere build-node-ova-vsphere-base-rhel-9: ## Builds base RHEL 9 Node OVA and template on vSphere -build-node-ova-vsphere-base-rockylinux-8: ## Builds base RockyLinux 8 Node OVA and template on vSphere build-node-ova-vsphere-base-rockylinux-9: ## Builds base RockyLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-base-almalinux-9: ## Builds base AlmaLinux 9 Node OVA and template on vSphere build-node-ova-vsphere-base-ubuntu-2204: ## Builds base Ubuntu 22.04 Node OVA and template on vSphere @@ -802,19 +789,15 @@ build-node-ova-vsphere-base-all: $(NODE_OVA_VSPHERE_BASE_BUILD_TARGETS) ## Build build-node-ova-local-vmx-photon-4: ## Builds Photon 4 Node OVA from VMX file w local hypervisor build-node-ova-local-vmx-photon-5: ## Builds Photon 5 Node OVA from VMX file w local hypervisor -build-node-ova-local-vmx-rhel-8: ## Builds RHEL 8 Node OVA from VMX file w local hypervisor build-node-ova-local-vmx-rhel-9: ## Builds RHEL 9 Node OVA from VMX file w local hypervisor -build-node-ova-local-vmx-rockylinux-8: ## Builds RockyLinux 8 Node OVA from VMX file w local hypervisor build-node-ova-local-vmx-rockylinux-9: ## Builds RockyLinux 9 Node OVA from VMX file w local hypervisor build-node-ova-local-vmx-almalinux-9: ## Builds AlmaLinux 9 Node OVA from VMX file w local hypervisor build-node-ova-local-base-photon-4: ## Builds Photon 4 Base Node OVA w 
local hypervisor build-node-ova-local-base-photon-5: ## Builds Photon 5 Base Node OVA w local hypervisor -build-node-ova-local-base-rhel-8: ## Builds RHEL 8 Base Node OVA w local hypervisor build-node-ova-local-base-rhel-9: ## Builds RHEL 9 Base Node OVA w local hypervisor -build-node-ova-local-base-rockylinux-8: ## Builds RockyLinux 8 Base Node OVA w local hypervisor -build-node-ova-local-base-rockylinux-9: ## Builds RockyLinux 9 Base Node OVA w local hypervisor -build-node-ova-local-base-almalinux-9: ## Builds AlmaLinux 9 Base Node OVA w local hypervisor +build-node-ova-local-base-rockylinux-9: ## Builds base RockyLinux 9 Base Node OVA w local hypervisor +build-node-ova-local-base-almalinux-9: ## Builds base AlmaLinux 9 Base Node OVA w local hypervisor build-openstack-ubuntu-2204: ## Builds Ubuntu 22.04 OpenStack image build-openstack-ubuntu-2404: ## Builds Ubuntu 24.04 OpenStack image @@ -829,10 +812,7 @@ build-qemu-ubuntu-2204-efi: ## Builds Ubuntu 22.04 QEMU image that EFI boots build-qemu-ubuntu-2404: ## Builds Ubuntu 24.04 QEMU image build-qemu-ubuntu-2404-efi: ## Builds Ubuntu 24.04 QEMU image that EFI boots build-qemu-centos-9: ## Builds CentOS 9 Stream QEMU image -build-qemu-rhel-8: ## Builds RHEL 8 QEMU image build-qemu-rhel-9: ## Builds RHEL 9 QEMU image -build-qemu-rockylinux-8: ## Builds Rocky 8 QEMU image -build-qemu-rockylinux-8-cloudimg: ## Builds Rocky 8 QEMU image using cloud image build-qemu-rockylinux-9: ## Builds Rocky 9 QEMU image build-qemu-rockylinux-9-cloudimg: ## Builds Rocky 9 QEMU image using cloud image build-qemu-all: $(QEMU_BUILD_TARGETS) ## Builds all Qemu images @@ -845,13 +825,11 @@ build-raw-ubuntu-2204: ## Builds Ubuntu 22.04 RAW image build-raw-ubuntu-2204-efi: ## Builds Ubuntu 22.04 RAW image that EFI boots build-raw-ubuntu-2404: ## Builds Ubuntu 24.04 RAW image build-raw-ubuntu-2404-efi: ## Builds Ubuntu 24.04 RAW image that EFI boots -build-raw-rhel-8: ## Builds RHEL 8 RAW image build-raw-rhel-9: ## Builds RHEL 9 RAW image 
build-raw-rhel-9-efi: ## Builds RHEL 9 RAW image that EFI boots build-raw-all: $(RAW_BUILD_TARGETS) ## Builds all RAW images build-oci-ubuntu-2204: ## Builds the OCI ubuntu-2204 image -build-oci-oracle-linux-8: ## Builds the OCI Oracle Linux 8.x image build-oci-oracle-linux-9: ## Builds the OCI Oracle Linux 9.x image build-oci-windows-2019: ## Builds the OCI Windows Server 2019 image build-oci-windows-2022: ## Builds the OCI Windows Server 2022 image @@ -861,37 +839,36 @@ build-osc-ubuntu-2204: ## Builds Ubuntu 22.04 Outscale Snapshot build-osc-ubuntu-2404: ## Builds Ubuntu 24.04 Outscale Snapshot build-osc-all: $(OSC_BUILD_TARGETS) ## Builds all Outscale Snapshot -build-nutanix-ubuntu-2204: ## Builds the Nutanix ubuntu-2204 image -build-nutanix-ubuntu-2404: ## Builds the Nutanix ubuntu-2404 image -build-nutanix-rhel-8: ## Builds the Nutanix RedHat Enterprise Linux 8 image -build-nutanix-rhel-9: ## Builds the Nutanix edHat Enterprise Linux 9 image -build-nutanix-rockylinux-8: ## Builds the Nutanix Rocky Linux 8 image +build-nutanix-ubuntu-2204: ## Builds Ubuntu 22.04 Nutanix image +build-nutanix-ubuntu-2404: ## Builds Ubuntu 24.04 Nutanix image +build-nutanix-rhel-9: ## Builds the Nutanix RedHat Enterprise Linux 9 image build-nutanix-rockylinux-9: ## Builds the Nutanix Rocky Linux 9 image build-nutanix-flatcar: ## Builds the Nutanix Flatcar image build-nutanix-windows-2022: ## Builds the Nutanix Windows 2022 image build-nutanix-all: $(NUTANIX_BUILD_TARGETS) ## Builds all Nutanix image -build-hcloud-ubuntu-2204: ## Builds the Hetzner Cloud ubuntu-2204 image -build-hcloud-ubuntu-2404: ## Builds the Hetzner Cloud ubuntu-2404 image -build-hcloud-rockylinux-8: ## Builds the Hetzner Cloud Rocky Linux 8 image +build-hcloud-ubuntu-2204: ## Builds the Hetzner Cloud Ubuntu 22.04 image +build-hcloud-ubuntu-2404: ## Builds the Hetzner Cloud Ubuntu 24.04 image build-hcloud-rockylinux-9: ## Builds the Hetzner Cloud Rocky Linux 9 image build-hcloud-flatcar: ## Builds the Hetzner 
Cloud Flatcar image build-hcloud-flatcar-arm64: ## Builds the Hetzner Cloud Flatcar arm64 image build-hcloud-all: $(HCLOUD_BUILD_TARGETS) ## Builds all Hetzner Cloud image +build-proxmox-ubuntu-2204: ## Builds Ubuntu 22.04 Proxmox image +build-proxmox-ubuntu-2404: ## Builds Ubuntu 24.04 Proxmox image +build-proxmox-ubuntu-2404-efi: ## Builds Ubuntu 24.04 Proxmox image that EFI boots +build-proxmox-rockylinux-9: ## Builds Rocky Linux 9 Proxmox image build-proxmox-flatcar: ## Builds Flatcar Proxmox image -build-proxmox-ubuntu-2204: ## Builds the Proxmox ubuntu-2204 image -build-proxmox-ubuntu-2404: ## Builds the Proxmox ubuntu-2404 image -build-proxmox-ubuntu-2404-efi: ## Builds the Proxmox ubuntu-2404-efi image that EFI boots -build-proxmox-rockylinux-9: ## Builds the Proxmox rockylinux-9 image +build-proxmox-all: $(PROXMOX_BUILD_TARGETS) ## Builds all Proxmox images build-vultr-ubuntu-2204: ## Builds Ubuntu 22.04 Vultr Snapshot build-vultr-ubuntu-2404: ## Builds Ubuntu 24.04 Vultr Snapshot +build-vultr-all: $(VULTR_BUILD_TARGETS) ## Builds all Vultr Snapshots -build-scaleway-rockylinux-9: ## Builds the Scaleway rockylinux-9 image +build-scaleway-rockylinux-9: ## Builds Rocky Linux 9 Scaleway image build-scaleway-ubuntu-2204: ## Builds Ubuntu 22.04 Scaleway image build-scaleway-ubuntu-2404: ## Builds Ubuntu 24.04 Scaleway image -build-scaleway-all: $(SCALEWAY_BUILD_TARGETS) ## Builds all Scaleway Cloud images +build-scaleway-all: $(SCALEWAY_BUILD_TARGETS) ## Builds all Scaleway images ## -------------------------------------- ## Document dynamic validate targets @@ -899,19 +876,17 @@ build-scaleway-all: $(SCALEWAY_BUILD_TARGETS) ## Builds all Scaleway Cloud image ##@ Validate packer config validate-ami-amazon-2: ## Validates Amazon-2 Linux AMI Packer config validate-ami-amazon-2023: ## Validates Amazon-2023 Linux AMI Packer config -validate-ami-rockylinux-8: ## Validates RockyLinux 8 AMI Packer config -validate-ami-rhel-8: ## Validates RHEL-8 AMI Packer config 
validate-ami-flatcar: ## Validates Flatcar AMI Packer config validate-ami-flatcar-arm64: ## Validates Flatcar arm64 AMI Packer config validate-ami-ubuntu-2204: ## Validates Ubuntu 22.04 AMI Packer config -validate-ami-ubuntu-2404: ## Validates Ubuntu 22.04 AMI Packer config + +validate-ami-ubuntu-2404: ## Validates Ubuntu 24.04 AMI Packer config validate-ami-windows-2019: ## Validates Windows Server 2019 AMI Packer config validate-ami-all: $(AMI_VALIDATE_TARGETS) ## Validates all AMIs Packer config validate-huaweicloud-ubuntu-2204: ## Validates Ubuntu 22.04 HuaweiCloud Snapshot Packer config validate-azure-sig-azurelinux-3: ## Validates Azure Linux 3 Azure managed image in Shared Image Gallery Packer config -validate-azure-sig-rhel-8: ## Validates RHEL 8 Azure managed image in Shared Image Gallery Packer config validate-azure-sig-ubuntu-2204: ## Validates Ubuntu 22.04 Azure managed image in Shared Image Gallery Packer config validate-azure-sig-ubuntu-2404: ## Validates Ubuntu 24.04 Azure managed image in Shared Image Gallery Packer config validate-azure-sig-windows-2019-containerd: ## Validate Windows Server 2019 with containerd Azure managed image in Shared Image Gallery Packer config @@ -919,7 +894,6 @@ validate-azure-sig-windows-2022-containerd: ## Validate Windows Server 2022 with validate-azure-sig-windows-2025-containerd: ## Validate Windows Server 2025 with containerd Azure managed image in Shared Image Gallery Packer config validate-azure-sig-windows-annual-containerd: ## Validate Windows Server Annual Channel with containerd Azure managed image in Shared Image Gallery Packer config validate-azure-vhd-azurelinux-3: ## Validates Azure Linux 3 VHD image Azure Packer config -validate-azure-vhd-rhel-8: ## Validates RHEL 8 VHD image Azure Packer config validate-azure-vhd-ubuntu-2204: ## Validates Ubuntu 22.04 VHD image Azure Packer config validate-azure-vhd-ubuntu-2404: ## Validates Ubuntu 24.04 VHD image Azure Packer config 
validate-azure-vhd-windows-2019-containerd: ## Validate Windows Server 2019 VHD with containerd image Azure Packer config @@ -943,15 +917,12 @@ validate-openstack-all: $(OPENSTACK_VALIDATE_TARGETS) ## Validates all Openstack validate-gce-ubuntu-2204: ## Validates Ubuntu 22.04 GCE Snapshot Packer config validate-gce-ubuntu-2404: ## Validates Ubuntu 24.04 GCE Snapshot Packer config -validate-gce-rhel-8: ## Validates RHEL 8 GCE Snapshot Packer config validate-gce-all: $(GCE_VALIDATE_TARGETS) ## Validates all GCE Snapshot Packer config validate-node-ova-local-flatcar: ## Validates Flatcar stable Node OVA Packer config w local hypervisor validate-node-ova-local-photon-4: ## Validates Photon 4 Node OVA Packer config w local hypervisor validate-node-ova-local-photon-5: ## Validates Photon 5 Node OVA Packer config w local hypervisor -validate-node-ova-local-rhel-8: ## Validates RHEL 8 Node OVA Packer config w local hypervisor validate-node-ova-local-rhel-9: ## Validates RHEL 9 Node OVA Packer config w local hypervisor -validate-node-ova-local-rockylinux-8: ## Validates RockyLinux 8 Node OVA Packer config w local hypervisor validate-node-ova-local-rockylinux-9: ## Validates RockyLinux 9 Node OVA Packer config w local hypervisor validate-node-ova-local-almalinux-9: ## Validates AlmaLinux 9 Node OVA Packer config w local hypervisor validate-node-ova-local-ubuntu-2204: ## Validates Ubuntu 22.04 Node OVA Packer config w local hypervisor @@ -962,9 +933,7 @@ validate-node-ova-local-all: $(NODE_OVA_LOCAL_VALIDATE_TARGETS) ## Validates all validate-node-ova-local-vmx-photon-4: ## Validates Photon 4 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-photon-5: ## Validates Photon 5 Node OVA from VMX file w local hypervisor -validate-node-ova-local-vmx-rhel-8: ## Validates RHEL 8 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-rhel-9: ## Validates RHEL 9 Node OVA from VMX file w local hypervisor -validate-node-ova-local-vmx-rockylinux-8: ## 
Validates RockyLinux 8 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-rockylinux-9: ## Validates RockyLinux 9 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-almalinux-9: ## Validates AlmaLinux 9 Node OVA from VMX file w local hypervisor validate-node-ova-local-vmx-ubuntu-2204: ## Validates Ubuntu 22.04 Node OVA from VMX file w local hypervisor @@ -972,9 +941,7 @@ validate-node-ova-local-vmx-ubuntu-2404: ## Validates Ubuntu 24.04 Node OVA from validate-node-ova-local-base-photon-4: ## Validates Photon 4 Base Node OVA w local hypervisor validate-node-ova-local-base-photon-5: ## Validates Photon 5 Base Node OVA w local hypervisor -validate-node-ova-local-base-rhel-8: ## Validates RHEL 8 Base Node OVA w local hypervisor validate-node-ova-local-base-rhel-9: ## Validates RHEL 9 Base Node OVA w local hypervisor -validate-node-ova-local-base-rockylinux-8: ## Validates RockyLinux 8 Base Node OVA w local hypervisor validate-node-ova-local-base-rockylinux-9: ## Validates RockyLinux 9 Base Node OVA w local hypervisor validate-node-ova-local-base-almalinux-9: ## Validates AlmaLinux 9 Base Node OVA w local hypervisor validate-node-ova-local-base-ubuntu-2204: ## Validates Ubuntu 22.04 Base Node OVA w local hypervisor @@ -986,10 +953,7 @@ validate-qemu-ubuntu-2204-cloudimg: ## Validates Ubuntu 22.04 QEMU image packer validate-qemu-ubuntu-2204-efi: ## Validates Ubuntu 22.04 QEMU EFI image packer config validate-qemu-ubuntu-2404: ## Validates Ubuntu 24.04 QEMU image packer config validate-qemu-ubuntu-2404-efi: ## Validates Ubuntu 24.04 QEMU EFI image packer config -validate-qemu-rhel-8: ## Validates RHEL 8 QEMU image validate-qemu-rhel-9: ## Validates RHEL 9 QEMU image -validate-qemu-rockylinux-8: ## Validates Rocky Linux 8 QEMU image packer config -validate-qemu-rockylinux-8-cloudimg: ## Validates Rocky Linux 8 QEMU image packer config using cloud image validate-qemu-rockylinux-9: ## Validates Rocky Linux 9 QEMU image packer config 
validate-qemu-rockylinux-9-cloudimg: ## Validates Rocky Linux 9 QEMU image packer config using cloud image validate-qemu-all: $(QEMU_VALIDATE_TARGETS) ## Validates all Qemu Packer config @@ -999,11 +963,10 @@ validate-raw-ubuntu-2204: ## Validates Ubuntu 22.04 RAW image packer config validate-raw-ubuntu-2204-efi: ## Validates Ubuntu 22.04 RAW EFI image packer config validate-raw-ubuntu-2404: ## Validates Ubuntu 24.04 RAW image packer config validate-raw-ubuntu-2404-efi: ## Validates Ubuntu 24.04 RAW EFI image packer config -validate-raw-rhel-8: ## Validates RHEL 8 RAW image packer config +validate-raw-rhel-9: ## Validates RHEL 9 RAW image packer config validate-raw-all: $(RAW_VALIDATE_TARGETS) ## Validates all RAW Packer config validate-oci-ubuntu-2204: ## Validates the OCI ubuntu-2204 image packer config -validate-oci-oracle-linux-8: ## Validates the OCI Oracle Linux 8.x image packer config validate-oci-oracle-linux-9: ## Validates the OCI Oracle Linux 9.x image packer config validate-oci-windows-2019: ## Validates the OCI Windows 2019 image packer config validate-oci-windows-2022: ## Validates the OCI Windows 2022 image packer config @@ -1013,15 +976,12 @@ validate-osc-ubuntu-2204: ## Validates Ubuntu 22.04 Outscale Snapshot Packer con validate-osc-ubuntu-2404: ## Validates Ubuntu 24.04 Outscale Snapshot Packer config validate-osc-all: $(OSC_VALIDATE_TARGETS) ## Validates all Outscale Snapshot Packer config -validate-powervs-centos-8: ## Validates the PowerVS CentOS 8 image packer config validate-powervs-centos-9: ## Validates the PowerVS CentOS 9 image packer config validate-powervs-all: $(POWERVS_VALIDATE_TARGETS) ## Validates all PowerVS Packer config validate-nutanix-ubuntu-2204: ## Validates Ubuntu 22.04 Nutanix Packer config validate-nutanix-ubuntu-2404: ## Validates Ubuntu 24.04 Nutanix Packer config -validate-nutanix-rhel-8: ## Validates RedHat Enterprise Linux 8 Nutanix Packer config validate-nutanix-rhel-9: ## Validates RedHat Enterprise Linux 9 Nutanix 
Packer config -validate-nutanix-rockylinux-8: ## Validates Rocky Linux 8 Nutanix Packer config validate-nutanix-rockylinux-9: ## Validates Rocky Linux 9 Nutanix Packer config validate-nutanix-flatcar: ## Validates the Nutanix Flatcar Nutanix Packer config validate-nutanix-windows-2022: ## Validates Windows Server 2022 Nutanix Packer config @@ -1029,17 +989,21 @@ validate-nutanix-all: $(NUTANIX_VALIDATE_TARGETS) ## Validates all Nutanix Packe validate-hcloud-ubuntu-2204: ## Validates Ubuntu 22.04 Hetzner Cloud Packer config validate-hcloud-ubuntu-2404: ## Validates Ubuntu 24.04 Hetzner Cloud Packer config -validate-hcloud-rockylinux-8: ## Validates Rocky Linux 8 Hetzner Cloud Packer config -validate-hcloud-rockylinux-9: ## Validates the Hetzner Cloud Rocky Linux 9 Packer config +validate-hcloud-rockylinux-9: ## Validates Rocky Linux 9 Hetzner Cloud Packer config validate-hcloud-flatcar: ## Validates the Hetzner Cloud Flatcar Packer config validate-hcloud-flatcar-arm64: ## Validates the Hetzner Cloud Flatcar arm64 Packer config validate-hcloud-all: $(HCLOUD_VALIDATE_TARGETS) ## Validates all Hetzner Cloud Packer config validate-proxmox-ubuntu-2204: ## Validates Ubuntu 22.04 Proxmox Packer config +validate-proxmox-ubuntu-2404: ## Validates Ubuntu 24.04 Proxmox Packer config +validate-proxmox-ubuntu-2404-efi: ## Validates Ubuntu 24.04 EFI Proxmox Packer config +validate-proxmox-rockylinux-9: ## Validates Rocky Linux 9 Proxmox Packer config validate-proxmox-flatcar: ## Validates Flatcar Proxmox Packer config +validate-proxmox-all: $(PROXMOX_VALIDATE_TARGETS) ## Validates all Proxmox Packer config validate-vultr-ubuntu-2204: ## Validates Ubuntu 22.04 Vultr Snapshot Packer config validate-vultr-ubuntu-2404: ## Validates Ubuntu 24.04 Vultr Snapshot Packer config +validate-vultr-all: $(VULTR_VALIDATE_TARGETS) ## Validates all Vultr Snapshot Packer config validate-scaleway-rockylinux-9: ## Validates Rocky Linux 9 Scaleway image Packer config validate-scaleway-ubuntu-2204: ## 
Validates Ubuntu 22.04 Scaleway image Packer config @@ -1055,10 +1019,10 @@ validate-all: validate-ami-all \ validate-raw-all \ validate-oci-all \ validate-osc-all \ - validate-powervs-all \ validate-nutanix-all \ validate-hcloud-all \ - validate-scaleway-all + validate-scaleway-all \ + validate-powervs-all validate-all: ## Validates the Packer config for all build targets diff --git a/images/capi/ansible/roles/node/defaults/main.yml b/images/capi/ansible/roles/node/defaults/main.yml index 7865a18d08..ed7af0748c 100644 --- a/images/capi/ansible/roles/node/defaults/main.yml +++ b/images/capi/ansible/roles/node/defaults/main.yml @@ -45,7 +45,7 @@ azurelinux_rpms: - yum-utils - lsof -# Used for RedHat based distributions =! 7 (ex. RHEL-8, RockyLinux-8, RockyLinux-9 etc.) +# Used for RedHat based distributions != 7 (ex. RockyLinux-9 etc.) rh8_rpms: - curl - yum-utils diff --git a/images/capi/ansible/roles/sysprep/tasks/redhat.yml b/images/capi/ansible/roles/sysprep/tasks/redhat.yml index 1b8870d9ec..25aa26f323 100644 --- a/images/capi/ansible/roles/sysprep/tasks/redhat.yml +++ b/images/capi/ansible/roles/sysprep/tasks/redhat.yml @@ -40,20 +40,6 @@ - ansible.builtin.import_tasks: rpm_repos.yml -# Oracle Linux does not have temp-disk-swapfile service -- name: Disable swap service and ensure it is masked - ansible.builtin.systemd: - name: temp-disk-swapfile - enabled: false - masked: true - when: ansible_memory_mb.swap.total != 0 and ansible_distribution_major_version|int <= 7 - -- name: Disable swap service and ensure it is masked on RHEL 8 - ansible.builtin.systemd: - name: swap.target - enabled: false - masked: true - when: ansible_memory_mb.swap.total != 0 and ansible_distribution_major_version|int == 8 - name: Remove RHEL subscription when: diff --git a/images/capi/azure_targets.sh b/images/capi/azure_targets.sh index df40a78c2f..2f03cbe7bc 100644 --- a/images/capi/azure_targets.sh +++ b/images/capi/azure_targets.sh @@ -1,4 +1,4 @@ -VHD_TARGETS="ubuntu-2204 
ubuntu-2404 azurelinux-3 rhel-8 windows-2022-containerd" -SIG_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 rhel-8 windows-2022-containerd windows-2025-containerd flatcar" +VHD_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 windows-2022-containerd" +SIG_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 windows-2022-containerd windows-2025-containerd flatcar" SIG_GEN2_TARGETS="ubuntu-2204 ubuntu-2404 azurelinux-3 flatcar" SIG_CVM_TARGETS="ubuntu-2204 ubuntu-2404 windows-2022-containerd" diff --git a/images/capi/hack/image-build-ova.py b/images/capi/hack/image-build-ova.py index 78ba124cb4..4c7f622b25 100755 --- a/images/capi/hack/image-build-ova.py +++ b/images/capi/hack/image-build-ova.py @@ -108,9 +108,6 @@ def main(): vmdk = vmdk_files[0] OS_id_map = {"vmware-photon-64": {"id": "36", "version": "", "type": "vmwarePhoton64Guest"}, - "centos7-64": {"id": "107", "version": "7", "type": "centos7_64Guest"}, - "centos8-64": {"id": "107", "version": "8", "type": "centos8_64Guest"}, - "rhel8-64": {"id": "80", "version": "8", "type": "rhel8_64Guest"}, "rhel9-64": {"id": "80", "version": "9", "type": "rhel9_64Guest"}, "rockylinux-64": {"id": "80", "version": "", "type": "rockylinux_64Guest"}, "ubuntu-64": {"id": "94", "version": "", "type": "ubuntu64Guest"}, diff --git a/images/capi/packer/ami/rhel-8.json b/images/capi/packer/ami/rhel-8.json deleted file mode 100644 index ea506ccb7a..0000000000 --- a/images/capi/packer/ami/rhel-8.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "ami_filter_arch": "x86_64", - "ami_filter_name": "RHEL-8.6.0_HVM-*", - "ami_filter_owners": "309956199498", - "arch": "amd64", - "build_name": "rhel-8", - "builder_instance_type": "m5.large", - "distribution": "rhel", - "distribution_release": "Enterprise", - "distribution_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "root_device_name": "/dev/sda1", - "source_ami": 
"", - "ssh_username": "ec2-user", - "volume_size": "10" -} diff --git a/images/capi/packer/ami/rockylinux-8.json b/images/capi/packer/ami/rockylinux-8.json deleted file mode 100644 index cc2c494720..0000000000 --- a/images/capi/packer/ami/rockylinux-8.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "ami_filter_arch": "x86_64", - "ami_filter_name": "Rocky-8-ec2-8.5-*", - "ami_filter_owners": "679593333241", - "arch": "amd64", - "build_name": "rockylinux-8", - "distribution": "rockylinux", - "distribution_release": "Core", - "distribution_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "root_device_name": "/dev/sda1", - "source_ami": "", - "ssh_username": "rocky", - "volume_size": "10" -} diff --git a/images/capi/packer/azure/rhel-8.json b/images/capi/packer/azure/rhel-8.json deleted file mode 100644 index 6796dc42f9..0000000000 --- a/images/capi/packer/azure/rhel-8.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "build_name": "rhel-8", - "distribution": "rhel", - "distribution_release": "rhel-8", - "distribution_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "image_offer": "RHEL", - "image_publisher": "RedHat", - "image_sku": "8_7", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm" -} diff --git a/images/capi/packer/azure/scripts/init-sig.sh b/images/capi/packer/azure/scripts/init-sig.sh index 730c55b943..0f2d46318a 100755 --- a/images/capi/packer/azure/scripts/init-sig.sh +++ b/images/capi/packer/azure/scripts/init-sig.sh @@ -150,9 +150,6 @@ case ${SIG_TARGET} in azurelinux-3) create_image_definition ${SIG_TARGET} "azurelinux-3" "V1" "Linux" ;; - rhel-8) - create_image_definition "rhel-8" "rhel-8" "V1" "Linux" - ;; windows-2019-containerd) create_image_definition ${SIG_TARGET} "win-2019-containerd" "V1" "Windows" ;; diff --git 
a/images/capi/packer/gce/rhel-8.json b/images/capi/packer/gce/rhel-8.json deleted file mode 100644 index d328a66235..0000000000 --- a/images/capi/packer/gce/rhel-8.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "build_name": "rhel-8", - "distribution": "rhel", - "distribution_release": "rhel-8", - "distribution_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "source_image_family": "rhel-8", - "ssh_username": "packer", - "zone": "us-central1-a" -} diff --git a/images/capi/packer/hcloud/rockylinux-8.json b/images/capi/packer/hcloud/rockylinux-8.json deleted file mode 100644 index 720c62b16c..0000000000 --- a/images/capi/packer/hcloud/rockylinux-8.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "build_name": "rockylinux-8", - "distribution": "rockylinux", - "distribution_release": "Core", - "distribution_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "image": "rocky-8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "ssh_username": "root" -} diff --git a/images/capi/packer/nutanix/rhel-8.json b/images/capi/packer/nutanix/rhel-8.json deleted file mode 100644 index 9aba21d66a..0000000000 --- a/images/capi/packer/nutanix/rhel-8.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "build_name": "rhel-8", - "distribution_version": "8", - "distro_name": "rhel", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "extra_rpms": "", - "guest_os_type": "Linux", - "image_url": "https://REPLACE_YOUR_SERVER/redhat/8/rhel-8.8-x86_64-kvm.qcow2", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "shutdown -P now", - "user_data": 
"I2Nsb3VkLWNvbmZpZwp1c2VyczoKICAtIG5hbWU6IGJ1aWxkZXIKICAgIHN1ZG86IFsnQUxMPShBTEwpIE5PUEFTU1dEOkFMTCddCmNocGFzc3dkOgogIGxpc3Q6IHwKICAgIGJ1aWxkZXI6YnVpbGRlcgogIGV4cGlyZTogRmFsc2UKc3NoX3B3YXV0aDogVHJ1ZQ==" -} diff --git a/images/capi/packer/nutanix/rockylinux-8.json b/images/capi/packer/nutanix/rockylinux-8.json deleted file mode 100644 index c40ce5e8ab..0000000000 --- a/images/capi/packer/nutanix/rockylinux-8.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "boot_type": "uefi", - "build_name": "rockylinux-8", - "distribution": "rockylinux", - "distribution_release": "Core", - "distribution_version": "8", - "distro_name": "rockylinux", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "extra_rpms": "python3", - "guest_os_type": "Linux", - "image_url": "https://dl.rockylinux.org/pub/rocky/8/images/x86_64/Rocky-8-GenericCloud-Base.latest.x86_64.qcow2", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "shutdown -P now", - "user_data": "I2Nsb3VkLWNvbmZpZwp1c2VyczoKICAtIG5hbWU6IGJ1aWxkZXIKICAgIHN1ZG86IFsnQUxMPShBTEwpIE5PUEFTU1dEOkFMTCddCmNocGFzc3dkOgogIGxpc3Q6IHwKICAgIGJ1aWxkZXI6YnVpbGRlcgogIGV4cGlyZTogRmFsc2UKc3NoX3B3YXV0aDogVHJ1ZQ==" -} diff --git a/images/capi/packer/oci/oracle-linux-8.json b/images/capi/packer/oci/oracle-linux-8.json deleted file mode 100644 index 3949ed1c51..0000000000 --- a/images/capi/packer/oci/oracle-linux-8.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "build_name": "oracle-linux-8", - "distribution": "Oracle Linux", - "operating_system": "Oracle Linux", - "operating_system_version": "8", - "redhat_epel_rpm": "oracle-epel-release-el8", - "ssh_username": "opc" -} diff --git a/images/capi/packer/ova/linux/centos/http/8/ks.cfg.tmpl b/images/capi/packer/ova/linux/centos/http/8/ks.cfg.tmpl deleted file mode 100644 index 5607fa080e..0000000000 --- a/images/capi/packer/ova/linux/centos/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,75 +0,0 @@ -# version=RHEL8 -# Install OS 
instead of upgrade -install -cdrom -auth --enableshadow --passalgo=sha512 --kickstart -# License agreement -eula --agreed -# Use text mode install -text -# Disable Initial Setup on first boot -firstboot --disable -# Keyboard layout -keyboard --vckeymap=us --xlayouts='us' -# System language -lang en_US.UTF-8 -# Network information -network --bootproto=dhcp --device=link --activate -network --hostname=rhel8 -firewall --disabled -# Root password -rootpw builder --plaintext -# SELinux configuration -selinux --permissive -# Do not configure the X Window System -skipx -# System timezone -timezone UTC -# Add a user named builder -user --groups=wheel --name=builder --password=$SSH_PASSWORD --plaintext --gecos="builder" - -# System bootloader configuration -bootloader --location=mbr --boot-drive=sda -# Clear the Master Boot Record -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -# Reboot after successful installation -reboot - -%packages --ignoremissing --excludedocs -# dnf group info minimal-environment -@^minimal-environment -@core -openssh-server -sed -sudo -python3 -open-vm-tools - -# Exclude unnecessary firmwares --iwl*firmware -%end - -# Enable/disable the following services -services --enabled=sshd - -%post --nochroot --logfile=/mnt/sysimage/root/ks-post.log -# Disable quiet boot and splash screen -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/etc/default/grub -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/boot/grub2/grubenv - -# Passwordless sudo for the user 'builder' -echo "builder ALL=(ALL) NOPASSWD: ALL" >> /mnt/sysimage/etc/sudoers.d/builder -# Remove the package cache -yum -y clean all - -# Disable swap -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end \ No newline at end of file diff --git a/images/capi/packer/ova/linux/rhel/http/8/ks.cfg.tmpl 
b/images/capi/packer/ova/linux/rhel/http/8/ks.cfg.tmpl deleted file mode 100644 index 5607fa080e..0000000000 --- a/images/capi/packer/ova/linux/rhel/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,75 +0,0 @@ -# version=RHEL8 -# Install OS instead of upgrade -install -cdrom -auth --enableshadow --passalgo=sha512 --kickstart -# License agreement -eula --agreed -# Use text mode install -text -# Disable Initial Setup on first boot -firstboot --disable -# Keyboard layout -keyboard --vckeymap=us --xlayouts='us' -# System language -lang en_US.UTF-8 -# Network information -network --bootproto=dhcp --device=link --activate -network --hostname=rhel8 -firewall --disabled -# Root password -rootpw builder --plaintext -# SELinux configuration -selinux --permissive -# Do not configure the X Window System -skipx -# System timezone -timezone UTC -# Add a user named builder -user --groups=wheel --name=builder --password=$SSH_PASSWORD --plaintext --gecos="builder" - -# System bootloader configuration -bootloader --location=mbr --boot-drive=sda -# Clear the Master Boot Record -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -# Reboot after successful installation -reboot - -%packages --ignoremissing --excludedocs -# dnf group info minimal-environment -@^minimal-environment -@core -openssh-server -sed -sudo -python3 -open-vm-tools - -# Exclude unnecessary firmwares --iwl*firmware -%end - -# Enable/disable the following services -services --enabled=sshd - -%post --nochroot --logfile=/mnt/sysimage/root/ks-post.log -# Disable quiet boot and splash screen -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/etc/default/grub -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/boot/grub2/grubenv - -# Passwordless sudo for the user 'builder' -echo "builder ALL=(ALL) NOPASSWD: ALL" >> /mnt/sysimage/etc/sudoers.d/builder -# Remove the package cache -yum -y clean all - -# Disable swap -swapoff -a -rm -f /swapfile -sed 
-ri '/\sswap\s/s/^#?/#/' /etc/fstab - -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end \ No newline at end of file diff --git a/images/capi/packer/ova/linux/rockylinux/http/8/ks.cfg.tmpl b/images/capi/packer/ova/linux/rockylinux/http/8/ks.cfg.tmpl deleted file mode 100644 index 61c9f80758..0000000000 --- a/images/capi/packer/ova/linux/rockylinux/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,96 +0,0 @@ -# Use CDROM installation media -repo --name="AppStream" --baseurl="http://download.rockylinux.org/pub/rocky/8/AppStream/x86_64/os/" -cdrom - -# Use text install -text - -# Don't run the Setup Agent on first boot -firstboot --disabled -eula --agreed - -# Keyboard layouts -keyboard --vckeymap=us --xlayouts='us' - -# System language -lang en_US.UTF-8 - -# Network information -network --bootproto=dhcp --onboot=on --ipv6=auto --activate --hostname=capv.vm - -# Lock Root account -rootpw --lock - -# Create builder user -user --name=builder --groups=wheel --password=$SSH_PASSWORD --plaintext --shell=/bin/bash - -# System services -selinux --permissive -firewall --disabled -services --enabled="NetworkManager,sshd,chronyd" - -# System timezone -timezone UTC - -# System booloader configuration -bootloader --location=mbr --boot-drive=sda -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -skipx - -%packages --ignoremissing --excludedocs -openssh-server -open-vm-tools -sudo -sed -python3 - -# unnecessary firmware --aic94xx-firmware --atmel-firmware --b43-openfwwf --bfa-firmware --ipw2100-firmware --ipw2200-firmware --ivtv-firmware --iwl*-firmware --libertas-usb8388-firmware --ql*-firmware --rt61pci-firmware --rt73usb-firmware --xorg-x11-drv-ati-firmware --zd1211-firmware --cockpit --quota --alsa-* --fprintd-pam --intltool --microcode_ctl -%end - -%addon com_redhat_kdump --disable -%end - -reboot - -%post - -echo 'builder ALL=(ALL) NOPASSWD: ALL' >/etc/sudoers.d/builder -chmod 440 
/etc/sudoers.d/builder - -# Remove the package cache -yum -y clean all - -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -systemctl enable vmtoolsd -systemctl start vmtoolsd - -# Ensure on next boot that network devices get assigned unique IDs. -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end diff --git a/images/capi/packer/ova/rhel-8.json b/images/capi/packer/ova/rhel-8.json deleted file mode 100644 index e5ffe440ca..0000000000 --- a/images/capi/packer/ova/rhel-8.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "boot_command_prefix": " text inst.ks=", - "boot_command_suffix": "/8/ks.cfg", - "boot_media_path": "http://{{ .HTTPIP }}:{{ .HTTPPort }}", - "build_name": "rhel-8", - "distro_arch": "amd64", - "distro_name": "rhel", - "distro_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "rhel8-64", - "http_directory": "./packer/ova/linux/{{user `distro_name`}}/http/", - "iso_checksum": "48f955712454c32718dcde858dea5aca574376a1d7a4b0ed6908ac0b85597811", - "iso_checksum_type": "sha256", - "iso_url": "file:///rhel-8.4-x86_64-dvd.iso", - "os_display_name": "RHEL 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "shutdown -P now", - "vsphere_guest_os_type": "rhel8_64Guest" -} diff --git a/images/capi/packer/ova/rockylinux-8.json b/images/capi/packer/ova/rockylinux-8.json deleted file mode 100644 index e18608c3f4..0000000000 --- a/images/capi/packer/ova/rockylinux-8.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "boot_command_prefix": " text inst.ks=", - "boot_command_suffix": "hd:LABEL=cidata:/ks.cfg", - "build_name": "rockylinux-8", - "cd_content_location": "./packer/ova/linux/{{user `distro_name`}}/http/{{user `distro_version`}}/*", - "cd_label": "cidata", - "distro_arch": "amd64", - "distro_name": "rockylinux", - "distro_version": "8", - "epel_rpm_gpg_key": 
"https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "rockylinux-64", - "iso_checksum": "2c735d3b0de921bd671a0e2d08461e3593ac84f64cdaef32e3ed56ba01f74f4b", - "iso_checksum_type": "sha256", - "iso_url": "https://download.rockylinux.org/pub/rocky/8/isos/x86_64/Rocky-8.10-x86_64-minimal.iso", - "os_display_name": "RockyLinux 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "/sbin/halt -h -p", - "vmx_version": "20", - "vsphere_guest_os_type": "rockylinux_64Guest" -} diff --git a/images/capi/packer/powervs/centos-8.json b/images/capi/packer/powervs/centos-8.json deleted file mode 100644 index eb1c1a2953..0000000000 --- a/images/capi/packer/powervs/centos-8.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "build_name": "centos-streams8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "source_cos_bucket": "power-oss-bucket", - "source_cos_object": "centos-streams-8.ova.gz", - "source_cos_region": "us-south", - "ssh_username": "root" -} diff --git a/images/capi/packer/qemu/linux/centos/http/8/ks.cfg.tmpl b/images/capi/packer/qemu/linux/centos/http/8/ks.cfg.tmpl deleted file mode 100644 index 6dfed57404..0000000000 --- a/images/capi/packer/qemu/linux/centos/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,74 +0,0 @@ -# version=RHEL8 -# Install OS instead of upgrade -install -cdrom -auth --enableshadow --passalgo=sha512 --kickstart -# License agreement -eula --agreed -# Use text mode install -text -# Disable Initial Setup on first boot -firstboot --disable -# Keyboard layout -keyboard --vckeymap=us --xlayouts='us' -# System language -lang en_US.UTF-8 -# Network information -network --bootproto=dhcp --device=link --activate -network --hostname=rhel8 -firewall --disabled -# Root password -rootpw builder --plaintext -# SELinux configuration -selinux --permissive -# Do 
not configure the X Window System -skipx -# System timezone -timezone UTC -# Add a user named builder -user --groups=wheel --name=builder --password=$SSH_PASSWORD --plaintext --gecos="builder" - -# System bootloader configuration -bootloader --location=mbr --boot-drive=sda -# Clear the Master Boot Record -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -# Reboot after successful installation -reboot - -%packages --ignoremissing --excludedocs -# dnf group info minimal-environment -@^minimal-environment -@core -openssh-server -sed -sudo -python3 - -# Exclude unnecessary firmwares --iwl*firmware -%end - -# Enable/disable the following services -services --enabled=sshd - -%post --nochroot --logfile=/mnt/sysimage/root/ks-post.log -# Disable quiet boot and splash screen -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/etc/default/grub -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/boot/grub2/grubenv - -# Passwordless sudo for the user 'builder' -echo "builder ALL=(ALL) NOPASSWD: ALL" >> /mnt/sysimage/etc/sudoers.d/builder -# Remove the package cache -yum -y clean all - -# Disable swap -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end diff --git a/images/capi/packer/qemu/linux/rhel/http/8/ks.cfg.tmpl b/images/capi/packer/qemu/linux/rhel/http/8/ks.cfg.tmpl deleted file mode 100644 index 6dfed57404..0000000000 --- a/images/capi/packer/qemu/linux/rhel/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,74 +0,0 @@ -# version=RHEL8 -# Install OS instead of upgrade -install -cdrom -auth --enableshadow --passalgo=sha512 --kickstart -# License agreement -eula --agreed -# Use text mode install -text -# Disable Initial Setup on first boot -firstboot --disable -# Keyboard layout -keyboard --vckeymap=us --xlayouts='us' -# System language -lang en_US.UTF-8 -# Network information -network --bootproto=dhcp 
--device=link --activate -network --hostname=rhel8 -firewall --disabled -# Root password -rootpw builder --plaintext -# SELinux configuration -selinux --permissive -# Do not configure the X Window System -skipx -# System timezone -timezone UTC -# Add a user named builder -user --groups=wheel --name=builder --password=$SSH_PASSWORD --plaintext --gecos="builder" - -# System bootloader configuration -bootloader --location=mbr --boot-drive=sda -# Clear the Master Boot Record -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -# Reboot after successful installation -reboot - -%packages --ignoremissing --excludedocs -# dnf group info minimal-environment -@^minimal-environment -@core -openssh-server -sed -sudo -python3 - -# Exclude unnecessary firmwares --iwl*firmware -%end - -# Enable/disable the following services -services --enabled=sshd - -%post --nochroot --logfile=/mnt/sysimage/root/ks-post.log -# Disable quiet boot and splash screen -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/etc/default/grub -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/boot/grub2/grubenv - -# Passwordless sudo for the user 'builder' -echo "builder ALL=(ALL) NOPASSWD: ALL" >> /mnt/sysimage/etc/sudoers.d/builder -# Remove the package cache -yum -y clean all - -# Disable swap -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end diff --git a/images/capi/packer/qemu/linux/rockylinux/http/8/ks.cfg.tmpl b/images/capi/packer/qemu/linux/rockylinux/http/8/ks.cfg.tmpl deleted file mode 100644 index b8f8480f5c..0000000000 --- a/images/capi/packer/qemu/linux/rockylinux/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,95 +0,0 @@ -# Use CDROM installation media -repo --name="AppStream" --baseurl="http://download.rockylinux.org/pub/rocky/8/AppStream/x86_64/os/" -cdrom - -# Use text install -text - -# Don't run the Setup Agent on 
first boot -firstboot --disabled -eula --agreed - -# Keyboard layouts -keyboard --vckeymap=us --xlayouts='us' - -# System language -lang en_US.UTF-8 - -# Network information -network --bootproto=dhcp --onboot=on --ipv6=auto --activate --hostname=capv.vm - -# Lock Root account -rootpw --lock - -# Create builder user -user --name=builder --groups=wheel --password=$SSH_PASSWORD --plaintext --shell=/bin/bash - -# System services -selinux --permissive -firewall --disabled -services --enabled="NetworkManager,sshd,chronyd" - -# System timezone -timezone UTC - -# System booloader configuration -bootloader --location=mbr --boot-drive=sda -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -skipx - -%packages --ignoremissing --excludedocs -openssh-server -sudo -sed -python3 - -# unnecessary firmware --aic94xx-firmware --atmel-firmware --b43-openfwwf --bfa-firmware --ipw2100-firmware --ipw2200-firmware --ivtv-firmware --iwl*-firmware --libertas-usb8388-firmware --ql*-firmware --rt61pci-firmware --rt73usb-firmware --xorg-x11-drv-ati-firmware --zd1211-firmware --cockpit --quota --alsa-* --fprintd-pam --intltool --microcode_ctl -%end - -%addon com_redhat_kdump --disable -%end - -reboot - -%post - -echo 'builder ALL=(ALL) NOPASSWD: ALL' >/etc/sudoers.d/builder -chmod 440 /etc/sudoers.d/builder - -# Remove the package cache -yum -y clean all - -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -systemctl enable vmtoolsd -systemctl start vmtoolsd - -# Ensure on next boot that network devices get assigned unique IDs. 
-sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end diff --git a/images/capi/packer/qemu/qemu-rhel-8.json b/images/capi/packer/qemu/qemu-rhel-8.json deleted file mode 100644 index 5e6f9f5af3..0000000000 --- a/images/capi/packer/qemu/qemu-rhel-8.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "boot_command_prefix": " text inst.ks=", - "boot_command_suffix": "/8/ks.cfg", - "build_name": "rhel-8", - "distribution_version": "8", - "distro_name": "rhel", - "distro_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "RedHat_64", - "iso_checksum": "48f955712454c32718dcde858dea5aca574376a1d7a4b0ed6908ac0b85597811", - "iso_checksum_type": "sha256", - "iso_url": "rhel-8.4-x86_64-dvd.iso", - "os_display_name": "RHEL 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "shutdown -P now" -} diff --git a/images/capi/packer/qemu/qemu-rockylinux-8-cloudimg.json b/images/capi/packer/qemu/qemu-rockylinux-8-cloudimg.json deleted file mode 100644 index 8c204d7a72..0000000000 --- a/images/capi/packer/qemu/qemu-rockylinux-8-cloudimg.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "build_name": "rockylinux-8", - "cd_files": "./packer/qemu/cloud-init/*", - "disk_image": "true", - "distribution_version": "8", - "distro_arch": "amd64", - "distro_name": "rockylinux", - "distro_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "centos8-64", - "iso_checksum": "https://dl.rockylinux.org/pub/rocky/8/images/x86_64/CHECKSUM", - "iso_checksum_type": "file", - "iso_url": "https://dl.rockylinux.org/pub/rocky/8/images/x86_64/Rocky-8-GenericCloud-Base.latest.x86_64.qcow2", - "os_display_name": "RockyLinux 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "/sbin/halt -h -p" -} diff --git 
a/images/capi/packer/qemu/qemu-rockylinux-8.json b/images/capi/packer/qemu/qemu-rockylinux-8.json deleted file mode 100644 index 9453c9944b..0000000000 --- a/images/capi/packer/qemu/qemu-rockylinux-8.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "boot_command_prefix": " text inst.ks=", - "boot_command_suffix": "/8/ks.cfg", - "build_name": "rockylinux-8", - "distribution_version": "8", - "distro_arch": "amd64", - "distro_name": "rockylinux", - "distro_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "centos8-64", - "iso_checksum": "06019fd7c4f956b2b0ed37393e81c577885e4ebd518add249769846711a09dc4", - "iso_checksum_type": "sha256", - "iso_url": "https://download.rockylinux.org/pub/rocky/8/isos/x86_64/Rocky-8.9-x86_64-minimal.iso", - "os_display_name": "RockyLinux 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "/sbin/halt -h -p" -} diff --git a/images/capi/packer/raw/linux/rhel/http/8/ks.cfg.tmpl b/images/capi/packer/raw/linux/rhel/http/8/ks.cfg.tmpl deleted file mode 100644 index 6dfed57404..0000000000 --- a/images/capi/packer/raw/linux/rhel/http/8/ks.cfg.tmpl +++ /dev/null @@ -1,74 +0,0 @@ -# version=RHEL8 -# Install OS instead of upgrade -install -cdrom -auth --enableshadow --passalgo=sha512 --kickstart -# License agreement -eula --agreed -# Use text mode install -text -# Disable Initial Setup on first boot -firstboot --disable -# Keyboard layout -keyboard --vckeymap=us --xlayouts='us' -# System language -lang en_US.UTF-8 -# Network information -network --bootproto=dhcp --device=link --activate -network --hostname=rhel8 -firewall --disabled -# Root password -rootpw builder --plaintext -# SELinux configuration -selinux --permissive -# Do not configure the X Window System -skipx -# System timezone -timezone UTC -# Add a user named builder -user --groups=wheel --name=builder --password=$SSH_PASSWORD --plaintext --gecos="builder" - -# 
System bootloader configuration -bootloader --location=mbr --boot-drive=sda -# Clear the Master Boot Record -zerombr -clearpart --all --initlabel --drives=sda -part / --fstype="ext4" --grow --asprimary --label=slash --ondisk=sda - -# Reboot after successful installation -reboot - -%packages --ignoremissing --excludedocs -# dnf group info minimal-environment -@^minimal-environment -@core -openssh-server -sed -sudo -python3 - -# Exclude unnecessary firmwares --iwl*firmware -%end - -# Enable/disable the following services -services --enabled=sshd - -%post --nochroot --logfile=/mnt/sysimage/root/ks-post.log -# Disable quiet boot and splash screen -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/etc/default/grub -sed --follow-symlinks -i "s/ rhgb quiet//" /mnt/sysimage/boot/grub2/grubenv - -# Passwordless sudo for the user 'builder' -echo "builder ALL=(ALL) NOPASSWD: ALL" >> /mnt/sysimage/etc/sudoers.d/builder -# Remove the package cache -yum -y clean all - -# Disable swap -swapoff -a -rm -f /swapfile -sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - -sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - -%end diff --git a/images/capi/packer/raw/raw-rhel-8.json b/images/capi/packer/raw/raw-rhel-8.json deleted file mode 100644 index e1f1735c7a..0000000000 --- a/images/capi/packer/raw/raw-rhel-8.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "boot_command_prefix": " text inst.ks=", - "boot_command_suffix": "/8/ks.cfg", - "build_name": "rhel-8", - "build_target": "raw", - "distro_name": "rhel", - "distro_version": "8", - "epel_rpm_gpg_key": "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8", - "guest_os_type": "RedHat_64", - "iso_checksum": "a6a7418a75d721cc696d3cbdd648b5248808e7fef0f8742f518e43b46fa08139", - "iso_checksum_type": "sha256", - "iso_url": "file:///rhel-8.7-x86_64-dvd.iso", - "os_display_name": "RHEL 8", - "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm", - "shutdown_command": "shutdown -P now" 
-} diff --git a/images/capi/scripts/ci-ova.sh b/images/capi/scripts/ci-ova.sh index 80867cc7f5..6a98a8540a 100755 --- a/images/capi/scripts/ci-ova.sh +++ b/images/capi/scripts/ci-ova.sh @@ -29,12 +29,10 @@ export ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}" # The following are currently having issues running in the # test environment so are specifically excluded for now # - Photon-4 -# - RockyLinux-8 TARGETS=( $(make build-node-ova-vsphere-all --recon -d | grep "Must remake" | \ grep -v build-node-ova-vsphere-all | \ grep -E -v 'rhel|windows|efi' | \ grep -v build-node-ova-vsphere-photon-4 | \ - grep -v build-node-ova-vsphere-rockylinux-8 | \ grep -E -o 'build-node-ova-vsphere-[a-zA-Z0-9\-]+' ) ) export BOSKOS_RESOURCE_OWNER=image-builder From e1a559a80d4c2d828517202dd8989656651ca4d1 Mon Sep 17 00:00:00 2001 From: Mark Rossetti Date: Tue, 17 Mar 2026 13:27:28 -0700 Subject: [PATCH 62/90] Check powershell execution policy for Windows before trying to set a new one Signed-off-by: Mark Rossetti --- images/capi/ansible/windows/ansible_winrm.ps1 | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/images/capi/ansible/windows/ansible_winrm.ps1 b/images/capi/ansible/windows/ansible_winrm.ps1 index 04ba0997fa..9a85f5ab9a 100644 --- a/images/capi/ansible/windows/ansible_winrm.ps1 +++ b/images/capi/ansible/windows/ansible_winrm.ps1 @@ -16,7 +16,25 @@ # https://www.packer.io/docs/provisioners/ansible.html#winrm-communicator # https://www.packer.io/docs/builders/amazon/ebs#connecting-to-windows-instances-using-winrm -Set-ExecutionPolicy Unrestricted -Scope LocalMachine -Force -ErrorAction Ignore +# Log execution policies at all scopes for diagnostics +Write-Output "Current execution policy settings:" +Get-ExecutionPolicy -List | Format-Table -AutoSize | Out-String | Write-Output + +# Only set execution policy if the current effective policy is more restrictive +# than what we need. Policies like Bypass or Unrestricted are already sufficient. 
+$currentPolicy = Get-ExecutionPolicy +$sufficientPolicies = @('Bypass', 'Unrestricted') +if ($currentPolicy -notin $sufficientPolicies) { + Write-Output "Effective execution policy '$currentPolicy' is insufficient, setting to Unrestricted" + try { + Set-ExecutionPolicy Unrestricted -Scope LocalMachine -Force -ErrorAction Stop + Write-Output "Execution policy set to Unrestricted" + } catch { + Write-Output "Failed to set execution policy: $_" + } +} else { + Write-Output "Effective execution policy '$currentPolicy' is sufficient, skipping Set-ExecutionPolicy" +} # Don't set this before Set-ExecutionPolicy as it throws an error $ErrorActionPreference = "stop" From b18049552c973313d2a8e8826991426bb650358b Mon Sep 17 00:00:00 2001 From: Mark Rossetti Date: Tue, 17 Mar 2026 14:13:16 -0700 Subject: [PATCH 63/90] Check if WindowsAzureTelemtryService is installed before checking service status Signed-off-by: Mark Rossetti --- images/capi/packer/azure/scripts/sysprep.ps1 | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/images/capi/packer/azure/scripts/sysprep.ps1 b/images/capi/packer/azure/scripts/sysprep.ps1 index a540be4c7b..48a7e6eae5 100644 --- a/images/capi/packer/azure/scripts/sysprep.ps1 +++ b/images/capi/packer/azure/scripts/sysprep.ps1 @@ -16,8 +16,13 @@ # The Windows Azure Guest Agent is required for sysprep: https://www.packer.io/docs/builders/azure/arm#windows Write-Output '>>> Waiting for GA Service (RdAgent) to start ...' while ((Get-Service RdAgent).Status -ne 'Running') { Start-Sleep -s 5 } -Write-Output '>>> Waiting for GA Service (WindowsAzureTelemetryService) to start ...' -while ((Get-Service WindowsAzureTelemetryService) -and ((Get-Service WindowsAzureTelemetryService).Status -ne 'Running')) { Start-Sleep -s 5 } +$telemetryService = Get-Service WindowsAzureTelemetryService -ErrorAction SilentlyContinue +if ($telemetryService) { + Write-Output '>>> Waiting for GA Service (WindowsAzureTelemetryService) to start ...' 
+ while ($telemetryService.Status -ne 'Running') { Start-Sleep -s 5; $telemetryService.Refresh() } +} else { + Write-Output '>>> GA Service (WindowsAzureTelemetryService) not installed, skipping ...' +} Write-Output '>>> Waiting for GA Service (WindowsAzureGuestAgent) to start ...' while ((Get-Service WindowsAzureGuestAgent).Status -ne 'Running') { Start-Sleep -s 5 } Write-Output '>>> Sysprepping VM ...' From 924a0f6d13be3ee418f5fe39f7bc97ade512d4ad Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Mon, 16 Mar 2026 16:21:52 -0600 Subject: [PATCH 64/90] Fix Windows failure caused by WinRM connection reset --- images/capi/ansible/windows/ansible_winrm.ps1 | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/images/capi/ansible/windows/ansible_winrm.ps1 b/images/capi/ansible/windows/ansible_winrm.ps1 index 9a85f5ab9a..33ec89d678 100644 --- a/images/capi/ansible/windows/ansible_winrm.ps1 +++ b/images/capi/ansible/windows/ansible_winrm.ps1 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# This file is from packer documentation: +# This file is from packer documentation: # https://www.packer.io/docs/provisioners/ansible.html#winrm-communicator # https://www.packer.io/docs/builders/amazon/ebs#connecting-to-windows-instances-using-winrm @@ -61,6 +61,12 @@ cmd.exe /c winrm set "winrm/config/service/auth" '@{CredSSP="true"}' cmd.exe /c winrm set "winrm/config/listener?Address=*+Transport=HTTPS" "@{Port=`"5986`";Hostname=`"packer`";CertificateThumbprint=`"$($Cert.Thumbprint)`"}" cmd.exe /c netsh advfirewall firewall set rule group="remote administration" new enable=yes cmd.exe /c netsh firewall add portopening TCP 5986 "Port 5986" -cmd.exe /c net stop winrm cmd.exe /c sc config winrm start= auto -cmd.exe /c net start winrm + +# Restart WinRM via a scheduled task so the current session can finish +# cleanly before the service cycles. 
This prevents Packer's WinRM +# connection (which is running this script) from being severed mid-flight. +$taskAction = New-ScheduledTaskAction -Execute "powershell.exe" -Argument "-Command Restart-Service winrm -Force" +$taskTrigger = New-ScheduledTaskTrigger -Once -At ((Get-Date).AddSeconds(5)) +Register-ScheduledTask -TaskName "RestartWinRM" -Action $taskAction -Trigger $taskTrigger -User "SYSTEM" -RunLevel Highest -Force +write-output "Scheduled WinRM restart in 5 seconds" From 84a43cb6fac563e94b4b4c9789fbcae650f07f00 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Wed, 18 Mar 2026 11:15:09 -0600 Subject: [PATCH 65/90] Add Packer debug logging to Azure GH workflow --- .github/workflows/build-azure-sig.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/build-azure-sig.yaml b/.github/workflows/build-azure-sig.yaml index dd2701dffc..bd6e5c716f 100644 --- a/.github/workflows/build-azure-sig.yaml +++ b/.github/workflows/build-azure-sig.yaml @@ -25,6 +25,7 @@ # - replicated_regions - space-separated list of Azure regions to replicate the image to # - skip_test - skip the test stage # - skip_promote - skip the promote stage +# - packer_debug - enable Packer debug logging (sets PACKER_LOG=1) name: Build Azure SIG Image @@ -87,6 +88,11 @@ on: required: false type: boolean default: false + packer_debug: + description: 'Enable Packer debug logging (PACKER_LOG=1)' + required: false + type: boolean + default: false permissions: id-token: write @@ -108,6 +114,7 @@ jobs: STAGING_GALLERY_NAME: ${{ inputs.staging_gallery_name }} PACKER_FLAGS: ${{ inputs.packer_flags }} TAGS_INPUT: ${{ inputs.tags }} + PACKER_LOG: ${{ inputs.packer_debug && '1' || '0' }} steps: - name: Checkout repository From 6fe3d3bf10c14ea00a3f6c5c0cb9e15d81487d45 Mon Sep 17 00:00:00 2001 From: sivchari Date: Thu, 19 Mar 2026 14:14:13 +0900 Subject: [PATCH 66/90] delete unnecessary script Signed-off-by: sivchari --- images/capi/packer/azure/scripts/init-sig.sh | 68 
+------------------- 1 file changed, 1 insertion(+), 67 deletions(-) diff --git a/images/capi/packer/azure/scripts/init-sig.sh b/images/capi/packer/azure/scripts/init-sig.sh index 0f2d46318a..4fde65c3d7 100755 --- a/images/capi/packer/azure/scripts/init-sig.sh +++ b/images/capi/packer/azure/scripts/init-sig.sh @@ -58,71 +58,6 @@ SECURITY_TYPE_CVM_SUPPORTED_FEATURE="SecurityType=ConfidentialVmSupported" SIG_TARGET=$1 -# Accept Azure VM image terms if available and required -accept_image_terms() { - # SIG_TARGET is expected to be a global variable - if [[ -z "$SIG_TARGET" ]]; then - echo "Error: SIG_TARGET is not set. Exiting." - exit 1 - fi - # AZURE_LOCATION is expected to be a global variable - if [[ -z "$AZURE_LOCATION" ]]; then - echo "Error: AZURE_LOCATION is not set. Exiting." - exit 1 - fi - - # Resolve the JSON file path and extract necessary fields - target_json="$(realpath "packer/azure/${SIG_TARGET}.json")" - distribution=$(jq -r '.distribution' "$target_json") - distribution_version=$(jq -r '.distribution_version' "$target_json") - - # Return early if not a Windows distribution - if [[ "$distribution" != "windows" ]]; then - return - fi - - # Extract purchase plan details - plan_publisher=$(jq -r '.plan_image_publisher' "$target_json") - plan_offer=$(jq -r '.plan_image_offer' "$target_json") - plan_name=$(jq -r '.plan_image_sku' "$target_json") - plan_version=${PLAN_VERSION:-"latest"} - - # Proceed only if all plan details are valid - if [[ "$plan_publisher" == "null" || "$plan_offer" == "null" || "$plan_name" == "null" ]]; then - echo "Purchase plan details are missing. Skipping terms acceptance." 
- return - fi - - # Populate the global plan_args variable - PLAN_ARGS=( - --plan-name "${plan_name}" - --plan-product "${plan_offer}" - --plan-publisher "${plan_publisher}" - ) - - plan_urn="${plan_publisher}:${plan_offer}:${plan_name}:${plan_version}" - - # Check if the image has terms to accept - if [[ "$(az vm image show --location "$AZURE_LOCATION" --urn "${plan_urn}" -o json | jq -r '.plan')" == "null" ]]; then - echo "Image '${plan_urn}' has no terms to accept." - return - fi - - echo "Plan info: ${plan_urn}" - - # Check acceptance status and accept terms if not already accepted - if [[ "$(az vm image terms show --urn "$plan_urn" -o json | jq -r '.accepted')" == "true" ]]; then - echo "Terms for image URN: ${plan_urn} are already accepted." - return - fi - - echo "Accepting terms for image URN: ${plan_urn}" - az vm image terms accept --urn "$plan_urn" -} - -PLAN_ARGS=() -accept_image_terms - # Create a shared image gallery image definition if it does not exist create_image_definition() { if ! 
az sig image-definition show --gallery-name ${GALLERY_NAME} --gallery-image-definition ${SIG_IMAGE_DEFINITION:-capi-${SIG_SKU:-$1}} --resource-group ${RESOURCE_GROUP_NAME} -o none 2>/dev/null; then @@ -135,8 +70,7 @@ create_image_definition() { --sku ${SIG_SKU:-$2} \ --hyper-v-generation ${3} \ --os-type ${4} \ - --features ${5:-''} \ - "${plan_args[@]}" # TODO: Delete this line after the image is GA + --features ${5:-''} fi } From ed8e9049bedebc7545812fbaf6050b9cb1bf3389 Mon Sep 17 00:00:00 2001 From: Arnaud Pons Date: Fri, 20 Mar 2026 08:56:44 +0100 Subject: [PATCH 67/90] fix: use ubuntu repo vars in apt config for ubuntu 24 instead of hardcoded value --- .../setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 b/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 index 012e7dd781..818a454887 100644 --- a/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 +++ b/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 @@ -1,11 +1,11 @@ Types: deb -URIs: http://us.archive.ubuntu.com/ubuntu/ +URIs: {{ ubuntu_repo }} Suites: {{ ansible_distribution_release }} {{ ansible_distribution_release }}-updates {{ ansible_distribution_release }}-backports Components: main restricted universe multiverse Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg Types: deb -URIs: http://security.ubuntu.com/ubuntu/ +URIs: {{ ubuntu_security_repo }} Suites: {{ ansible_distribution_release }}-security Components: main restricted universe multiverse Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg \ No newline at end of file From 1ebb604a58986d3380799a4e502b8047cec47b7f Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Mon, 23 Mar 2026 09:23:27 -0600 Subject: [PATCH 68/90] Authenticate with GITHUB_TOKEN to avoid rate limiting --- 
.github/workflows/build-azure-sig.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build-azure-sig.yaml b/.github/workflows/build-azure-sig.yaml index dd2701dffc..942b2644fd 100644 --- a/.github/workflows/build-azure-sig.yaml +++ b/.github/workflows/build-azure-sig.yaml @@ -154,6 +154,8 @@ jobs: - name: Install dependencies working-directory: images/capi + env: + PACKER_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | pip install ansible ansible-lint make deps-azure From d67174f3e07cb59267c6dc20816f1a65f150fc44 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Wed, 25 Mar 2026 11:08:57 -0600 Subject: [PATCH 69/90] Pin Python path to survive tdnf upgrade --- images/capi/ansible/roles/setup/tasks/photon.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/images/capi/ansible/roles/setup/tasks/photon.yml b/images/capi/ansible/roles/setup/tasks/photon.yml index 5bc552245b..6af05298d5 100644 --- a/images/capi/ansible/roles/setup/tasks/photon.yml +++ b/images/capi/ansible/roles/setup/tasks/photon.yml @@ -12,6 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. --- +# Pin the Python interpreter to the versionless symlink so that it +# survives tdnf distro-sync upgrading the Python version (e.g. 3.11→3.12), +# which removes the old versioned binary that Ansible auto-discovered. 
+- name: Pin ansible_python_interpreter to /usr/bin/python3 + ansible.builtin.set_fact: + ansible_python_interpreter: /usr/bin/python3 + - name: Add bash_profile ansible.builtin.template: dest: /home/builder/.bash_profile From f5bfb95270508165ccc71ffe340984354bf6165d Mon Sep 17 00:00:00 2001 From: Tomy Guichard Date: Wed, 25 Mar 2026 14:58:01 +0000 Subject: [PATCH 70/90] scaleway: fix missing containerd_service_url variable --- images/capi/packer/scaleway/packer.json | 1 + 1 file changed, 1 insertion(+) diff --git a/images/capi/packer/scaleway/packer.json b/images/capi/packer/scaleway/packer.json index e9838f8596..e2b84755a3 100644 --- a/images/capi/packer/scaleway/packer.json +++ b/images/capi/packer/scaleway/packer.json @@ -80,6 +80,7 @@ "commercial_type": "DEV1-S", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, "existing_ansible_ssh_args": "{{env `ANSIBLE_SSH_ARGS`}}", From 356da477ea89add213b281b5922fbc165b5aca32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?K=C3=A9vin=20PEREZ?= Date: Thu, 26 Mar 2026 13:25:25 +0100 Subject: [PATCH 71/90] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Upgrade=20ContainerD?= =?UTF-8?q?=20(1.7=20->=202.2)=20&=20Runc=20Version=20(1.2=20->=201.3)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Upgrade of ContainerD is mandatory for Kubernetes 1.36 https://kubernetes.io/blog/2025/08/27/kubernetes-v1-34-release/#kubernetes-to-end-containerd-1-x-support-in-v1-36 The 2.2 release of ContainerD is not LTS. We will be able to upgrade to 2.3 later (maybe in May 2026) once the LTS is out. 
--- .../roles/containerd/defaults/main.yml | 2 +- images/capi/packer/config/containerd.json | 4 +- .../packer/config/ppc64le/containerd.json | 4 +- images/capi/packer/goss/goss-command.yaml | 48 +++++++++++++++++-- 4 files changed, 49 insertions(+), 9 deletions(-) diff --git a/images/capi/ansible/roles/containerd/defaults/main.yml b/images/capi/ansible/roles/containerd/defaults/main.yml index d3642188c2..a470d15d2e 100644 --- a/images/capi/ansible/roles/containerd/defaults/main.yml +++ b/images/capi/ansible/roles/containerd/defaults/main.yml @@ -19,5 +19,5 @@ containerd_baseurl: https://github.com/containerd/containerd/releases/download/v containerd_filename: "containerd-{{ containerd_version }}-{{ system }}-{{ arch }}.tar.gz" containerd_url: "{{ containerd_baseurl }}/{{ containerd_filename }}" containerd_runc_url: "https://github.com/opencontainers/runc/releases/download/v{{ runc_version }}/runc.{{ arch }}" -runc_version: "1.2.3" +runc_version: "1.3.4" containerd_runc_checksum_url: "https://github.com/opencontainers/runc/releases/download/v{{ runc_version }}/runc.sha256sum" diff --git a/images/capi/packer/config/containerd.json b/images/capi/packer/config/containerd.json index 6bdd1ee574..56f4590d38 100644 --- a/images/capi/packer/config/containerd.json +++ b/images/capi/packer/config/containerd.json @@ -3,6 +3,6 @@ "containerd_cri_socket": "/var/run/containerd/containerd.sock", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", - "containerd_version": "1.7.29", - "runc_version": "1.2.8" + "containerd_version": "2.2.2", + "runc_version": "1.3.4" } diff --git a/images/capi/packer/config/ppc64le/containerd.json b/images/capi/packer/config/ppc64le/containerd.json index b7e2aca97b..6e04e0334f 100644 --- a/images/capi/packer/config/ppc64le/containerd.json +++ b/images/capi/packer/config/ppc64le/containerd.json @@ -1,4 +1,4 @@ { - "containerd_sha256": "b2d4e44946e55a10835a327cbd98c0c2063011bbdebb95ef8c5e5677312f1d29", - "containerd_version": 
"1.7.25" + "containerd_sha256": "8f7a8190f2a635cd0e5580a131408a275ba277f7a04edffba4a4005960093987", + "containerd_version": "2.2.2" } diff --git a/images/capi/packer/goss/goss-command.yaml b/images/capi/packer/goss/goss-command.yaml index 2bd64e54ed..0c1d11a451 100644 --- a/images/capi/packer/goss/goss-command.yaml +++ b/images/capi/packer/goss/goss-command.yaml @@ -198,25 +198,65 @@ command: - "{{.Vars.containerd_version}}" timeout: 30000 {{ if (semverCompare ">=2.0.0" .Vars.containerd_version) }} - Correct Containerd config: - exec: "\"/Program Files/containerd/containerd.exe\" config dump" + Correct Containerd sandbox config: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'sandbox = \\\"{{.Vars.pause_image}}\\\"'\"" exit-status: 0 stdout: - "sandbox = \"{{.Vars.pause_image}}\"" + timeout: 30000 + Correct Containerd CNI conf_dir: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'conf_dir = \\\"C:/etc/cni/net.d\\\"'\"" + exit-status: 0 + stdout: - "conf_dir = \"C:/etc/cni/net.d\"" + timeout: 30000 + Correct Containerd CNI bin_dir: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'bin_dir = \\\"C:/opt/cni/bin\\\"'\"" + exit-status: 0 + stdout: - "bin_dir = \"C:/opt/cni/bin\"" + timeout: 30000 + Correct Containerd root: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'root = \\\"C:\\\\ProgramData\\\\containerd\\\\root\\\"'\"" + exit-status: 0 + stdout: - "root = \"C:\\\\ProgramData\\\\containerd\\\\root\"" + timeout: 30000 + Correct Containerd state: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'state = \\\"C:\\\\ProgramData\\\\containerd\\\\state\\\"'\"" + exit-status: 0 + stdout: - "state = 
\"C:\\\\ProgramData\\\\containerd\\\\state\"" timeout: 30000 {{ else }} - Correct Containerd config: - exec: "\"/Program Files/containerd/containerd.exe\" config dump" + Correct Containerd sandbox config: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'sandbox_image = \\\"{{.Vars.pause_image}}\\\"'\"" exit-status: 0 stdout: - "sandbox_image = \"{{.Vars.pause_image}}\"" + timeout: 30000 + Correct Containerd CNI conf_dir: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'conf_dir = \\\"C:/etc/cni/net.d\\\"'\"" + exit-status: 0 + stdout: - "conf_dir = \"C:/etc/cni/net.d\"" + timeout: 30000 + Correct Containerd CNI bin_dir: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'bin_dir = \\\"C:/opt/cni/bin\\\"'\"" + exit-status: 0 + stdout: - "bin_dir = \"C:/opt/cni/bin\"" + timeout: 30000 + Correct Containerd root: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'root = \\\"C:\\\\ProgramData\\\\containerd\\\\root\\\"'\"" + exit-status: 0 + stdout: - "root = \"C:\\\\ProgramData\\\\containerd\\\\root\"" + timeout: 30000 + Correct Containerd state: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'state = \\\"C:\\\\ProgramData\\\\containerd\\\\state\\\"'\"" + exit-status: 0 + stdout: - "state = \"C:\\\\ProgramData\\\\containerd\\\\state\"" timeout: 30000 {{ end }} From 38157df15cb9b08d79ce301c6db060f37e5c0c0b Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Mon, 30 Mar 2026 06:10:17 -0600 Subject: [PATCH 72/90] Windows build fixes (#1963) * Reduce Packer ansible provisioner max_retries from 5 to 1 for Windows Azure builds * Fix kubernetes_base_url path: binaries/node/windows -> bin/windows --- images/capi/packer/ami/packer-windows.json | 
2 +- images/capi/packer/azure/packer-windows.json | 4 ++-- images/capi/packer/nutanix/packer-windows.json | 2 +- images/capi/packer/oci/packer-windows.json | 2 +- images/capi/packer/ova/packer-windows.json | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/images/capi/packer/ami/packer-windows.json b/images/capi/packer/ami/packer-windows.json index d2ed468ded..e393817220 100644 --- a/images/capi/packer/ami/packer-windows.json +++ b/images/capi/packer/ami/packer-windows.json @@ -183,7 +183,7 @@ "ib_version": "{{env `IB_VERSION`}}", "image_name": "capa-ami-{{user `build_name`}}-{{user `kubernetes_semver`}}-{{user `build_timestamp`}}", "kms_key_id": "", - "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/binaries/node/windows/{{user `kubernetes_goarch`}}", + "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/bin/windows/{{user `kubernetes_goarch`}}", "manifest_output": "manifest.json", "nssm_url": null, "prepull": null, diff --git a/images/capi/packer/azure/packer-windows.json b/images/capi/packer/azure/packer-windows.json index 482c3b2644..a1f0361ffe 100644 --- a/images/capi/packer/azure/packer-windows.json +++ b/images/capi/packer/azure/packer-windows.json @@ -139,7 +139,7 @@ "--extra-vars", "gmsa_keyvault_url={{user `gmsa_keyvault_url`}}" ], - "max_retries": 5, + "max_retries": 1, "pause_before": "15s", "playbook_file": "ansible/windows/node_windows.yml", "type": "ansible", @@ -226,7 +226,7 @@ "image_publisher": "", "image_sku": "", "image_version": "latest", - "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/binaries/node/windows/{{user `kubernetes_goarch`}}", + "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/bin/windows/{{user `kubernetes_goarch`}}", "manifest_output": "manifest.json", 
"nssm_url": null, "os_disk_size_gb": "", diff --git a/images/capi/packer/nutanix/packer-windows.json b/images/capi/packer/nutanix/packer-windows.json index a6c43475e0..070196e294 100644 --- a/images/capi/packer/nutanix/packer-windows.json +++ b/images/capi/packer/nutanix/packer-windows.json @@ -142,7 +142,7 @@ "image_delete": "false", "image_export": "false", "image_name": "{{user `build_name`}}-kube-{{user `kubernetes_semver`}}", - "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/binaries/node/windows/{{user `kubernetes_goarch`}}", + "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/bin/windows/{{user `kubernetes_goarch`}}", "kubernetes_container_registry": null, "kubernetes_http_package_url": "", "kubernetes_http_source": null, diff --git a/images/capi/packer/oci/packer-windows.json b/images/capi/packer/oci/packer-windows.json index 4eadb67cde..123f1181b7 100644 --- a/images/capi/packer/oci/packer-windows.json +++ b/images/capi/packer/oci/packer-windows.json @@ -127,7 +127,7 @@ "containerd_version": null, "ib_version": "{{env `IB_VERSION`}}", "image_version": "latest", - "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/binaries/node/windows/{{user `kubernetes_goarch`}}", + "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/bin/windows/{{user `kubernetes_goarch`}}", "manifest_output": "manifest.json", "nssm_url": null, "ocpus": "2", diff --git a/images/capi/packer/ova/packer-windows.json b/images/capi/packer/ova/packer-windows.json index 820aeddefb..065b0bfdc8 100644 --- a/images/capi/packer/ova/packer-windows.json +++ b/images/capi/packer/ova/packer-windows.json @@ -251,7 +251,7 @@ "http_port_max": "", "http_port_min": "", "ib_version": "{{env `IB_VERSION`}}", - "kubernetes_base_url": 
"https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/binaries/node/windows/{{user `kubernetes_goarch`}}", + "kubernetes_base_url": "https://kubernetesreleases.blob.core.windows.net/kubernetes/{{user `kubernetes_semver`}}/bin/windows/{{user `kubernetes_goarch`}}", "kubernetes_http_package_url": "", "kubernetes_typed_version": "kube-{{user `kubernetes_semver`}}", "manifest_output": "manifest.json", From 562d8a531848dc0bb75b85123a12c0876df9fe78 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Mon, 2 Feb 2026 20:47:37 +0000 Subject: [PATCH 73/90] Revert "fix: update Azure build VM size to a more modern v2 generation" This reverts commit ce0d0b8406c1f2621c7dc3e7a99f139e28e047c1. --- images/capi/packer/azure/azure-config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/packer/azure/azure-config.json b/images/capi/packer/azure/azure-config.json index 15dc8d11ac..9949048dd9 100644 --- a/images/capi/packer/azure/azure-config.json +++ b/images/capi/packer/azure/azure-config.json @@ -5,5 +5,5 @@ "containerd_wasm_shims_runtimes": "lunatic,slight,spin,wws", "subscription_id": "{{env `AZURE_SUBSCRIPTION_ID`}}", "use_azure_cli_auth": "{{env `USE_AZURE_CLI_AUTH`}}", - "vm_size": "Standard_B2als_v2" + "vm_size": "Standard_B2ms" } From 6ca73dc308e3efaa24126e2d322a42d26f38e08b Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Mon, 30 Mar 2026 14:04:47 -0600 Subject: [PATCH 74/90] Update Azure build regions --- images/capi/scripts/ci-azure-e2e.sh | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/images/capi/scripts/ci-azure-e2e.sh b/images/capi/scripts/ci-azure-e2e.sh index 4aa39762cb..f80065a329 100755 --- a/images/capi/scripts/ci-azure-e2e.sh +++ b/images/capi/scripts/ci-azure-e2e.sh @@ -66,11 +66,20 @@ fi set -o nounset get_random_region() { - local REGIONS=("australiaeast" "canadacentral" "eastus" "eastus2" "northcentralus" "northeurope" "uksouth" "westeurope" "westus2") + # 
Regions appear more than once to represent the approximate relative amount + # of Standard BS v2 quota in each region. + local REGIONS=( + "australiaeast" + "canadacentral" "canadacentral" "canadacentral" + "francecentral" + "germanywestcentral" + "switzerlandnorth" "switzerlandnorth" "switzerlandnorth" + "uksouth" + ) echo "${REGIONS[${RANDOM} % ${#REGIONS[@]}]}" } -export VALID_CVM_LOCATIONS=("eastus" "northeurope" "westeurope" "westus") +export VALID_CVM_LOCATIONS=("eastus" "germanywestcentral" "northeurope" "switzerlandnorth" "uksouth" "westeurope" "westus") get_random_cvm_region() { echo "${VALID_CVM_LOCATIONS[${RANDOM} % ${#VALID_CVM_LOCATIONS[@]}]}" } From 01c5223c281448a31ee3370c9b5eecf316d55623 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Wed, 1 Apr 2026 07:35:04 -0600 Subject: [PATCH 75/90] Update docs for image-builder v0.1.50 --- RELEASE.md | 4 ++-- docs/book/src/capi/container-image.md | 10 +++++----- docs/book/src/capi/releasing.md | 8 ++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/RELEASE.md b/RELEASE.md index 024a006b68..a120cce83a 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,11 +1,11 @@ # Image Builder Releases -The current release of Image Builder is [v0.1.49][] (March 17, 2026). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49`. +The current release of Image Builder is [v0.1.50][] (April 1, 2026). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50`. ## Release Process For more detail about image-builder project releases, see the [Image Builder Book][]. 
-[v0.1.49]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.49 +[v0.1.50]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.50 [Image Builder Book]: https://image-builder.sigs.k8s.io/capi/releasing.html diff --git a/docs/book/src/capi/container-image.md b/docs/book/src/capi/container-image.md index 306ab6b576..c843ba07e6 100644 --- a/docs/book/src/capi/container-image.md +++ b/docs/book/src/capi/container-image.md @@ -18,7 +18,7 @@ Run the docker build target of Makefile The latest image-builder container image release is available here: ```commandline -docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49 +docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50 ``` ### Examples @@ -27,7 +27,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v - If the AWS CLI is already installed on your machine, you can simply mount the `~/.aws` folder that stores all the required credentials. ```commandline - docker run -it --rm -v /Users//.aws:/home/imagebuilder/.aws registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49 build-ami-ubuntu-2404 + docker run -it --rm -v /Users//.aws:/home/imagebuilder/.aws registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50 build-ami-ubuntu-2404 ``` - Another alternative is to use an `aws-creds.env` file to load the credentials and pass it during docker run. 
@@ -37,7 +37,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v ``` ```commandline - docker run -it --rm --env-file aws-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49 build-ami-ubuntu-2404 + docker run -it --rm --env-file aws-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50 build-ami-ubuntu-2404 ``` - AZURE @@ -51,7 +51,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v ``` ```commandline - docker run -it --rm --env-file az-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49 build-azure-sig-ubuntu-2404 + docker run -it --rm --env-file az-creds.env registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50 build-azure-sig-ubuntu-2404 ``` - Proxmox @@ -83,7 +83,7 @@ docker pull registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v - Docker's `--net=host` option to ensure http server starts with the host IP and not the Docker container IP. This option is Linux specific and thus implies that it can be run only from a Linux machine. ```commandline - docker run -it --rm --net=host --env PACKER_VAR_FILES=/home/imagebuilder/vsphere.json -v :/home/imagebuilder/vsphere.json registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49 build-node-ova-vsphere-ubuntu-2404 + docker run -it --rm --net=host --env PACKER_VAR_FILES=/home/imagebuilder/vsphere.json -v :/home/imagebuilder/vsphere.json registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50 build-node-ova-vsphere-ubuntu-2404 ``` In addition to this, further customizations can be done as discussed [here](./capi.md#customization). 
diff --git a/docs/book/src/capi/releasing.md b/docs/book/src/capi/releasing.md index 30d6647761..f9ba7fbaa3 100644 --- a/docs/book/src/capi/releasing.md +++ b/docs/book/src/capi/releasing.md @@ -1,6 +1,6 @@ # Image Builder Releases -The current release of Image Builder is [v0.1.49][] (March 17, 2026). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.49`. +The current release of Image Builder is [v0.1.50][] (April 1, 2026). The corresponding container image is `registry.k8s.io/scl-image-builder/cluster-node-image-builder-amd64:v0.1.50`. ## Release Process @@ -22,7 +22,7 @@ Releases in image-builder follow [semantic versioning][semver] conventions. Curr - *If signing tags with GPG, makes your key available to the `git tag` command.* - Create a new tag: - `export IB_VERSION=v0.1.x` - - *Replace `x` with the next patch version. For example: `v0.1.50`.* + - *Replace `x` with the next patch version. For example: `v0.1.51`.* - `git tag -s -m "Image Builder ${IB_VERSION}" ${IB_VERSION}` - `git push upstream ${IB_VERSION}` @@ -77,11 +77,11 @@ Wait for this PR to merge before communicating the release to users, so image-bu In the [#image-builder channel][] on the Kubernetes Slack, post a message announcing the new release. Include a link to the GitHub release and a thanks to the contributors: ``` -Image-builder v0.1.50 is now available: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.50 +Image-builder v0.1.51 is now available: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.51 Thanks to all contributors! 
``` -[v0.1.49]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.49 +[v0.1.50]: https://github.com/kubernetes-sigs/image-builder/releases/tag/v0.1.50 [#image-builder channel]: https://kubernetes.slack.com/archives/C01E0Q35A8J [Personal access tokens]: https://github.com/settings/tokens [post-image-builder-push-images]: https://prow.k8s.io/?repo=kubernetes-sigs%2Fimage-builder&type=postsubmit&job=post-image-builder-push-images From f60f2a56d55b3a3aa80a73a3874625b2cce81afc Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Wed, 1 Apr 2026 08:16:52 -0600 Subject: [PATCH 76/90] Bump ansible to v2.16.18 --- images/capi/hack/utils.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/capi/hack/utils.sh b/images/capi/hack/utils.sh index 4eda08563d..68efa371c8 100755 --- a/images/capi/hack/utils.sh +++ b/images/capi/hack/utils.sh @@ -15,7 +15,7 @@ # limitations under the License. # Note: ansible-core v2.16 supports Python 3.10-3.12. -_version_ansible_core="2.16.16" +_version_ansible_core="2.16.18" case "${OSTYPE}" in linux*) From c7ec99c62b0301a5e40e56c83d153b5dfd662c14 Mon Sep 17 00:00:00 2001 From: Josh French Date: Mon, 6 Apr 2026 15:41:47 -0400 Subject: [PATCH 77/90] set default ubuntu repos for setup role These are only available to Ansible because the templates pass them in via ansible_common_vars. Set defaults so this role doesn't depend on the specifics of the build pipeline. 
--- images/capi/ansible/roles/setup/defaults/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/images/capi/ansible/roles/setup/defaults/main.yml b/images/capi/ansible/roles/setup/defaults/main.yml index 8432b7868d..8f5a40710d 100644 --- a/images/capi/ansible/roles/setup/defaults/main.yml +++ b/images/capi/ansible/roles/setup/defaults/main.yml @@ -19,6 +19,8 @@ redhat_epel_rpm: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noa epel_rpm_gpg_key: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 rpms: "" extra_rpms: "" +ubuntu_repo: "http://us.archive.ubuntu.com/ubuntu/" +ubuntu_security_repo: "http://security.ubuntu.com/ubuntu/" disable_public_repos: false external_binary_path: "{{ '/opt/bin' if ansible_os_family == 'Flatcar' else '/usr/local/bin' }}" From 6aca20f1ea06657a778663eff4c27d19754e02be Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Tue, 7 Apr 2026 06:41:33 -0600 Subject: [PATCH 78/90] Refactor Azure GH workflow smoke test (#1969) * Refactor Azure GH workflow smoke test * Check containerd version in goss tests * Use mktemp and reduce timeout * Fail on kubeadm init --dry-run failures * Add northcentralus to replicated regions default * De-duplicate replicated regions * Default build region to northcentralus * Prepend comprehensive PATH to help find containerd w/ goss test --- .github/workflows/build-azure-sig.yaml | 369 ++++++++++++++---- .../test-templates/linux/kustomization.yaml | 7 - .../azuremachinetemplate-controlplane.yaml | 11 - .../patches/azuremachinetemplate-windows.yaml | 11 - .../azuremachinetemplate-workload.yaml | 11 - .../patches/kubeadmcontrolplane-windows.yaml | 8 - .../patches/machinedeployment-windows.yaml | 8 - .../test-templates/windows/kustomization.yaml | 8 - images/capi/packer/goss/goss-command.yaml | 5 +- 9 files changed, 299 insertions(+), 139 deletions(-) delete mode 100644 images/capi/packer/azure/scripts/test-templates/linux/kustomization.yaml delete mode 100644 
images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-controlplane.yaml delete mode 100644 images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-windows.yaml delete mode 100644 images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-workload.yaml delete mode 100644 images/capi/packer/azure/scripts/test-templates/patches/kubeadmcontrolplane-windows.yaml delete mode 100644 images/capi/packer/azure/scripts/test-templates/patches/machinedeployment-windows.yaml delete mode 100644 images/capi/packer/azure/scripts/test-templates/windows/kustomization.yaml diff --git a/.github/workflows/build-azure-sig.yaml b/.github/workflows/build-azure-sig.yaml index 6a81b108e3..dd0e60586d 100644 --- a/.github/workflows/build-azure-sig.yaml +++ b/.github/workflows/build-azure-sig.yaml @@ -73,16 +73,29 @@ on: required: false type: string default: '' + azure_location: + description: 'Azure region for image build (must match community gallery location and have VM quota for testing)' + required: false + type: choice + options: + - northcentralus + - canadacentral + - switzerlandnorth + - australiaeast + - francecentral + - germanywestcentral + - uksouth + default: 'northcentralus' replicated_regions: description: 'Space-separated Azure regions to replicate the image to (image build region is always included)' required: false type: string - default: 'australiaeast canadacentral eastus eastus2 francecentral germanywestcentral northeurope switzerlandnorth uksouth westeurope' + default: 'australiaeast canadacentral eastus eastus2 francecentral germanywestcentral northcentralus northeurope switzerlandnorth uksouth westeurope' skip_test: description: 'Skip the test stage' required: false type: boolean - default: true + default: false skip_promote: description: 'Skip the promote stage (requires manual approval)' required: false @@ -112,6 +125,7 @@ jobs: OS_VERSION: ${{ inputs.os_version }} RESOURCE_GROUP: ${{ inputs.resource_group 
}} STAGING_GALLERY_NAME: ${{ inputs.staging_gallery_name }} + AZURE_LOCATION: ${{ inputs.azure_location }} PACKER_FLAGS: ${{ inputs.packer_flags }} TAGS_INPUT: ${{ inputs.tags }} PACKER_LOG: ${{ inputs.packer_debug && '1' || '0' }} @@ -257,12 +271,17 @@ jobs: # --------------------------------------------------------------------------- # Test # --------------------------------------------------------------------------- + # Boots a single VM from the image built in the previous stage and verifies + # that the key Kubernetes components (kubelet, kubeadm, containerd/dockerd) + # are present and report the expected version. For Linux images, the VM is + # accessed via SSH; for Windows images, via az vm run-command. + # --------------------------------------------------------------------------- test: name: Test SIG Image needs: build if: ${{ !inputs.skip_test }} runs-on: ubuntu-latest - timeout-minutes: 120 + timeout-minutes: 15 env: KUBERNETES_VERSION: ${{ inputs.kubernetes_version }} @@ -285,105 +304,307 @@ jobs: echo "PUBLISHING_INFO=${PUBLISHING_INFO}" echo "OS_TYPE=$(echo "$PUBLISHING_INFO" | jq -r .os_type)" >> $GITHUB_OUTPUT - echo "MANAGED_IMAGE_RESOURCE_GROUP_NAME=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_resource_group_name)" >> $GITHUB_OUTPUT - echo "MANAGED_IMAGE_NAME=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_name)" >> $GITHUB_OUTPUT echo "MANAGED_IMAGE_ID=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_id)" >> $GITHUB_OUTPUT echo "MANAGED_IMAGE_LOCATION=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_location)" >> $GITHUB_OUTPUT - echo "MANAGED_IMAGE_SHARED_IMAGE_GALLERY_ID=$(echo "$PUBLISHING_INFO" | jq -r .managed_image_shared_image_gallery_id)" >> $GITHUB_OUTPUT - echo "SHARED_IMAGE_GALLERY_RESOURCE_GROUP=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_resource_group)" >> $GITHUB_OUTPUT - echo "SHARED_IMAGE_GALLERY_NAME=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_name)" >> $GITHUB_OUTPUT - echo 
"SHARED_IMAGE_GALLERY_IMAGE_NAME=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_name)" >> $GITHUB_OUTPUT - echo "SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_version)" >> $GITHUB_OUTPUT echo "TAGS=$(echo "$PUBLISHING_INFO" | jq -r .tags)" >> $GITHUB_OUTPUT - - name: Configure Kubernetes version - uses: ./.github/actions/configure-k8s-version + # Read expected containerd version from the packer config + CONTAINERD_VERSION=$(jq -r .containerd_version images/capi/packer/config/containerd.json) + echo "CONTAINERD_VERSION=${CONTAINERD_VERSION}" + echo "CONTAINERD_VERSION=${CONTAINERD_VERSION}" >> $GITHUB_OUTPUT + + - name: Azure Login + uses: azure/login@v2 with: - kubernetes_version: ${{ inputs.kubernetes_version }} + client-id: ${{ secrets.AZURE_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} - - name: Setup kustomize - working-directory: images/capi + - name: Create test resource group + env: + MANAGED_IMAGE_LOCATION: ${{ steps.vars.outputs.MANAGED_IMAGE_LOCATION }} + TAGS: ${{ steps.vars.outputs.TAGS }} run: | set -euo pipefail - export PATH=${PATH}:.local/bin - ./packer/azure/scripts/ensure-kustomize.sh + TEST_RESOURCE_GROUP="image-builder-test-${GITHUB_RUN_ID}" + echo "TEST_RESOURCE_GROUP=${TEST_RESOURCE_GROUP}" >> $GITHUB_ENV + az group create \ + -n "${TEST_RESOURCE_GROUP}" \ + -l "${MANAGED_IMAGE_LOCATION}" \ + --tags ${TAGS:-} - - name: Generate cluster template - working-directory: images/capi + - name: Create test VM + id: vm env: + MANAGED_IMAGE_ID: ${{ steps.vars.outputs.MANAGED_IMAGE_ID }} + MANAGED_IMAGE_LOCATION: ${{ steps.vars.outputs.MANAGED_IMAGE_LOCATION }} OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} + TAGS: ${{ steps.vars.outputs.TAGS }} run: | set -euo pipefail - export PATH=${PATH}:.local/bin - if [ "$OS_TYPE" == "Windows" ]; then - kustomize build --load-restrictor LoadRestrictionsNone 
packer/azure/scripts/test-templates/windows/ > packer/azure/scripts/test-templates/cluster-template.yaml + VM_NAME="image-test-vm" + echo "VM_NAME=${VM_NAME}" >> $GITHUB_ENV + + # Generate SSH key for Linux VMs + TMPDIR=$(mktemp -d) + echo "TMPDIR=${TMPDIR}" >> $GITHUB_ENV + ssh-keygen -t rsa -b 2048 -f "${TMPDIR}/sshkey" -N "" -q + + create_args=( + --resource-group "${TEST_RESOURCE_GROUP}" + --name "${VM_NAME}" + --image "${MANAGED_IMAGE_ID}" + --size "Standard_D2s_v3" + --location "${MANAGED_IMAGE_LOCATION}" + --tags ${TAGS:-} + ) + + if [[ "${OS_TYPE}" == "Windows" ]]; then + create_args+=( + --admin-username "capi" + --admin-password "Capi\$Test$(date +%s | tail -c 6)!" + ) else - kustomize build --load-restrictor LoadRestrictionsNone packer/azure/scripts/test-templates/linux/ > packer/azure/scripts/test-templates/cluster-template.yaml + create_args+=( + --admin-username "capi" + --ssh-key-values "${TMPDIR}/sshkey.pub" + --authentication-type ssh + ) fi - - name: Azure Login - uses: azure/login@v2 - with: - client-id: ${{ secrets.AZURE_CLIENT_ID }} - tenant-id: ${{ secrets.AZURE_TENANT_ID }} - subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + echo "Creating test VM '${VM_NAME}' from image …" + az vm create "${create_args[@]}" \ + --public-ip-sku Standard \ + --output json | tee "${TMPDIR}/vm-create.json" - - name: Install Azure CAPI extension + PUBLIC_IP=$(jq -r '.publicIpAddress' "${TMPDIR}/vm-create.json") + echo "VM public IP: ${PUBLIC_IP}" + echo "PUBLIC_IP=${PUBLIC_IP}" >> $GITHUB_ENV + + - name: Wait for VM to be ready + env: + OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} run: | set -euo pipefail - # Install the Azure CLI Cluster API extension from the official release - az extension add --name capi --yes || az extension add --source "https://github.com/Azure/azure-capi-cli-extension/releases/latest/download/capi-0.0.vnext-py2.py3-none-any.whl" --yes + if [[ "${OS_TYPE}" == "Windows" ]]; then + echo "Waiting for Windows VM agent to be ready …" + 
timeout 300 bash -c " + until az vm get-instance-view \ + --resource-group '${TEST_RESOURCE_GROUP}' \ + --name '${VM_NAME}' \ + --query 'instanceView.vmAgent.statuses[0].displayStatus' \ + -o tsv 2>/dev/null | grep -q 'Ready'; do + sleep 10 + done + " + else + echo "Waiting for SSH to become available …" + timeout 300 bash -c " + until ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 \ + -i "${TMPDIR}/sshkey" capi@'${PUBLIC_IP}' 'echo ready' 2>/dev/null; do + sleep 10 + done + " + fi - # Install required binaries - mkdir -p ~/test-binaries - export PATH=${PATH}:~/test-binaries - az capi install -a -ip ~/test-binaries + echo "VM is ready" - - name: Create test cluster - working-directory: images/capi + - name: Run smoke tests env: OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} - MANAGED_IMAGE_LOCATION: ${{ steps.vars.outputs.MANAGED_IMAGE_LOCATION }} - TAGS: ${{ steps.vars.outputs.TAGS }} + CONTAINERD_VERSION: ${{ steps.vars.outputs.CONTAINERD_VERSION }} run: | set -euo pipefail - TEST_TEMPLATE="packer/azure/scripts/test-templates/cluster-template.yaml" - export PATH=${PATH}:~/test-binaries + EXPECTED_K8S="v${KUBERNETES_VERSION}" + FAILURES=0 + + if [[ "${OS_TYPE}" == "Windows" ]]; then + echo "::group::Windows smoke tests" + + az vm run-command invoke \ + --resource-group "${TEST_RESOURCE_GROUP}" \ + --name "${VM_NAME}" \ + --command-id RunPowerShellScript \ + --scripts ' + $ErrorActionPreference = "Stop" + Write-Output "=== kubelet ===" + & "C:\k\kubelet.exe" --version + Write-Output "=== kubeadm ===" + & "C:\k\kubeadm.exe" version -o short + Write-Output "=== kubectl ===" + & "C:\k\kubectl.exe" version --client -o yaml + Write-Output "=== containerd ===" + & "C:\Program Files\containerd\containerd.exe" --version + Write-Output "=== crictl ===" + & "C:\k\crictl.exe" version + Write-Output "=== containerd service ===" + Get-Service containerd | Format-Table -AutoSize + Write-Output "=== System info ===" + Get-ComputerInfo | Select-Object WindowsProductName, 
OsVersion, OsArchitecture + ' --output json | tee /tmp/smoke-output.json + + OUTPUT=$(jq -r '.value[0].message' /tmp/smoke-output.json) + echo "${OUTPUT}" + + echo "::endgroup::" + + # Verify Kubernetes version + if echo "${OUTPUT}" | grep -q "${EXPECTED_K8S}"; then + echo "✓ Kubernetes version ${EXPECTED_K8S} confirmed" + else + echo "::error::Expected Kubernetes version ${EXPECTED_K8S} not found in output" + FAILURES=$((FAILURES + 1)) + fi + + # Verify containerd version + if echo "${OUTPUT}" | grep -q "${CONTAINERD_VERSION}"; then + echo "✓ containerd version ${CONTAINERD_VERSION} confirmed" + else + echo "::error::Expected containerd version ${CONTAINERD_VERSION} not found in output" + FAILURES=$((FAILURES + 1)) + fi + + else + SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=10 -i ${TMPDIR}/sshkey" + SSH_CMD="ssh ${SSH_OPTS} capi@${PUBLIC_IP}" + + echo "::group::Linux smoke tests — version checks" + + echo "--- OS info ---" + ${SSH_CMD} 'cat /etc/os-release' || true + + echo "--- kubelet version ---" + KUBELET_VERSION=$(${SSH_CMD} 'kubelet --version') + echo "${KUBELET_VERSION}" + + echo "--- kubeadm version ---" + KUBEADM_VERSION=$(${SSH_CMD} 'kubeadm version -o short') + echo "${KUBEADM_VERSION}" + + echo "--- kubectl version ---" + ${SSH_CMD} 'kubectl version --client -o yaml 2>/dev/null || kubectl version --client' || true + + echo "--- containerd version ---" + ACTUAL_CONTAINERD=$(${SSH_CMD} 'containerd --version') + echo "${ACTUAL_CONTAINERD}" + + echo "--- crictl version ---" + ${SSH_CMD} 'sudo crictl version' || true + + echo "::endgroup::" + + echo "::group::Linux smoke tests — post-boot validation" + + echo "--- cloud-init status ---" + CLOUD_INIT_STATUS=$(${SSH_CMD} 'cloud-init status 2>/dev/null || echo "not available"') + echo "${CLOUD_INIT_STATUS}" + + echo "--- systemd service states ---" + ${SSH_CMD} 'systemctl is-enabled kubelet containerd' || true + ${SSH_CMD} 'systemctl is-active containerd' || true + + echo "--- containerd runtime 
ready (crictl info) ---" + ${SSH_CMD} 'sudo crictl info' || true + + echo "--- pre-pulled container images ---" + ${SSH_CMD} "sudo crictl images" || true + + echo "--- kubeadm init dry-run ---" + if ${SSH_CMD} "sudo kubeadm init --dry-run --kubernetes-version '${EXPECTED_K8S}'" 2>&1; then + echo "✓ kubeadm init dry-run succeeded" + else + echo "::error::kubeadm init --dry-run failed" + FAILURES=$((FAILURES + 1)) + fi - params=() - if [ "$OS_TYPE" == "Windows" ]; then - params+=(--windows) + echo "::endgroup::" + + # Verify Kubernetes version + if echo "${KUBELET_VERSION}" | grep -q "${EXPECTED_K8S}"; then + echo "✓ Kubernetes version ${EXPECTED_K8S} confirmed (kubelet)" + else + echo "::error::Expected kubelet version ${EXPECTED_K8S} but got: ${KUBELET_VERSION}" + FAILURES=$((FAILURES + 1)) + fi + + if echo "${KUBEADM_VERSION}" | grep -q "${EXPECTED_K8S}"; then + echo "✓ Kubernetes version ${EXPECTED_K8S} confirmed (kubeadm)" + else + echo "::error::Expected kubeadm version ${EXPECTED_K8S} but got: ${KUBEADM_VERSION}" + FAILURES=$((FAILURES + 1)) + fi + + # Verify containerd version + if echo "${ACTUAL_CONTAINERD}" | grep -q "${CONTAINERD_VERSION}"; then + echo "✓ containerd version ${CONTAINERD_VERSION} confirmed" + else + echo "::error::Expected containerd version ${CONTAINERD_VERSION} but got: ${ACTUAL_CONTAINERD}" + FAILURES=$((FAILURES + 1)) + fi + + # Verify cloud-init completed (not applicable to all distros) + if echo "${CLOUD_INIT_STATUS}" | grep -q "done\|disabled\|not available"; then + echo "✓ cloud-init status OK" + else + echo "::warning::cloud-init status: ${CLOUD_INIT_STATUS}" + fi fi - # Create a dedicated test resource group (not the build resource group) - TEST_RESOURCE_GROUP="image-builder-test-${GITHUB_RUN_ID}" - echo "TEST_RESOURCE_GROUP=${TEST_RESOURCE_GROUP}" >> $GITHUB_ENV + if [[ "${FAILURES}" -gt 0 ]]; then + echo "::error::${FAILURES} smoke test(s) failed" + exit 1 + fi + echo "All smoke tests passed" - 
AZURE_LOCATION="${MANAGED_IMAGE_LOCATION}" - az group create -n "${TEST_RESOURCE_GROUP}" -l "${AZURE_LOCATION}" --tags ${TAGS:-} - - # Create a cluster - az capi create \ - --yes \ - --debug \ - --name testvm \ - --kubernetes-version="${KUBERNETES_VERSION}" \ - --location="${AZURE_LOCATION}" \ - --resource-group="${TEST_RESOURCE_GROUP}" \ - --management-cluster-resource-group-name="${TEST_RESOURCE_GROUP}" \ - --control-plane-machine-count=1 \ - --node-machine-count=1 \ - --template="${TEST_TEMPLATE}" \ - --tags="${TAGS}" \ - --wait-for-nodes=2 \ - "${params[@]}" - - # Test if the VM's provisionState is "Succeeded" otherwise fail - timeout 60s bash -c 'set -o pipefail; while ! az vm list -g "$TEST_RESOURCE_GROUP" | jq -e "length > 0 and all(.provisioningState == \"Succeeded\")"; do sleep 1; done' + # -- Collect diagnostics on failure -------------------------------------- + - name: Collect diagnostics + if: failure() + env: + OS_TYPE: ${{ steps.vars.outputs.OS_TYPE }} + run: | + set -euo pipefail + ARTIFACTS="${GITHUB_WORKSPACE}/_artifacts" + mkdir -p "${ARTIFACTS}" + + echo "--- VM instance view ---" + az vm get-instance-view \ + --resource-group "${TEST_RESOURCE_GROUP}" \ + --name "${VM_NAME}" \ + --output json 2>&1 | tee "${ARTIFACTS}/vm-instance-view.json" || true + + if [[ "${OS_TYPE}" == "Windows" ]]; then + az vm run-command invoke \ + --resource-group "${TEST_RESOURCE_GROUP}" \ + --name "${VM_NAME}" \ + --command-id RunPowerShellScript \ + --scripts 'Get-EventLog -LogName Application -Newest 50 | Format-Table -AutoSize' \ + --output json 2>&1 | tee "${ARTIFACTS}/windows-eventlog.json" || true + else + SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=10 -i ${TMPDIR}/sshkey" + SSH_CMD="ssh ${SSH_OPTS} capi@${PUBLIC_IP}" + + echo "--- journalctl kubelet ---" + ${SSH_CMD} 'sudo journalctl -u kubelet --no-pager -n 100' 2>&1 | tee "${ARTIFACTS}/kubelet.log" || true + echo "--- journalctl containerd ---" + ${SSH_CMD} 'sudo journalctl -u containerd --no-pager 
-n 100' 2>&1 | tee "${ARTIFACTS}/containerd.log" || true + echo "--- cloud-init status ---" + ${SSH_CMD} 'cloud-init status --long' 2>&1 | tee "${ARTIFACTS}/cloud-init.log" || true + echo "--- dmesg tail ---" + ${SSH_CMD} 'dmesg | tail -50' 2>&1 | tee "${ARTIFACTS}/dmesg.log" || true + fi + + - name: Upload diagnostics + if: failure() + uses: actions/upload-artifact@v4 + with: + name: test-diagnostics + path: _artifacts/ + retention-days: 7 + # -- Cleanup (always runs) ----------------------------------------------- - name: Clean up test resource group if: always() run: | @@ -391,8 +612,10 @@ jobs: TEST_RESOURCE_GROUP="${TEST_RESOURCE_GROUP:-}" if [[ -n "${TEST_RESOURCE_GROUP}" ]]; then - echo "Cleaning up test resource group: ${TEST_RESOURCE_GROUP}" - az group delete -n "${TEST_RESOURCE_GROUP}" --yes --no-wait || true + echo "Deleting test resource group: ${TEST_RESOURCE_GROUP}" + az group delete -n "${TEST_RESOURCE_GROUP}" --yes --no-wait \ + --force-deletion-types=Microsoft.Compute/virtualMachines \ + || true else echo "No test resource group to clean up" fi @@ -485,8 +708,8 @@ jobs: RESOURCE_GROUP="${RESOURCE_GROUP}" SIG_OFFER="reference-images" - # Set replicated regions - REPLICATED_REGIONS="${MANAGED_IMAGE_LOCATION} ${REPLICATED_REGIONS_INPUT}" + # Set replicated regions (deduplicate in case build region is already in the list) + REPLICATED_REGIONS=$(echo "${MANAGED_IMAGE_LOCATION} ${REPLICATED_REGIONS_INPUT}" | tr ' ' '\n' | sort -u | tr '\n' ' ') # Create the resource group if needed if ! 
az group show -n "${RESOURCE_GROUP}" -o none 2>/dev/null; then diff --git a/images/capi/packer/azure/scripts/test-templates/linux/kustomization.yaml b/images/capi/packer/azure/scripts/test-templates/linux/kustomization.yaml deleted file mode 100644 index 26815281e7..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/linux/kustomization.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: -- https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/download/v1.13.0/cluster-template.yaml -patches: -- path: ../patches/azuremachinetemplate-controlplane.yaml -- path: ../patches/azuremachinetemplate-workload.yaml diff --git a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-controlplane.yaml b/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-controlplane.yaml deleted file mode 100644 index 53e8216b04..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-controlplane.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureMachineTemplate -metadata: - name: ${CLUSTER_NAME}-control-plane - namespace: default -spec: - template: - spec: - image: - id: ${MANAGED_IMAGE_ID} ---- \ No newline at end of file diff --git a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-windows.yaml b/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-windows.yaml deleted file mode 100644 index 2abc8847cc..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-windows.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureMachineTemplate -metadata: - name: ${CLUSTER_NAME}-md-win - namespace: default -spec: - template: - spec: - image: - id: ${MANAGED_IMAGE_ID} ---- \ No newline at end of file diff --git 
a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-workload.yaml b/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-workload.yaml deleted file mode 100644 index 1bc33e3d7f..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/patches/azuremachinetemplate-workload.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureMachineTemplate -metadata: - name: ${CLUSTER_NAME}-md-0 - namespace: default -spec: - template: - spec: - image: - id: ${MANAGED_IMAGE_ID} ---- \ No newline at end of file diff --git a/images/capi/packer/azure/scripts/test-templates/patches/kubeadmcontrolplane-windows.yaml b/images/capi/packer/azure/scripts/test-templates/patches/kubeadmcontrolplane-windows.yaml deleted file mode 100644 index 8a040a4348..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/patches/kubeadmcontrolplane-windows.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 -kind: KubeadmControlPlane -metadata: - name: ${CLUSTER_NAME}-control-plane - namespace: default -spec: - version: ${KUBERNETES_BOOTSTRAP_VERSION} ---- \ No newline at end of file diff --git a/images/capi/packer/azure/scripts/test-templates/patches/machinedeployment-windows.yaml b/images/capi/packer/azure/scripts/test-templates/patches/machinedeployment-windows.yaml deleted file mode 100644 index 1c66ce2fd3..0000000000 --- a/images/capi/packer/azure/scripts/test-templates/patches/machinedeployment-windows.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineDeployment -metadata: - name: ${CLUSTER_NAME}-md-0 - namespace: default -spec: - replicas: 0 ---- \ No newline at end of file diff --git a/images/capi/packer/azure/scripts/test-templates/windows/kustomization.yaml b/images/capi/packer/azure/scripts/test-templates/windows/kustomization.yaml deleted file mode 100644 index 0ea6f474e6..0000000000 --- 
a/images/capi/packer/azure/scripts/test-templates/windows/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: -- https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/download/v1.13.0/cluster-template-windows.yaml -patches: -- path: ../patches/azuremachinetemplate-windows.yaml -- path: ../patches/kubeadmcontrolplane-windows.yaml -- path: ../patches/machinedeployment-windows.yaml diff --git a/images/capi/packer/goss/goss-command.yaml b/images/capi/packer/goss/goss-command.yaml index 0c1d11a451..7bb05f898e 100644 --- a/images/capi/packer/goss/goss-command.yaml +++ b/images/capi/packer/goss/goss-command.yaml @@ -1,8 +1,9 @@ command: {{ if ne .Vars.OS "windows" }} # Linux Only - containerd --version | awk -F' ' '{print substr($3,2); }': + PATH=/opt/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin containerd --version | awk -F' ' '{print substr($3,2); }': exit-status: 0 - stdout: [] + stdout: + - "{{.Vars.containerd_version}}" stderr: [] timeout: 0 crictl ps: From 9918404b7ee34a7b6a6c1011917589b2cc65416f Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Tue, 14 Apr 2026 09:08:40 -0600 Subject: [PATCH 79/90] Bump ansible to v2.18.15 (#1974) * Bump ansible to v2.18.15 * Replace ansible.builtin.yum with ansible.builtin.dnf The yum module is deprecated in ansible-core 2.18 and redirects to dnf. Use ansible.builtin.dnf directly to fix fqcn[action-core] lint errors. 
--- images/capi/ansible/roles/containerd/tasks/redhat.yml | 2 +- .../capi/ansible/roles/kubernetes/tasks/azurelinux.yml | 2 +- images/capi/ansible/roles/kubernetes/tasks/redhat.yml | 2 +- images/capi/ansible/roles/providers/tasks/awscliv2.yml | 2 +- .../ansible/roles/providers/tasks/googlecompute.yml | 4 ++-- images/capi/ansible/roles/providers/tasks/hcloud.yml | 4 ++-- .../ansible/roles/providers/tasks/nutanix-redhat.yml | 4 ++-- images/capi/ansible/roles/providers/tasks/proxmox.yml | 2 +- images/capi/ansible/roles/providers/tasks/qemu.yml | 2 +- images/capi/ansible/roles/providers/tasks/raw.yml | 2 +- .../ansible/roles/providers/tasks/vmware-redhat.yml | 4 ++-- images/capi/ansible/roles/setup/tasks/redhat.yml | 10 +++++----- images/capi/ansible/roles/sysprep/tasks/redhat.yml | 2 +- images/capi/hack/utils.sh | 4 ++-- 14 files changed, 23 insertions(+), 23 deletions(-) diff --git a/images/capi/ansible/roles/containerd/tasks/redhat.yml b/images/capi/ansible/roles/containerd/tasks/redhat.yml index 125d02ab78..d2c11c27b0 100644 --- a/images/capi/ansible/roles/containerd/tasks/redhat.yml +++ b/images/capi/ansible/roles/containerd/tasks/redhat.yml @@ -13,7 +13,7 @@ # limitations under the License. 
--- - name: Install libseccomp package - ansible.builtin.yum: + ansible.builtin.dnf: name: libseccomp state: present lock_timeout: 60 diff --git a/images/capi/ansible/roles/kubernetes/tasks/azurelinux.yml b/images/capi/ansible/roles/kubernetes/tasks/azurelinux.yml index 8b76bd5f3e..9c524685f6 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/azurelinux.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/azurelinux.yml @@ -21,7 +21,7 @@ gpgkey: "{{ kubernetes_rpm_gpg_key }}" - name: Install Kubernetes - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" allow_downgrade: true state: present diff --git a/images/capi/ansible/roles/kubernetes/tasks/redhat.yml b/images/capi/ansible/roles/kubernetes/tasks/redhat.yml index 829ec8064f..eceb9d4322 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/redhat.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/redhat.yml @@ -21,7 +21,7 @@ gpgkey: "{{ kubernetes_rpm_gpg_key }}" - name: Install Kubernetes - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" allow_downgrade: true state: present diff --git a/images/capi/ansible/roles/providers/tasks/awscliv2.yml b/images/capi/ansible/roles/providers/tasks/awscliv2.yml index 7386d8dcf4..2d79ee0197 100644 --- a/images/capi/ansible/roles/providers/tasks/awscliv2.yml +++ b/images/capi/ansible/roles/providers/tasks/awscliv2.yml @@ -16,7 +16,7 @@ when: ansible_os_family == "Flatcar" - name: Install AWS CLI prequisites - ansible.builtin.yum: + ansible.builtin.dnf: name: - gnupg - unzip diff --git a/images/capi/ansible/roles/providers/tasks/googlecompute.yml b/images/capi/ansible/roles/providers/tasks/googlecompute.yml index c75f67b90e..ec58b9b69b 100644 --- a/images/capi/ansible/roles/providers/tasks/googlecompute.yml +++ b/images/capi/ansible/roles/providers/tasks/googlecompute.yml @@ -53,12 +53,12 @@ gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg EOM - name: Install google-cloud-cli package - ansible.builtin.yum: + 
ansible.builtin.dnf: name: google-cloud-cli state: present - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: diff --git a/images/capi/ansible/roles/providers/tasks/hcloud.yml b/images/capi/ansible/roles/providers/tasks/hcloud.yml index a7f4dc027b..85d7358f6a 100644 --- a/images/capi/ansible/roles/providers/tasks/hcloud.yml +++ b/images/capi/ansible/roles/providers/tasks/hcloud.yml @@ -37,7 +37,7 @@ when: ansible_os_family == "Debian" - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: @@ -60,7 +60,7 @@ when: ansible_os_family == "Debian" - name: Install CSI prerequisites on RedHat - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: diff --git a/images/capi/ansible/roles/providers/tasks/nutanix-redhat.yml b/images/capi/ansible/roles/providers/tasks/nutanix-redhat.yml index 77337390f4..97d8b57efa 100644 --- a/images/capi/ansible/roles/providers/tasks/nutanix-redhat.yml +++ b/images/capi/ansible/roles/providers/tasks/nutanix-redhat.yml @@ -13,7 +13,7 @@ # limitations under the License. 
--- - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: @@ -22,7 +22,7 @@ - cloud-utils-growpart - name: Install CAPX prerequisites - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: diff --git a/images/capi/ansible/roles/providers/tasks/proxmox.yml b/images/capi/ansible/roles/providers/tasks/proxmox.yml index 8771aa68f7..8a8eb12757 100644 --- a/images/capi/ansible/roles/providers/tasks/proxmox.yml +++ b/images/capi/ansible/roles/providers/tasks/proxmox.yml @@ -26,7 +26,7 @@ when: ansible_os_family == "Debian" - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: diff --git a/images/capi/ansible/roles/providers/tasks/qemu.yml b/images/capi/ansible/roles/providers/tasks/qemu.yml index 2434e07c6c..a10d725a73 100644 --- a/images/capi/ansible/roles/providers/tasks/qemu.yml +++ b/images/capi/ansible/roles/providers/tasks/qemu.yml @@ -26,7 +26,7 @@ when: ansible_os_family == "Debian" - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: diff --git a/images/capi/ansible/roles/providers/tasks/raw.yml b/images/capi/ansible/roles/providers/tasks/raw.yml index 44cbf8f739..2bcd880640 100644 --- a/images/capi/ansible/roles/providers/tasks/raw.yml +++ b/images/capi/ansible/roles/providers/tasks/raw.yml @@ -26,7 +26,7 @@ when: ansible_os_family == "Debian" - name: Install cloud-init packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: diff --git a/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml b/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml index bfb929a6ea..459a24e4d3 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml +++ b/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml @@ -14,7 +14,7 @@ --- - name: Install cloud-init 
packages - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: @@ -31,7 +31,7 @@ cloud_init_version: "{{ ansible_facts.packages['cloud-init'][0].version }}" - name: Install python2 pip - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ packages }}" state: present vars: diff --git a/images/capi/ansible/roles/setup/tasks/redhat.yml b/images/capi/ansible/roles/setup/tasks/redhat.yml index 030e7f5cf3..0aa18637c7 100644 --- a/images/capi/ansible/roles/setup/tasks/redhat.yml +++ b/images/capi/ansible/roles/setup/tasks/redhat.yml @@ -33,7 +33,7 @@ when: packer_builder_type.startswith('amazon') - name: Install EPEL package - ansible.builtin.yum: + ansible.builtin.dnf: name: epel-release state: present when: packer_builder_type.startswith('amazon') @@ -45,7 +45,7 @@ when: epel_rpm_gpg_key != "" and not packer_builder_type.startswith('amazon') and not packer_builder_type.startswith('scaleway') - name: Add epel repo - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ redhat_epel_rpm }}" state: present lock_timeout: 60 @@ -54,19 +54,19 @@ - ansible.builtin.import_tasks: rpm_repos.yml - name: Perform a yum update - ansible.builtin.yum: + ansible.builtin.dnf: name: "*" state: latest lock_timeout: 60 - name: Install baseline dependencies - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ rpms }}" state: present lock_timeout: 60 - name: Install extra rpms - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ extra_rpms.split() }}" state: present lock_timeout: 60 diff --git a/images/capi/ansible/roles/sysprep/tasks/redhat.yml b/images/capi/ansible/roles/sysprep/tasks/redhat.yml index 25aa26f323..449fb13f2a 100644 --- a/images/capi/ansible/roles/sysprep/tasks/redhat.yml +++ b/images/capi/ansible/roles/sysprep/tasks/redhat.yml @@ -60,7 +60,7 @@ ansible.builtin.command: subscription-manager clean - name: Remove yum package caches - ansible.builtin.yum: + ansible.builtin.dnf: autoremove: true lock_timeout: 60 diff --git 
a/images/capi/hack/utils.sh b/images/capi/hack/utils.sh index 68efa371c8..090133cf9f 100755 --- a/images/capi/hack/utils.sh +++ b/images/capi/hack/utils.sh @@ -14,8 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Note: ansible-core v2.16 supports Python 3.10-3.12. -_version_ansible_core="2.16.18" +# Note: ansible-core v2.18 supports Python 3.11-3.13. +_version_ansible_core="2.18.15" case "${OSTYPE}" in linux*) From aa0c7ae7f75a0c0756adf5ec7874468ee82c2444 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Tue, 14 Apr 2026 15:12:37 -0600 Subject: [PATCH 80/90] Fix some ansible lint warnings (#1979) * Fix jinja[spacing] ansible-lint violations * Fix name[play] ansible-lint violations * Fix name[missing] ansible-lint violations --- images/capi/.ansible-lint-ignore | 38 --------------- images/capi/ansible/firstboot.yml | 12 +++-- images/capi/ansible/node.yml | 33 ++++++++----- images/capi/ansible/python.yml | 6 ++- .../ansible/roles/containerd/tasks/main.yml | 9 ++-- .../ansible/roles/firstboot/tasks/main.yaml | 6 ++- .../ansible/roles/firstboot/tasks/qemu.yml | 3 +- .../ansible/roles/kubernetes/tasks/debian.yml | 2 +- .../ansible/roles/kubernetes/tasks/main.yml | 24 ++++++---- .../ansible/roles/kubernetes/tasks/photon.yml | 2 +- .../ansible/roles/kubernetes/tasks/redhat.yml | 2 +- .../load_additional_components/tasks/main.yml | 12 +++-- images/capi/ansible/roles/node/tasks/main.yml | 9 ++-- .../ansible/roles/providers/tasks/aws.yml | 3 +- .../ansible/roles/providers/tasks/azure.yml | 3 +- .../ansible/roles/providers/tasks/main.yml | 48 ++++++++++++------- .../ansible/roles/providers/tasks/nutanix.yml | 6 ++- .../ansible/roles/providers/tasks/vmware.yml | 9 ++-- .../capi/ansible/roles/python/tasks/main.yml | 3 +- .../ansible/roles/security/tasks/trivy.yml | 2 +- .../ansible/roles/setup/tasks/azurelinux.yml | 3 +- .../ansible/roles/setup/tasks/flatcar.yml | 3 +- .../capi/ansible/roles/setup/tasks/main.yml 
| 15 ++++-- .../capi/ansible/roles/setup/tasks/photon.yml | 3 +- .../capi/ansible/roles/setup/tasks/redhat.yml | 3 +- .../roles/sysprep/tasks/azurelinux.yml | 3 +- .../capi/ansible/roles/sysprep/tasks/main.yml | 15 ++++-- .../ansible/roles/sysprep/tasks/photon.yml | 3 +- .../ansible/roles/sysprep/tasks/redhat.yml | 3 +- .../ansible/windows/roles/gmsa/tasks/main.yml | 3 +- .../roles/kubernetes/tasks/kubelet.yml | 6 ++- .../windows/roles/kubernetes/tasks/main.yml | 6 ++- .../load_additional_components/tasks/main.yml | 9 ++-- .../windows/roles/providers/tasks/main.yml | 6 ++- .../windows/roles/runtimes/tasks/main.yml | 3 +- .../windows/roles/systemprep/tasks/main.yml | 6 ++- 36 files changed, 188 insertions(+), 134 deletions(-) diff --git a/images/capi/.ansible-lint-ignore b/images/capi/.ansible-lint-ignore index bd93175992..042e67a7c8 100644 --- a/images/capi/.ansible-lint-ignore +++ b/images/capi/.ansible-lint-ignore @@ -1,35 +1,21 @@ # This file contains ignores rule violations for ansible-lint -ansible/firstboot.yml name[missing] -ansible/firstboot.yml name[play] -ansible/node.yml name[missing] -ansible/node.yml name[play] -ansible/python.yml name[missing] -ansible/python.yml name[play] -ansible/roles/containerd/tasks/main.yml name[missing] ansible/roles/containerd/tasks/photon.yml no-changed-when ansible/roles/containerd/defaults/main.yml var-naming[no-role-prefix] ansible/roles/ecr_credential_provider/tasks/main.yaml no-changed-when ansible/roles/ecr_credential_provider/tasks/main.yaml yaml[line-length] -ansible/roles/firstboot/tasks/main.yaml name[missing] -ansible/roles/firstboot/tasks/qemu.yml name[missing] ansible/roles/gpu/tasks/amd.yml no-changed-when ansible/roles/gpu/tasks/nvidia.yml no-changed-when ansible/roles/kubernetes/defaults/main.yml var-naming[no-role-prefix] ansible/roles/kubernetes/defaults/main.yml yaml[line-length] ansible/roles/kubernetes/tasks/crictl-url.yml name[template] -ansible/roles/kubernetes/tasks/debian.yml jinja[spacing] 
ansible/roles/kubernetes/tasks/ecrpull.yml command-instead-of-shell ansible/roles/kubernetes/tasks/ecrpull.yml no-changed-when ansible/roles/kubernetes/tasks/kubeadmpull.yml command-instead-of-shell ansible/roles/kubernetes/tasks/kubeadmpull.yml no-changed-when -ansible/roles/kubernetes/tasks/main.yml name[missing] -ansible/roles/kubernetes/tasks/photon.yml jinja[spacing] ansible/roles/kubernetes/tasks/photon.yml no-changed-when -ansible/roles/kubernetes/tasks/redhat.yml jinja[spacing] ansible/roles/kubernetes/tasks/url.yml command-instead-of-shell ansible/roles/kubernetes/tasks/url.yml no-changed-when ansible/roles/load_additional_components/defaults/main.yml var-naming[no-role-prefix] -ansible/roles/load_additional_components/tasks/main.yml name[missing] ansible/roles/load_additional_components/tasks/registry.yml command-instead-of-shell ansible/roles/load_additional_components/tasks/registry.yml no-changed-when ansible/roles/load_additional_components/tasks/url.yml command-instead-of-shell @@ -37,76 +23,52 @@ ansible/roles/load_additional_components/tasks/url.yml no-changed-when ansible/roles/load_additional_components/tasks/url.yml yaml[line-length] ansible/roles/node/defaults/main.yml var-naming[no-role-prefix] ansible/roles/node/tasks/main.yml command-instead-of-module -ansible/roles/node/tasks/main.yml name[missing] ansible/roles/node/tasks/main.yml no-changed-when ansible/roles/providers/defaults/main.yml var-naming[no-role-prefix] ansible/roles/providers/tasks/aws.yml command-instead-of-shell -ansible/roles/providers/tasks/aws.yml name[missing] ansible/roles/providers/tasks/aws.yml no-changed-when ansible/roles/providers/tasks/awscliv2.yml no-changed-when ansible/roles/providers/tasks/awscliv2.yml package-latest -ansible/roles/providers/tasks/azure.yml name[missing] ansible/roles/providers/tasks/cloudstack.yml command-instead-of-shell ansible/roles/providers/tasks/cloudstack.yml no-changed-when ansible/roles/providers/tasks/googlecompute.yml 
command-instead-of-shell ansible/roles/providers/tasks/googlecompute.yml no-changed-when -ansible/roles/providers/tasks/main.yml name[missing] -ansible/roles/providers/tasks/nutanix.yml name[missing] ansible/roles/providers/tasks/raw.yml command-instead-of-shell ansible/roles/providers/tasks/raw.yml no-changed-when ansible/roles/providers/tasks/vmware-photon.yml no-changed-when ansible/roles/providers/tasks/vmware-redhat.yml command-instead-of-shell ansible/roles/providers/tasks/vmware-redhat.yml no-changed-when -ansible/roles/providers/tasks/vmware.yml name[missing] ansible/roles/python/defaults/main.yml var-naming[no-role-prefix] ansible/roles/python/tasks/flatcar.yml no-changed-when -ansible/roles/python/tasks/main.yml name[missing] ansible/roles/python/tasks/main.yml no-changed-when -ansible/roles/security/tasks/trivy.yml jinja[spacing] ansible/roles/setup/defaults/main.yml var-naming[no-role-prefix] -ansible/roles/setup/tasks/azurelinux.yml name[missing] ansible/roles/setup/tasks/azurelinux.yml package-latest ansible/roles/setup/tasks/debian.yml command-instead-of-module ansible/roles/setup/tasks/debian.yml no-changed-when ansible/roles/setup/tasks/debian.yml package-latest -ansible/roles/setup/tasks/flatcar.yml name[missing] -ansible/roles/setup/tasks/main.yml name[missing] -ansible/roles/setup/tasks/photon.yml name[missing] ansible/roles/setup/tasks/photon.yml no-changed-when ansible/roles/setup/tasks/redhat.yml command-instead-of-module -ansible/roles/setup/tasks/redhat.yml name[missing] ansible/roles/setup/tasks/redhat.yml no-changed-when ansible/roles/setup/tasks/redhat.yml package-latest ansible/roles/setup/tasks/rpm_repos.yml no-changed-when ansible/roles/sysprep/defaults/main.yml var-naming[no-role-prefix] -ansible/roles/sysprep/tasks/azurelinux.yml name[missing] ansible/roles/sysprep/tasks/debian.yml no-changed-when ansible/roles/sysprep/tasks/flatcar.yml no-changed-when -ansible/roles/sysprep/tasks/main.yml name[missing] 
ansible/roles/sysprep/tasks/main.yml no-changed-when -ansible/roles/sysprep/tasks/photon.yml name[missing] ansible/roles/sysprep/tasks/photon.yml no-changed-when ansible/roles/sysprep/tasks/redhat.yml command-instead-of-module -ansible/roles/sysprep/tasks/redhat.yml name[missing] ansible/roles/sysprep/tasks/redhat.yml no-changed-when ansible/roles/sysprep/tasks/rpm_repos.yml no-changed-when ansible/windows/example.vars.yml yaml[line-length] ansible/windows/roles/cloudbase-init/tasks/main.yml schema[tasks] ansible/windows/roles/debug/defaults/main.yml var-naming[no-role-prefix] -ansible/windows/roles/gmsa/tasks/main.yml name[missing] -ansible/windows/roles/kubernetes/tasks/kubelet.yml name[missing] ansible/windows/roles/kubernetes/tasks/kubelet.yml yaml[line-length] -ansible/windows/roles/kubernetes/tasks/main.yml name[missing] ansible/windows/roles/kubernetes/tasks/sc.yml yaml[line-length] ansible/windows/roles/load_additional_components/defaults/main.yml var-naming[no-role-prefix] -ansible/windows/roles/load_additional_components/tasks/main.yml name[missing] ansible/windows/roles/providers/defaults/main.yml var-naming[no-role-prefix] ansible/windows/roles/providers/tasks/azure.yml schema[tasks] ansible/windows/roles/providers/tasks/azure.yml yaml[line-length] -ansible/windows/roles/providers/tasks/main.yml name[missing] ansible/windows/roles/runtimes/defaults/main.yml var-naming[no-role-prefix] -ansible/windows/roles/runtimes/tasks/main.yml name[missing] ansible/windows/roles/systemprep/defaults/main.yml var-naming[no-role-prefix] ansible/windows/roles/systemprep/tasks/main.yml ignore-errors -ansible/windows/roles/systemprep/tasks/main.yml name[missing] ansible/windows/roles/systemprep/tasks/ssh-feature.yml schema[tasks] diff --git a/images/capi/ansible/firstboot.yml b/images/capi/ansible/firstboot.yml index 779383808c..bbaf133248 100644 --- a/images/capi/ansible/firstboot.yml +++ b/images/capi/ansible/firstboot.yml @@ -28,22 +28,26 @@ ansible.builtin.raw: test -e 
/usr/bin/apt && (apt-get update && apt-get install -y python3) || (yum install -y python3) when: python_installed.rc != 0 -- hosts: all +- name: Run firstboot roles + hosts: all become: true vars: firstboot_custom_roles_pre: "" firstboot_custom_roles_post: "" tasks: - - ansible.builtin.include_role: + - name: Include pre-firstboot custom roles + ansible.builtin.include_role: name: "{{ role }}" loop: "{{ firstboot_custom_roles_pre.split() }}" loop_control: loop_var: role when: firstboot_custom_roles_pre != "" - - ansible.builtin.include_role: + - name: Include firstboot role + ansible.builtin.include_role: name: firstboot - - ansible.builtin.include_role: + - name: Include post-firstboot custom roles + ansible.builtin.include_role: name: "{{ role }}" loop: "{{ firstboot_custom_roles_post.split() }}" loop_control: diff --git a/images/capi/ansible/node.yml b/images/capi/ansible/node.yml index 3018f4aea8..e6dd3b8294 100644 --- a/images/capi/ansible/node.yml +++ b/images/capi/ansible/node.yml @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- hosts: all +- name: Provision node + hosts: all become: true vars: node_custom_roles_pre: "" @@ -23,35 +24,45 @@ arch: "{{ {'x86_64': 'amd64', 'amd64': 'amd64', 'aarch64': 'arm64', 'arm64': 'arm64', 'ppc64le': 'ppc64le'}.get(arch_uname, 'unsupported') }}" tasks: - - ansible.builtin.include_role: + - name: Include pre-node custom roles + ansible.builtin.include_role: name: "{{ role }}" loop: "{{ node_custom_roles_pre.split() }}" loop_control: loop_var: role when: node_custom_roles_pre != "" - - ansible.builtin.include_role: + - name: Include node role + ansible.builtin.include_role: name: node - - ansible.builtin.include_role: + - name: Include providers role + ansible.builtin.include_role: name: providers - - ansible.builtin.include_role: + - name: Include containerd role + ansible.builtin.include_role: name: containerd - - ansible.builtin.include_role: + - name: Include kubernetes role + ansible.builtin.include_role: name: kubernetes - - ansible.builtin.include_role: + - name: Include load_additional_components role + ansible.builtin.include_role: name: load_additional_components when: load_additional_components | bool - - ansible.builtin.include_role: + - name: Include ecr_credential_provider role + ansible.builtin.include_role: name: ecr_credential_provider when: ecr_credential_provider | bool - - ansible.builtin.include_role: + - name: Include post-node custom roles + ansible.builtin.include_role: name: "{{ role }}" loop: "{{ custom_role_names.split() + node_custom_roles_post.split() }}" loop_control: loop_var: role when: custom_role_names != "" or node_custom_roles_post != "" - - ansible.builtin.include_role: + - name: Include sysprep role + ansible.builtin.include_role: name: sysprep - - ansible.builtin.include_role: + - name: Include post-sysprep custom roles + ansible.builtin.include_role: name: "{{ role }}" loop: "{{ node_custom_roles_post_sysprep.split() }}" loop_control: diff --git a/images/capi/ansible/python.yml b/images/capi/ansible/python.yml 
index 53b556ec1f..04a0cab54d 100644 --- a/images/capi/ansible/python.yml +++ b/images/capi/ansible/python.yml @@ -12,14 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- hosts: all +- name: Install Python + hosts: all # Gathering facts requires Python to be available, so it's a chicken and egg # problem since this playbook installs Python. gather_facts: false become: true tasks: - - ansible.builtin.include_role: + - name: Include python role + ansible.builtin.include_role: name: python environment: diff --git a/images/capi/ansible/roles/containerd/tasks/main.yml b/images/capi/ansible/roles/containerd/tasks/main.yml index a2238dab05..8b14835583 100644 --- a/images/capi/ansible/roles/containerd/tasks/main.yml +++ b/images/capi/ansible/roles/containerd/tasks/main.yml @@ -12,13 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.import_tasks: debian.yml +- name: Import Debian containerd tasks + ansible.builtin.import_tasks: debian.yml when: ansible_os_family == "Debian" -- ansible.builtin.import_tasks: redhat.yml +- name: Import RedHat containerd tasks + ansible.builtin.import_tasks: redhat.yml when: ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux", "RedHat"] -- ansible.builtin.import_tasks: photon.yml +- name: Import Photon containerd tasks + ansible.builtin.import_tasks: photon.yml when: ansible_os_family == "VMware Photon OS" # TODO(vincepri): Use deb/rpm packages once available. diff --git a/images/capi/ansible/roles/firstboot/tasks/main.yaml b/images/capi/ansible/roles/firstboot/tasks/main.yaml index 036f44ae74..c6d0bd8bde 100644 --- a/images/capi/ansible/roles/firstboot/tasks/main.yaml +++ b/images/capi/ansible/roles/firstboot/tasks/main.yaml @@ -13,8 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-- ansible.builtin.include_tasks: photon.yml +- name: Include Photon firstboot tasks + ansible.builtin.include_tasks: photon.yml when: ansible_os_family == "VMware Photon OS" -- ansible.builtin.include_tasks: qemu.yml +- name: Include QEMU firstboot tasks + ansible.builtin.include_tasks: qemu.yml when: packer_builder_type is search('qemu') diff --git a/images/capi/ansible/roles/firstboot/tasks/qemu.yml b/images/capi/ansible/roles/firstboot/tasks/qemu.yml index 0a166ec03e..93dce9472c 100644 --- a/images/capi/ansible/roles/firstboot/tasks/qemu.yml +++ b/images/capi/ansible/roles/firstboot/tasks/qemu.yml @@ -15,4 +15,5 @@ # no-op task just to have something for the role to do. Right now # all the work happens in the setup role -- ansible.builtin.meta: noop +- name: No-op for QEMU firstboot + ansible.builtin.meta: noop diff --git a/images/capi/ansible/roles/kubernetes/tasks/debian.yml b/images/capi/ansible/roles/kubernetes/tasks/debian.yml index 03e11d562e..9a2089466c 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/debian.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/debian.yml @@ -33,4 +33,4 @@ - kubelet={{ kubernetes_deb_version }} - kubeadm={{ kubernetes_deb_version }} - kubectl={{ kubernetes_deb_version }} - - kubernetes-cni{{ '='+kubernetes_cni_deb_version if kubernetes_cni_deb_version else '' }} + - kubernetes-cni{{ '=' + kubernetes_cni_deb_version if kubernetes_cni_deb_version else '' }} diff --git a/images/capi/ansible/roles/kubernetes/tasks/main.yml b/images/capi/ansible/roles/kubernetes/tasks/main.yml index bd14799d35..d510b3a28a 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/main.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/main.yml @@ -12,24 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.import_tasks: debian.yml +- name: Import Debian Kubernetes tasks + ansible.builtin.import_tasks: debian.yml when: kubernetes_source_type == "pkg" and ansible_os_family == "Debian" -- ansible.builtin.import_tasks: azurelinux.yml +- name: Import Azure Linux Kubernetes tasks + ansible.builtin.import_tasks: azurelinux.yml when: kubernetes_source_type == "pkg" and ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux"] -- ansible.builtin.import_tasks: redhat.yml +- name: Import RedHat Kubernetes tasks + ansible.builtin.import_tasks: redhat.yml when: kubernetes_source_type == "pkg" and ansible_os_family == "RedHat" -- ansible.builtin.import_tasks: photon.yml +- name: Import Photon Kubernetes tasks + ansible.builtin.import_tasks: photon.yml when: kubernetes_source_type == "pkg" and ansible_os_family == "VMware Photon OS" -- ansible.builtin.import_tasks: url.yml +- name: Import URL Kubernetes tasks + ansible.builtin.import_tasks: url.yml when: kubernetes_source_type == "http" and kubernetes_cni_source_type == "http" # must include crictl-url.yml after installing containerd, # as the cri-containerd tarball also includes crictl. 
-- ansible.builtin.import_tasks: crictl-url.yml +- name: Import crictl URL tasks + ansible.builtin.import_tasks: crictl-url.yml - name: Create kubelet default config file ansible.builtin.template: @@ -109,8 +115,10 @@ ansible.builtin.set_fact: ecr: '{{ kubernetes_container_registry is regex("^[0-9]{12}.dkr.ecr.[^.]+.amazonaws.com$") }}' -- ansible.builtin.import_tasks: kubeadmpull.yml +- name: Import kubeadm pull tasks + ansible.builtin.import_tasks: kubeadmpull.yml when: (kubernetes_source_type == "pkg" and not ecr) or ansible_os_family == "Flatcar" -- ansible.builtin.import_tasks: ecrpull.yml +- name: Import ECR pull tasks + ansible.builtin.import_tasks: ecrpull.yml when: kubernetes_source_type != "http" and ecr diff --git a/images/capi/ansible/roles/kubernetes/tasks/photon.yml b/images/capi/ansible/roles/kubernetes/tasks/photon.yml index 1fc3f2f956..81bc532a9f 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/photon.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/photon.yml @@ -25,4 +25,4 @@ kubelet-{{ kubernetes_rpm_version }} kubeadm-{{ kubernetes_rpm_version }} kubectl-{{ kubernetes_rpm_version }} - kubernetes-cni{{ '-'+kubernetes_cni_rpm_version if kubernetes_cni_rpm_version else '' }} + kubernetes-cni{{ '-' + kubernetes_cni_rpm_version if kubernetes_cni_rpm_version else '' }} diff --git a/images/capi/ansible/roles/kubernetes/tasks/redhat.yml b/images/capi/ansible/roles/kubernetes/tasks/redhat.yml index eceb9d4322..1264687951 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/redhat.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/redhat.yml @@ -33,4 +33,4 @@ - kubelet-{{ kubernetes_rpm_version }} - kubeadm-{{ kubernetes_rpm_version }} - kubectl-{{ kubernetes_rpm_version }} - - kubernetes-cni{{ '-'+kubernetes_cni_rpm_version if kubernetes_cni_rpm_version else '' }} + - kubernetes-cni{{ '-' + kubernetes_cni_rpm_version if kubernetes_cni_rpm_version else '' }} diff --git a/images/capi/ansible/roles/load_additional_components/tasks/main.yml 
b/images/capi/ansible/roles/load_additional_components/tasks/main.yml index 1298a4d305..9a140c8056 100644 --- a/images/capi/ansible/roles/load_additional_components/tasks/main.yml +++ b/images/capi/ansible/roles/load_additional_components/tasks/main.yml @@ -12,16 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.import_tasks: executables.yml +- name: Import additional executables tasks + ansible.builtin.import_tasks: executables.yml when: additional_executables | bool -- ansible.builtin.import_tasks: registry.yml +- name: Import additional registry images tasks + ansible.builtin.import_tasks: registry.yml when: additional_registry_images | bool -- ansible.builtin.import_tasks: url.yml +- name: Import additional URL images tasks + ansible.builtin.import_tasks: url.yml when: additional_url_images | bool # We have to use include_tasks for the S3 task due to ansible pre-processing the task when import_tasks is used. # This causes a failure when using any other additional_component. -- ansible.builtin.include_tasks: s3.yml +- name: Include additional S3 tasks + ansible.builtin.include_tasks: s3.yml when: additional_s3 | bool diff --git a/images/capi/ansible/roles/node/tasks/main.yml b/images/capi/ansible/roles/node/tasks/main.yml index 5a596aa9f5..0eea316cc3 100644 --- a/images/capi/ansible/roles/node/tasks/main.yml +++ b/images/capi/ansible/roles/node/tasks/main.yml @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.import_tasks: photon.yml +- name: Import Photon node tasks + ansible.builtin.import_tasks: photon.yml when: ansible_os_family == "VMware Photon OS" -- ansible.builtin.import_tasks: amazonLinux.yml +- name: Import Amazon Linux node tasks + ansible.builtin.import_tasks: amazonLinux.yml when: ansible_distribution == "Amazon" # This is required until https://github.com/ansible/ansible/issues/77537 is fixed and used. @@ -26,7 +28,8 @@ tags: - facts -- ansible.builtin.import_tasks: flatcar.yml +- name: Import Flatcar node tasks + ansible.builtin.import_tasks: flatcar.yml when: ansible_os_family == "Flatcar" - name: Ensure overlay module is present diff --git a/images/capi/ansible/roles/providers/tasks/aws.yml b/images/capi/ansible/roles/providers/tasks/aws.yml index 2acecf9cd1..5427ed5f58 100644 --- a/images/capi/ansible/roles/providers/tasks/aws.yml +++ b/images/capi/ansible/roles/providers/tasks/aws.yml @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.include_tasks: awscliv2.yml +- name: Include AWS CLI v2 tasks + ansible.builtin.include_tasks: awscliv2.yml when: ansible_distribution != "Amazon" # Remove after https://github.com/aws/amazon-ssm-agent/issues/235 is fixed. diff --git a/images/capi/ansible/roles/providers/tasks/azure.yml b/images/capi/ansible/roles/providers/tasks/azure.yml index 2ba716b833..88f1635772 100644 --- a/images/capi/ansible/roles/providers/tasks/azure.yml +++ b/images/capi/ansible/roles/providers/tasks/azure.yml @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.import_tasks: azurecli.yml +- name: Import Azure CLI tasks + ansible.builtin.import_tasks: azurecli.yml when: debug_tools | bool - name: Configure PTP diff --git a/images/capi/ansible/roles/providers/tasks/main.yml b/images/capi/ansible/roles/providers/tasks/main.yml index 681866b8e0..95ace6c5cf 100644 --- a/images/capi/ansible/roles/providers/tasks/main.yml +++ b/images/capi/ansible/roles/providers/tasks/main.yml @@ -12,52 +12,68 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.include_tasks: aws.yml +- name: Include AWS provider tasks + ansible.builtin.include_tasks: aws.yml when: packer_builder_type.startswith('amazon') -- ansible.builtin.include_tasks: azure.yml +- name: Include Azure provider tasks + ansible.builtin.include_tasks: azure.yml when: packer_builder_type.startswith('azure') -- ansible.builtin.include_tasks: outscale.yml +- name: Include Outscale provider tasks + ansible.builtin.include_tasks: outscale.yml when: packer_builder_type.startswith('outscale') -- ansible.builtin.include_tasks: vmware.yml +- name: Include VMware provider tasks + ansible.builtin.include_tasks: vmware.yml when: packer_builder_type is search('vmware') or packer_builder_type is search('vsphere') -- ansible.builtin.include_tasks: googlecompute.yml +- name: Include Google Compute provider tasks + ansible.builtin.include_tasks: googlecompute.yml when: packer_builder_type.startswith('googlecompute') -- ansible.builtin.include_tasks: openstack.yml +- name: Include OpenStack provider tasks + ansible.builtin.include_tasks: openstack.yml when: packer_builder_type.startswith('openstack') -- ansible.builtin.include_tasks: oci.yml +- name: Include OCI provider tasks + ansible.builtin.include_tasks: oci.yml when: packer_builder_type.startswith('oracle-oci') -- ansible.builtin.include_tasks: proxmox.yml +- name: Include Proxmox provider tasks + ansible.builtin.include_tasks: proxmox.yml 
when: packer_builder_type.startswith('proxmox') -- ansible.builtin.include_tasks: qemu.yml +- name: Include QEMU provider tasks + ansible.builtin.include_tasks: qemu.yml when: packer_builder_type is search('qemu') and build_target is not search('raw') -- ansible.builtin.include_tasks: cloudstack.yml +- name: Include CloudStack provider tasks + ansible.builtin.include_tasks: cloudstack.yml when: packer_builder_type is search('qemu') and provider is defined and provider is search('cloudstack') -- ansible.builtin.include_tasks: raw.yml +- name: Include raw provider tasks + ansible.builtin.include_tasks: raw.yml when: packer_builder_type is search('qemu') and build_target is search('raw') -- ansible.builtin.include_tasks: nutanix.yml +- name: Include Nutanix provider tasks + ansible.builtin.include_tasks: nutanix.yml when: packer_builder_type is search('nutanix') -- ansible.builtin.include_tasks: hcloud.yml +- name: Include Hetzner Cloud provider tasks + ansible.builtin.include_tasks: hcloud.yml when: packer_builder_type is search('hcloud') -- ansible.builtin.include_tasks: huaweicloud.yml +- name: Include Huawei Cloud provider tasks + ansible.builtin.include_tasks: huaweicloud.yml when: packer_builder_type.startswith('huaweicloud') -- ansible.builtin.include_tasks: scaleway.yml +- name: Include Scaleway provider tasks + ansible.builtin.include_tasks: scaleway.yml when: packer_builder_type.startswith('scaleway') -- ansible.builtin.include_tasks: maas.yml +- name: Include MAAS provider tasks + ansible.builtin.include_tasks: maas.yml when: packer_builder_type is search('qemu') and provider is defined and provider is search('maas') # Create a boot order configuration diff --git a/images/capi/ansible/roles/providers/tasks/nutanix.yml b/images/capi/ansible/roles/providers/tasks/nutanix.yml index 2c96ff84eb..a9f7edce32 100644 --- a/images/capi/ansible/roles/providers/tasks/nutanix.yml +++ b/images/capi/ansible/roles/providers/tasks/nutanix.yml @@ -12,10 +12,12 @@ # See the 
License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.include_tasks: nutanix-redhat.yml +- name: Include Nutanix RedHat tasks + ansible.builtin.include_tasks: nutanix-redhat.yml when: ansible_os_family == "RedHat" -- ansible.builtin.include_tasks: nutanix-ubuntu.yml +- name: Include Nutanix Ubuntu tasks + ansible.builtin.include_tasks: nutanix-ubuntu.yml when: ansible_os_family == "Debian" - name: Ensure ip_vs module is loaded diff --git a/images/capi/ansible/roles/providers/tasks/vmware.yml b/images/capi/ansible/roles/providers/tasks/vmware.yml index 940b6f9272..40d08b9d61 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware.yml +++ b/images/capi/ansible/roles/providers/tasks/vmware.yml @@ -12,13 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.include_tasks: vmware-photon.yml +- name: Include VMware Photon tasks + ansible.builtin.include_tasks: vmware-photon.yml when: ansible_os_family == "VMware Photon OS" -- ansible.builtin.include_tasks: vmware-ubuntu.yml +- name: Include VMware Ubuntu tasks + ansible.builtin.include_tasks: vmware-ubuntu.yml when: ansible_os_family == "Debian" -- ansible.builtin.include_tasks: vmware-redhat.yml +- name: Include VMware RedHat tasks + ansible.builtin.include_tasks: vmware-redhat.yml when: ansible_os_family == "RedHat" - name: Create provider vmtools config drop-in file diff --git a/images/capi/ansible/roles/python/tasks/main.yml b/images/capi/ansible/roles/python/tasks/main.yml index f106fe6aa5..973da587ea 100644 --- a/images/capi/ansible/roles/python/tasks/main.yml +++ b/images/capi/ansible/roles/python/tasks/main.yml @@ -16,7 +16,8 @@ ansible.builtin.raw: grep DISTRIB_ID /etc/lsb-release || echo '/etc/lsb-release not found' register: distrib_id -- ansible.builtin.include_tasks: flatcar.yml +- name: Include Flatcar Python tasks + ansible.builtin.include_tasks: flatcar.yml 
# We can't use ansible_os_family fact here for consistency, as facts gathering # is disabled in the playbook which includes this role. See playbook for more details. when: distrib_id.stdout_lines[0] is search("Flatcar") diff --git a/images/capi/ansible/roles/security/tasks/trivy.yml b/images/capi/ansible/roles/security/tasks/trivy.yml index 58408ae716..72a22ba810 100644 --- a/images/capi/ansible/roles/security/tasks/trivy.yml +++ b/images/capi/ansible/roles/security/tasks/trivy.yml @@ -24,7 +24,7 @@ - name: Add Trivy apt repo ansible.builtin.apt_repository: - repo: "deb https://aquasecurity.github.io/trivy-repo/deb {{ansible_distribution_release}} main" + repo: "deb https://aquasecurity.github.io/trivy-repo/deb {{ ansible_distribution_release }} main" state: present filename: trivy diff --git a/images/capi/ansible/roles/setup/tasks/azurelinux.yml b/images/capi/ansible/roles/setup/tasks/azurelinux.yml index 0ed1f5c7a4..9704921f55 100644 --- a/images/capi/ansible/roles/setup/tasks/azurelinux.yml +++ b/images/capi/ansible/roles/setup/tasks/azurelinux.yml @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + ansible.builtin.import_tasks: rpm_repos.yml - name: Perform a tdnf update ansible.builtin.dnf: diff --git a/images/capi/ansible/roles/setup/tasks/flatcar.yml b/images/capi/ansible/roles/setup/tasks/flatcar.yml index 5db4e8504b..019783590e 100644 --- a/images/capi/ansible/roles/setup/tasks/flatcar.yml +++ b/images/capi/ansible/roles/setup/tasks/flatcar.yml @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.include_tasks: bootstrap-flatcar.yml +- name: Include Flatcar bootstrap tasks + ansible.builtin.include_tasks: bootstrap-flatcar.yml - name: Create system-environment-generators directory ansible.builtin.file: diff --git a/images/capi/ansible/roles/setup/tasks/main.yml b/images/capi/ansible/roles/setup/tasks/main.yml index e4174217c1..f73013f77f 100644 --- a/images/capi/ansible/roles/setup/tasks/main.yml +++ b/images/capi/ansible/roles/setup/tasks/main.yml @@ -12,22 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.import_tasks: debian.yml +- name: Import Debian setup tasks + ansible.builtin.import_tasks: debian.yml when: ansible_os_family == "Debian" -- ansible.builtin.import_tasks: flatcar.yml +- name: Import Flatcar setup tasks + ansible.builtin.import_tasks: flatcar.yml # This task overrides ansible_os_family to "Flatcar" as a workaround for # regression between Flatcar and Ansible, so rest of the code can use just # "Flatcar" for comparison, which is the correct value. 
when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] -- ansible.builtin.import_tasks: azurelinux.yml +- name: Import Azure Linux setup tasks + ansible.builtin.import_tasks: azurelinux.yml when: ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux"] -- ansible.builtin.import_tasks: redhat.yml +- name: Import RedHat setup tasks + ansible.builtin.import_tasks: redhat.yml when: ansible_os_family == "RedHat" -- ansible.builtin.import_tasks: photon.yml +- name: Import Photon setup tasks + ansible.builtin.import_tasks: photon.yml when: ansible_os_family == "VMware Photon OS" # Copy in pip config file when defined diff --git a/images/capi/ansible/roles/setup/tasks/photon.yml b/images/capi/ansible/roles/setup/tasks/photon.yml index 6af05298d5..095b01cc2b 100644 --- a/images/capi/ansible/roles/setup/tasks/photon.yml +++ b/images/capi/ansible/roles/setup/tasks/photon.yml @@ -27,7 +27,8 @@ owner: builder group: builder -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + ansible.builtin.import_tasks: rpm_repos.yml - name: Update the repos package to import the recent gpg keys ansible.builtin.command: tdnf update -y photon-repos --enablerepo=photon --refresh diff --git a/images/capi/ansible/roles/setup/tasks/redhat.yml b/images/capi/ansible/roles/setup/tasks/redhat.yml index 0aa18637c7..d8c21e7342 100644 --- a/images/capi/ansible/roles/setup/tasks/redhat.yml +++ b/images/capi/ansible/roles/setup/tasks/redhat.yml @@ -51,7 +51,8 @@ lock_timeout: 60 when: redhat_epel_rpm != "" and not packer_builder_type.startswith('amazon') and not packer_builder_type.startswith('scaleway') -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + ansible.builtin.import_tasks: rpm_repos.yml - name: Perform a yum update ansible.builtin.dnf: diff --git a/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml b/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml index 251dd74a89..2a40f28f03 
100644 --- a/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml +++ b/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml @@ -24,7 +24,8 @@ ansible.builtin.set_fact: package_list: "{{ ansible_facts.packages.keys() | join(' ') }}" -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + ansible.builtin.import_tasks: rpm_repos.yml - name: Ensure nftables config ends with a newline ansible.builtin.shell: /bin/echo "" >> /etc/sysconfig/nftables.conf diff --git a/images/capi/ansible/roles/sysprep/tasks/main.yml b/images/capi/ansible/roles/sysprep/tasks/main.yml index 8782e98775..db3e46a97e 100644 --- a/images/capi/ansible/roles/sysprep/tasks/main.yml +++ b/images/capi/ansible/roles/sysprep/tasks/main.yml @@ -12,19 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.import_tasks: debian.yml +- name: Import Debian sysprep tasks + ansible.builtin.import_tasks: debian.yml when: ansible_os_family == "Debian" -- ansible.builtin.import_tasks: flatcar.yml +- name: Import Flatcar sysprep tasks + ansible.builtin.import_tasks: flatcar.yml when: ansible_os_family == "Flatcar" -- ansible.builtin.import_tasks: redhat.yml +- name: Import RedHat sysprep tasks + ansible.builtin.import_tasks: redhat.yml when: ansible_os_family == "RedHat" -- ansible.builtin.import_tasks: azurelinux.yml +- name: Import Azure Linux sysprep tasks + ansible.builtin.import_tasks: azurelinux.yml when: ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux"] -- ansible.builtin.import_tasks: photon.yml +- name: Import Photon sysprep tasks + ansible.builtin.import_tasks: photon.yml when: ansible_os_family == "VMware Photon OS" - name: Remove containerd http proxy conf file if needed diff --git a/images/capi/ansible/roles/sysprep/tasks/photon.yml b/images/capi/ansible/roles/sysprep/tasks/photon.yml index 41f77836e1..fba507dda5 100644 --- 
a/images/capi/ansible/roles/sysprep/tasks/photon.yml +++ b/images/capi/ansible/roles/sysprep/tasks/photon.yml @@ -41,7 +41,8 @@ regexp: ^excludepkgs= line: excludepkgs={{ package_list }} -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + ansible.builtin.import_tasks: rpm_repos.yml - name: Update the repos package to import the recent gpg keys ansible.builtin.command: tdnf update -y photon-repos --enablerepo=photon --refresh diff --git a/images/capi/ansible/roles/sysprep/tasks/redhat.yml b/images/capi/ansible/roles/sysprep/tasks/redhat.yml index 449fb13f2a..fcef2a175a 100644 --- a/images/capi/ansible/roles/sysprep/tasks/redhat.yml +++ b/images/capi/ansible/roles/sysprep/tasks/redhat.yml @@ -38,7 +38,8 @@ line: excludepkgs={{ package_list }} when: ansible_distribution == "Amazon" and ansible_distribution_version == "2023" -- ansible.builtin.import_tasks: rpm_repos.yml +- name: Import RPM repository tasks + ansible.builtin.import_tasks: rpm_repos.yml - name: Remove RHEL subscription diff --git a/images/capi/ansible/windows/roles/gmsa/tasks/main.yml b/images/capi/ansible/windows/roles/gmsa/tasks/main.yml index 09fed92655..b07aea40fd 100644 --- a/images/capi/ansible/windows/roles/gmsa/tasks/main.yml +++ b/images/capi/ansible/windows/roles/gmsa/tasks/main.yml @@ -12,5 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.import_tasks: gmsa_keyvault.yml +- name: Import gMSA KeyVault tasks + ansible.builtin.import_tasks: gmsa_keyvault.yml when: gmsa_keyvault | bool diff --git a/images/capi/ansible/windows/roles/kubernetes/tasks/kubelet.yml b/images/capi/ansible/windows/roles/kubernetes/tasks/kubelet.yml index bbdbb795a6..fac13a7d5b 100644 --- a/images/capi/ansible/windows/roles/kubernetes/tasks/kubelet.yml +++ b/images/capi/ansible/windows/roles/kubernetes/tasks/kubelet.yml @@ -28,10 +28,12 @@ ansible.windows.win_shell: New-Item -path $env:SystemDrive\var\lib\kubelet\etc\kubernetes\pki -type SymbolicLink -value $env:SystemDrive\etc\kubernetes\pki\ -Force when: kubernetes_semver is version('v1.23.0', '<') -- ansible.builtin.import_tasks: nssm.yml +- name: Import NSSM kubelet tasks + ansible.builtin.import_tasks: nssm.yml when: windows_service_manager == "nssm" -- ansible.builtin.import_tasks: sc.yml +- name: Import Windows service kubelet tasks + ansible.builtin.import_tasks: sc.yml when: windows_service_manager == "windows_service" # Dependency selection: https://www.reddit.com/r/ansible/comments/imfdgn/setting_a_variable_conditionally/g41anaf/?utm_source=reddit&utm_medium=web2x&context=3 diff --git a/images/capi/ansible/windows/roles/kubernetes/tasks/main.yml b/images/capi/ansible/windows/roles/kubernetes/tasks/main.yml index 09ae48791f..4084d1ec75 100644 --- a/images/capi/ansible/windows/roles/kubernetes/tasks/main.yml +++ b/images/capi/ansible/windows/roles/kubernetes/tasks/main.yml @@ -17,7 +17,8 @@ path: "{{ kubernetes_install_path }}" state: directory -- ansible.builtin.import_tasks: url.yml +- name: Import Kubernetes URL tasks + ansible.builtin.import_tasks: url.yml - name: Add kubernetes folder to path ansible.windows.win_path: @@ -25,4 +26,5 @@ - "{{ kubernetes_install_path }}" scope: Machine -- ansible.builtin.import_tasks: kubelet.yml +- name: Import kubelet tasks + ansible.builtin.import_tasks: kubelet.yml diff --git 
a/images/capi/ansible/windows/roles/load_additional_components/tasks/main.yml b/images/capi/ansible/windows/roles/load_additional_components/tasks/main.yml index 639c8cfde8..04cf2fa236 100644 --- a/images/capi/ansible/windows/roles/load_additional_components/tasks/main.yml +++ b/images/capi/ansible/windows/roles/load_additional_components/tasks/main.yml @@ -12,11 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.import_tasks: executables.yml +- name: Import additional executables tasks + ansible.builtin.import_tasks: executables.yml when: additional_executables | bool -- ansible.builtin.import_tasks: registry.yml +- name: Import additional registry images tasks + ansible.builtin.import_tasks: registry.yml when: additional_registry_images | bool -- ansible.builtin.import_tasks: url.yml +- name: Import additional URL images tasks + ansible.builtin.import_tasks: url.yml when: additional_url_images | bool diff --git a/images/capi/ansible/windows/roles/providers/tasks/main.yml b/images/capi/ansible/windows/roles/providers/tasks/main.yml index f59b4b326c..0740b232f0 100644 --- a/images/capi/ansible/windows/roles/providers/tasks/main.yml +++ b/images/capi/ansible/windows/roles/providers/tasks/main.yml @@ -9,8 +9,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- ansible.builtin.include_tasks: azure.yml +- name: Include Azure provider tasks + ansible.builtin.include_tasks: azure.yml when: packer_builder_type.startswith('azure') -- ansible.builtin.include_tasks: vmware.yml +- name: Include VMware provider tasks + ansible.builtin.include_tasks: vmware.yml when: packer_builder_type is search('vmware') or packer_builder_type is search('vsphere') diff --git a/images/capi/ansible/windows/roles/runtimes/tasks/main.yml b/images/capi/ansible/windows/roles/runtimes/tasks/main.yml index 80eececa8f..891ff49dde 100644 --- a/images/capi/ansible/windows/roles/runtimes/tasks/main.yml +++ b/images/capi/ansible/windows/roles/runtimes/tasks/main.yml @@ -12,5 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- ansible.builtin.import_tasks: containerd.yml +- name: Import containerd runtime tasks + ansible.builtin.import_tasks: containerd.yml when: runtime == "containerd" diff --git a/images/capi/ansible/windows/roles/systemprep/tasks/main.yml b/images/capi/ansible/windows/roles/systemprep/tasks/main.yml index 0f1b8d924a..6118096284 100644 --- a/images/capi/ansible/windows/roles/systemprep/tasks/main.yml +++ b/images/capi/ansible/windows/roles/systemprep/tasks/main.yml @@ -97,10 +97,12 @@ reboot: true when: windows_updates_category_names|length > 0 -- ansible.builtin.import_tasks: ssh-feature.yml +- name: Import SSH feature tasks + ansible.builtin.import_tasks: ssh-feature.yml when: ssh_source_url == "" -- ansible.builtin.import_tasks: ssh-archive.yml +- name: Import SSH archive tasks + ansible.builtin.import_tasks: ssh-archive.yml when: ssh_source_url != "" - name: Set default SSH shell to Powershell From 4172d7df5838c89bed48d680fde2da2c997e0de8 Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Thu, 16 Apr 2026 07:47:05 -0600 Subject: [PATCH 81/90] Fix more ansible lint violations (#1980) * Fix command-instead-of-shell ansible-lint violations * Fix package-latest 
ansible-lint violations * Fix command-instead-of-module ansible-lint violations in debian.yml Replace sed commands with ansible.builtin.replace module for nullboot post-install script modifications. The remaining command-instead-of-module violations cannot be safely converted to Ansible modules (yum clean all has no module equivalent, and sed with shell globs requires structural changes). * Fix name[template] ansible-lint violation in crictl-url.yml Move Jinja template to end of task name to satisfy the name[template] rule. * Fix schema[tasks] ansible-lint violations for become_method Use fully qualified become_method: ansible.builtin.runas instead of the short form become_method: runas. --- images/capi/.ansible-lint-ignore | 19 ------------------- .../roles/kubernetes/tasks/crictl-url.yml | 2 +- .../roles/kubernetes/tasks/ecrpull.yml | 2 +- .../roles/kubernetes/tasks/kubeadmpull.yml | 2 +- .../ansible/roles/kubernetes/tasks/url.yml | 6 ++++-- .../tasks/registry.yml | 4 +++- .../load_additional_components/tasks/url.yml | 4 +++- .../ansible/roles/providers/tasks/aws.yml | 2 +- .../roles/providers/tasks/awscliv2.yml | 2 +- .../roles/providers/tasks/cloudstack.yml | 2 +- .../roles/providers/tasks/googlecompute.yml | 2 +- .../ansible/roles/providers/tasks/raw.yml | 2 +- .../roles/providers/tasks/vmware-redhat.yml | 2 +- .../ansible/roles/setup/tasks/azurelinux.yml | 2 +- .../capi/ansible/roles/setup/tasks/debian.yml | 14 ++++++++++---- .../capi/ansible/roles/setup/tasks/redhat.yml | 2 +- .../roles/cloudbase-init/tasks/main.yml | 2 +- .../windows/roles/providers/tasks/azure.yml | 2 +- .../roles/systemprep/tasks/ssh-feature.yml | 2 +- 19 files changed, 34 insertions(+), 41 deletions(-) diff --git a/images/capi/.ansible-lint-ignore b/images/capi/.ansible-lint-ignore index 042e67a7c8..aeff3f0a5b 100644 --- a/images/capi/.ansible-lint-ignore +++ b/images/capi/.ansible-lint-ignore @@ -7,49 +7,33 @@ ansible/roles/gpu/tasks/amd.yml no-changed-when 
ansible/roles/gpu/tasks/nvidia.yml no-changed-when ansible/roles/kubernetes/defaults/main.yml var-naming[no-role-prefix] ansible/roles/kubernetes/defaults/main.yml yaml[line-length] -ansible/roles/kubernetes/tasks/crictl-url.yml name[template] -ansible/roles/kubernetes/tasks/ecrpull.yml command-instead-of-shell ansible/roles/kubernetes/tasks/ecrpull.yml no-changed-when -ansible/roles/kubernetes/tasks/kubeadmpull.yml command-instead-of-shell ansible/roles/kubernetes/tasks/kubeadmpull.yml no-changed-when ansible/roles/kubernetes/tasks/photon.yml no-changed-when -ansible/roles/kubernetes/tasks/url.yml command-instead-of-shell ansible/roles/kubernetes/tasks/url.yml no-changed-when ansible/roles/load_additional_components/defaults/main.yml var-naming[no-role-prefix] -ansible/roles/load_additional_components/tasks/registry.yml command-instead-of-shell ansible/roles/load_additional_components/tasks/registry.yml no-changed-when -ansible/roles/load_additional_components/tasks/url.yml command-instead-of-shell ansible/roles/load_additional_components/tasks/url.yml no-changed-when ansible/roles/load_additional_components/tasks/url.yml yaml[line-length] ansible/roles/node/defaults/main.yml var-naming[no-role-prefix] ansible/roles/node/tasks/main.yml command-instead-of-module ansible/roles/node/tasks/main.yml no-changed-when ansible/roles/providers/defaults/main.yml var-naming[no-role-prefix] -ansible/roles/providers/tasks/aws.yml command-instead-of-shell ansible/roles/providers/tasks/aws.yml no-changed-when ansible/roles/providers/tasks/awscliv2.yml no-changed-when -ansible/roles/providers/tasks/awscliv2.yml package-latest -ansible/roles/providers/tasks/cloudstack.yml command-instead-of-shell ansible/roles/providers/tasks/cloudstack.yml no-changed-when -ansible/roles/providers/tasks/googlecompute.yml command-instead-of-shell ansible/roles/providers/tasks/googlecompute.yml no-changed-when -ansible/roles/providers/tasks/raw.yml command-instead-of-shell 
ansible/roles/providers/tasks/raw.yml no-changed-when ansible/roles/providers/tasks/vmware-photon.yml no-changed-when -ansible/roles/providers/tasks/vmware-redhat.yml command-instead-of-shell ansible/roles/providers/tasks/vmware-redhat.yml no-changed-when ansible/roles/python/defaults/main.yml var-naming[no-role-prefix] ansible/roles/python/tasks/flatcar.yml no-changed-when ansible/roles/python/tasks/main.yml no-changed-when ansible/roles/setup/defaults/main.yml var-naming[no-role-prefix] -ansible/roles/setup/tasks/azurelinux.yml package-latest -ansible/roles/setup/tasks/debian.yml command-instead-of-module ansible/roles/setup/tasks/debian.yml no-changed-when -ansible/roles/setup/tasks/debian.yml package-latest ansible/roles/setup/tasks/photon.yml no-changed-when ansible/roles/setup/tasks/redhat.yml command-instead-of-module ansible/roles/setup/tasks/redhat.yml no-changed-when -ansible/roles/setup/tasks/redhat.yml package-latest ansible/roles/setup/tasks/rpm_repos.yml no-changed-when ansible/roles/sysprep/defaults/main.yml var-naming[no-role-prefix] ansible/roles/sysprep/tasks/debian.yml no-changed-when @@ -60,15 +44,12 @@ ansible/roles/sysprep/tasks/redhat.yml command-instead-of-module ansible/roles/sysprep/tasks/redhat.yml no-changed-when ansible/roles/sysprep/tasks/rpm_repos.yml no-changed-when ansible/windows/example.vars.yml yaml[line-length] -ansible/windows/roles/cloudbase-init/tasks/main.yml schema[tasks] ansible/windows/roles/debug/defaults/main.yml var-naming[no-role-prefix] ansible/windows/roles/kubernetes/tasks/kubelet.yml yaml[line-length] ansible/windows/roles/kubernetes/tasks/sc.yml yaml[line-length] ansible/windows/roles/load_additional_components/defaults/main.yml var-naming[no-role-prefix] ansible/windows/roles/providers/defaults/main.yml var-naming[no-role-prefix] -ansible/windows/roles/providers/tasks/azure.yml schema[tasks] ansible/windows/roles/providers/tasks/azure.yml yaml[line-length] ansible/windows/roles/runtimes/defaults/main.yml 
var-naming[no-role-prefix] ansible/windows/roles/systemprep/defaults/main.yml var-naming[no-role-prefix] ansible/windows/roles/systemprep/tasks/main.yml ignore-errors -ansible/windows/roles/systemprep/tasks/ssh-feature.yml schema[tasks] diff --git a/images/capi/ansible/roles/kubernetes/tasks/crictl-url.yml b/images/capi/ansible/roles/kubernetes/tasks/crictl-url.yml index a7553da27b..58e1066f62 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/crictl-url.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/crictl-url.yml @@ -19,7 +19,7 @@ dest: /tmp/{{ crictl_filename }} mode: "0600" -- name: Create "{{ sysusrlocal_prefix }}/bin" directory +- name: Create crictl bin directory ansible.builtin.file: state: directory path: "{{ sysusrlocal_prefix }}/bin" diff --git a/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml b/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml index 6c601ba7f4..b17f1460d9 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml @@ -7,7 +7,7 @@ mode: "0600" - name: Get images list - ansible.builtin.shell: kubeadm config images list --config /etc/kubeadm.yml + ansible.builtin.command: kubeadm config images list --config /etc/kubeadm.yml register: images_list - name: Log into ECR diff --git a/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml b/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml index 2986de9478..af7d370df8 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml @@ -6,7 +6,7 @@ mode: "0600" - name: Kubeadm pull images - ansible.builtin.shell: kubeadm config images pull --config /etc/kubeadm.yml --cri-socket {{ containerd_cri_socket }} + ansible.builtin.command: kubeadm config images pull --config /etc/kubeadm.yml --cri-socket {{ containerd_cri_socket }} - name: Delete kubeadm config ansible.builtin.file: diff --git 
a/images/capi/ansible/roles/kubernetes/tasks/url.yml b/images/capi/ansible/roles/kubernetes/tasks/url.yml index c8407863ab..0e8185f3d9 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/url.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/url.yml @@ -68,7 +68,7 @@ - name: Modify Kubernetes images # Strip the arch from the name of the image to prevent image from being pulled again by kubeadm - ansible.builtin.shell: /tmp/modify-k8s-img.sh {{ item }} + ansible.builtin.command: /tmp/modify-k8s-img.sh {{ item }} loop: "{{ kubernetes_imgs }}" - name: Remove Kubernetes image modification script @@ -77,7 +77,9 @@ path: /tmp/modify-k8s-img.sh - name: Load Kubernetes images - ansible.builtin.shell: CONTAINERD_NAMESPACE="k8s.io" {{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images import /tmp/{{ item }} + ansible.builtin.command: "{{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images import /tmp/{{ item }}" + environment: + CONTAINERD_NAMESPACE: k8s.io loop: "{{ kubernetes_imgs }}" - name: Remove Kubernetes images diff --git a/images/capi/ansible/roles/load_additional_components/tasks/registry.yml b/images/capi/ansible/roles/load_additional_components/tasks/registry.yml index 10651158e0..8f7e675e19 100644 --- a/images/capi/ansible/roles/load_additional_components/tasks/registry.yml +++ b/images/capi/ansible/roles/load_additional_components/tasks/registry.yml @@ -13,7 +13,9 @@ # limitations under the License. 
--- - name: Pull additional images from registry - ansible.builtin.shell: CONTAINERD_NAMESPACE="k8s.io" {{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images pull {{ item }} + ansible.builtin.command: "{{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images pull {{ item }}" + environment: + CONTAINERD_NAMESPACE: k8s.io loop: "{{ additional_registry_images_list.split(',') }}" retries: 5 delay: 3 diff --git a/images/capi/ansible/roles/load_additional_components/tasks/url.yml b/images/capi/ansible/roles/load_additional_components/tasks/url.yml index 12ae550189..82f8473237 100644 --- a/images/capi/ansible/roles/load_additional_components/tasks/url.yml +++ b/images/capi/ansible/roles/load_additional_components/tasks/url.yml @@ -29,7 +29,9 @@ delay: 3 - name: Load additional images - ansible.builtin.shell: CONTAINERD_NAMESPACE="k8s.io" {{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images import --no-unpack {{ item.dest }} + ansible.builtin.command: "{{ sysusr_prefix }}/bin/ctr --address={{ containerd_cri_socket }} images import --no-unpack {{ item.dest }}" + environment: + CONTAINERD_NAMESPACE: k8s.io loop: "{{ images.results }}" - name: Remove downloaded files diff --git a/images/capi/ansible/roles/providers/tasks/aws.yml b/images/capi/ansible/roles/providers/tasks/aws.yml index 5427ed5f58..2474d01ca8 100644 --- a/images/capi/ansible/roles/providers/tasks/aws.yml +++ b/images/capi/ansible/roles/providers/tasks/aws.yml @@ -36,7 +36,7 @@ when: ansible_os_family == "RedHat" - name: Install aws agents Ubuntu - ansible.builtin.shell: snap install amazon-ssm-agent --classic + ansible.builtin.command: snap install amazon-ssm-agent --classic when: ansible_distribution == "Ubuntu" - name: Ensure ssm agent is running Ubuntu diff --git a/images/capi/ansible/roles/providers/tasks/awscliv2.yml b/images/capi/ansible/roles/providers/tasks/awscliv2.yml index 2d79ee0197..66031097d0 100644 --- 
a/images/capi/ansible/roles/providers/tasks/awscliv2.yml +++ b/images/capi/ansible/roles/providers/tasks/awscliv2.yml @@ -1,5 +1,5 @@ --- -- name: Upgrade pip to latest +- name: Upgrade pip to latest # noqa: package-latest ansible.builtin.pip: name: pip executable: pip3 diff --git a/images/capi/ansible/roles/providers/tasks/cloudstack.yml b/images/capi/ansible/roles/providers/tasks/cloudstack.yml index 3a427ee500..88adaa204b 100644 --- a/images/capi/ansible/roles/providers/tasks/cloudstack.yml +++ b/images/capi/ansible/roles/providers/tasks/cloudstack.yml @@ -26,7 +26,7 @@ mode: "0644" - name: Run dracut cmd to regenerate initramfs with all drivers - needed when converting to different hypervisor templates - ansible.builtin.shell: dracut --force --no-hostonly + ansible.builtin.command: dracut --force --no-hostonly when: ansible_os_family == "RedHat" - name: Add draut cmd to regenerate initramfs with only necessary drivers on first boot diff --git a/images/capi/ansible/roles/providers/tasks/googlecompute.yml b/images/capi/ansible/roles/providers/tasks/googlecompute.yml index ec58b9b69b..1e7c5ed5a9 100644 --- a/images/capi/ansible/roles/providers/tasks/googlecompute.yml +++ b/images/capi/ansible/roles/providers/tasks/googlecompute.yml @@ -21,7 +21,7 @@ dest: /tmp/install-gcloud.sh mode: "0700" - name: Execute install-gcloud.sh - ansible.builtin.shell: bash -o errexit -o pipefail /tmp/install-gcloud.sh --disable-prompts --install-dir=/ + ansible.builtin.command: bash -o errexit -o pipefail /tmp/install-gcloud.sh --disable-prompts --install-dir=/ - name: Remove install-gcloud.sh ansible.builtin.file: path: /tmp/install-gcloud.sh diff --git a/images/capi/ansible/roles/providers/tasks/raw.yml b/images/capi/ansible/roles/providers/tasks/raw.yml index 2bcd880640..26094b3346 100644 --- a/images/capi/ansible/roles/providers/tasks/raw.yml +++ b/images/capi/ansible/roles/providers/tasks/raw.yml @@ -36,7 +36,7 @@ when: ansible_os_family == "RedHat" - name: Run dracut cmd to 
regenerate initramfs with all drivers - needed when converting to different hypervisor templates - ansible.builtin.shell: dracut --force --no-hostonly + ansible.builtin.command: dracut --force --no-hostonly when: ansible_os_family == "RedHat" - name: Symlink /usr/libexec/cloud-init to /usr/lib/cloud-init diff --git a/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml b/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml index 459a24e4d3..82146ddb72 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml +++ b/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml @@ -59,7 +59,7 @@ mode: "0700" - name: Execute cloud-init-vmware.sh - ansible.builtin.shell: bash -o errexit -o pipefail /tmp/cloud-init-vmware.sh + ansible.builtin.command: bash -o errexit -o pipefail /tmp/cloud-init-vmware.sh environment: REPO_SLUG: "{{ guestinfo_datasource_slug }}" GIT_REF: "{{ guestinfo_datasource_ref }}" diff --git a/images/capi/ansible/roles/setup/tasks/azurelinux.yml b/images/capi/ansible/roles/setup/tasks/azurelinux.yml index 9704921f55..7b956f1304 100644 --- a/images/capi/ansible/roles/setup/tasks/azurelinux.yml +++ b/images/capi/ansible/roles/setup/tasks/azurelinux.yml @@ -15,7 +15,7 @@ - name: Import RPM repository tasks ansible.builtin.import_tasks: rpm_repos.yml -- name: Perform a tdnf update +- name: Perform a tdnf update # noqa: package-latest ansible.builtin.dnf: name: "*" state: latest diff --git a/images/capi/ansible/roles/setup/tasks/debian.yml b/images/capi/ansible/roles/setup/tasks/debian.yml index 41e0fd631e..5c07585c5e 100644 --- a/images/capi/ansible/roles/setup/tasks/debian.yml +++ b/images/capi/ansible/roles/setup/tasks/debian.yml @@ -80,7 +80,10 @@ when: packer_build_name is search('cvm') - name: Add '--no-tpm --no-efivars' to nullboot post install script - ansible.builtin.command: sed -i 's/nullbootctl/nullbootctl --no-tpm --no-efivars/' /var/lib/dpkg/info/nullboot.postinst + ansible.builtin.replace: + path: 
/var/lib/dpkg/info/nullboot.postinst + regexp: nullbootctl + replace: nullbootctl --no-tpm --no-efivars when: packer_build_name is search('cvm') - name: Perform a dist-upgrade @@ -93,7 +96,7 @@ retries: 5 delay: 10 -- name: Install baseline dependencies +- name: Install baseline dependencies # noqa: package-latest ansible.builtin.apt: force_apt_get: true update_cache: true @@ -104,7 +107,7 @@ retries: 5 delay: 10 -- name: Install extra debs +- name: Install extra debs # noqa: package-latest ansible.builtin.apt: force_apt_get: true name: "{{ extra_debs.split() }}" @@ -126,5 +129,8 @@ delay: 10 - name: Remove '--no-tpm --no-efivars' from nullboot post install script - ansible.builtin.command: sed -i 's/nullbootctl --no-tpm --no-efivars/nullbootctl/' /var/lib/dpkg/info/nullboot.postinst + ansible.builtin.replace: + path: /var/lib/dpkg/info/nullboot.postinst + regexp: nullbootctl --no-tpm --no-efivars + replace: nullbootctl when: packer_build_name is search('cvm') diff --git a/images/capi/ansible/roles/setup/tasks/redhat.yml b/images/capi/ansible/roles/setup/tasks/redhat.yml index d8c21e7342..0ceb1599cc 100644 --- a/images/capi/ansible/roles/setup/tasks/redhat.yml +++ b/images/capi/ansible/roles/setup/tasks/redhat.yml @@ -54,7 +54,7 @@ - name: Import RPM repository tasks ansible.builtin.import_tasks: rpm_repos.yml -- name: Perform a yum update +- name: Perform a yum update # noqa: package-latest ansible.builtin.dnf: name: "*" state: latest diff --git a/images/capi/ansible/windows/roles/cloudbase-init/tasks/main.yml b/images/capi/ansible/windows/roles/cloudbase-init/tasks/main.yml index 93893e4616..45674a2458 100644 --- a/images/capi/ansible/windows/roles/cloudbase-init/tasks/main.yml +++ b/images/capi/ansible/windows/roles/cloudbase-init/tasks/main.yml @@ -49,5 +49,5 @@ Remove-Item -Force {{ systemdrive.stdout | trim }}\Windows\Setup\Scripts\SetupComplete.cmd & "{{ programfiles.stdout | trim }}\Cloudbase Solutions\Cloudbase-Init\bin\SetSetupComplete.cmd" become: true - 
become_method: runas + become_method: ansible.builtin.runas become_user: System diff --git a/images/capi/ansible/windows/roles/providers/tasks/azure.yml b/images/capi/ansible/windows/roles/providers/tasks/azure.yml index c67063500c..0bd0ae7b09 100644 --- a/images/capi/ansible/windows/roles/providers/tasks/azure.yml +++ b/images/capi/ansible/windows/roles/providers/tasks/azure.yml @@ -30,7 +30,7 @@ $r = New-NetFirewallRule -DisplayName 'Block-Outbound-168.63.129.16-port-80-for-cve-2021-27075' -Direction Outbound -RemoteAddress '168.63.129.16' -RemotePort '80' -Protocol TCP -Action Block $r | Get-NetFirewallSecurityFilter | Set-NetFirewallSecurityFilter -LocalUser "O:LSD:(D;;CC;;;S-1-5-18)(D;;CC;;;$($wsg.SID.Value))(A;;CC;;;S-1-1-0)" become: true - become_method: runas + become_method: ansible.builtin.runas become_user: SYSTEM - name: Add users to WireServerAccessGroup diff --git a/images/capi/ansible/windows/roles/systemprep/tasks/ssh-feature.yml b/images/capi/ansible/windows/roles/systemprep/tasks/ssh-feature.yml index 5c841ac096..5bb543345f 100644 --- a/images/capi/ansible/windows/roles/systemprep/tasks/ssh-feature.yml +++ b/images/capi/ansible/windows/roles/systemprep/tasks/ssh-feature.yml @@ -18,5 +18,5 @@ - name: Install OpenSSH ansible.windows.win_shell: Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0 become: true - become_method: runas + become_method: ansible.builtin.runas become_user: SYSTEM From 33c517c8fa547a6fd5c2e4309e02554d1d60ee6b Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Mon, 20 Apr 2026 11:25:40 -0600 Subject: [PATCH 82/90] Add westus2 to default replicated regions for Azure SIG images --- .github/workflows/build-azure-sig.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-azure-sig.yaml b/.github/workflows/build-azure-sig.yaml index dd0e60586d..078a98cfd1 100644 --- a/.github/workflows/build-azure-sig.yaml +++ b/.github/workflows/build-azure-sig.yaml @@ -90,7 +90,7 @@ on: 
description: 'Space-separated Azure regions to replicate the image to (image build region is always included)' required: false type: string - default: 'australiaeast canadacentral eastus eastus2 francecentral germanywestcentral northcentralus northeurope switzerlandnorth uksouth westeurope' + default: 'australiaeast canadacentral eastus eastus2 francecentral germanywestcentral northcentralus northeurope switzerlandnorth uksouth westeurope westus2' skip_test: description: 'Skip the test stage' required: false From acea2c38fbb38162e3e20c5f033717016be554dc Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Tue, 21 Apr 2026 04:03:58 -0600 Subject: [PATCH 83/90] Migrate bare ansible_* facts to ansible_facts[] dict form (#1981) * Migrate bare ansible_* facts to ansible_facts[] dict form Set inject_facts_as_vars=False in ansible.cfg and convert all bare ansible_* fact references (e.g. ansible_os_family) to the dict form (ansible_facts['os_family']) across playbooks, roles, and templates. This eliminates the INJECT_FACTS_AS_VARS deprecation warnings emitted by ansible-core >=2.18. The current default (True) will flip to False in ansible-core 2.24, so this change future-proofs the codebase. The Flatcar OS-family override tasks retain bare ansible_os_family as the set_fact key, which Ansible syncs back into ansible_facts. * Fix yaml line-length lint violations from facts migration Wrap long when-conditions and a checksum URL that exceeded the 160 character limit after the ansible_* to ansible_facts[] conversion. * Fix bare 'services' variable from service_facts in debian.yml With inject_facts_as_vars=False, the ansible.builtin.service_facts module no longer injects bare 'services' into the namespace. Use ansible_facts.services instead. 
--- images/capi/ansible.cfg | 1 + images/capi/ansible/node.yml | 2 +- .../ansible/roles/containerd/tasks/main.yml | 29 ++++++------ .../ecr_credential_provider/tasks/main.yaml | 4 +- .../ansible/roles/firstboot/meta/main.yml | 2 +- .../ansible/roles/firstboot/tasks/main.yaml | 2 +- .../capi/ansible/roles/gpu/defaults/main.yml | 2 +- images/capi/ansible/roles/gpu/tasks/amd.yml | 12 ++--- .../capi/ansible/roles/gpu/tasks/nvidia.yml | 8 ++-- .../roles/kubernetes/tasks/ecrpull.yml | 2 +- .../roles/kubernetes/tasks/kubeadmpull.yml | 2 +- .../ansible/roles/kubernetes/tasks/main.yml | 18 ++++---- .../load_additional_components/tasks/s3.yml | 4 +- .../capi/ansible/roles/node/defaults/main.yml | 4 +- images/capi/ansible/roles/node/meta/main.yml | 16 +++---- images/capi/ansible/roles/node/tasks/main.yml | 20 ++++---- .../ansible/roles/providers/tasks/aws.yml | 18 ++++---- .../roles/providers/tasks/awscliv2.yml | 26 +++++------ .../ansible/roles/providers/tasks/azure.yml | 20 +++++--- .../roles/providers/tasks/azurecli.yml | 6 +-- .../roles/providers/tasks/cloudstack.yml | 4 +- .../roles/providers/tasks/googlecompute.yml | 8 ++-- .../ansible/roles/providers/tasks/hcloud.yml | 14 +++--- .../roles/providers/tasks/huaweicloud.yml | 8 ++-- .../ansible/roles/providers/tasks/maas.yml | 2 +- .../ansible/roles/providers/tasks/main.yml | 24 +++++----- .../ansible/roles/providers/tasks/nutanix.yml | 4 +- .../ansible/roles/providers/tasks/oci.yml | 6 +-- .../roles/providers/tasks/openstack.yml | 12 ++--- .../roles/providers/tasks/outscale.yml | 4 +- .../ansible/roles/providers/tasks/proxmox.yml | 14 +++--- .../ansible/roles/providers/tasks/qemu.yml | 10 ++-- .../ansible/roles/providers/tasks/raw.yml | 10 ++-- .../roles/providers/tasks/scaleway.yml | 2 +- .../roles/providers/tasks/vmware-redhat.yml | 4 +- .../roles/providers/tasks/vmware-ubuntu.yml | 4 +- .../ansible/roles/providers/tasks/vmware.yml | 14 +++--- .../capi/ansible/roles/python/tasks/main.yml | 2 +- 
.../ansible/roles/security/tasks/falco.yml | 12 ++--- .../ansible/roles/security/tasks/trivy.yml | 12 ++--- .../ansible/roles/setup/defaults/main.yml | 2 +- .../roles/setup/tasks/bootstrap-flatcar.yml | 2 +- .../capi/ansible/roles/setup/tasks/debian.yml | 7 ++- .../capi/ansible/roles/setup/tasks/main.yml | 12 ++--- .../capi/ansible/roles/setup/tasks/redhat.yml | 2 +- .../etc/apt/sources.list.d/ubuntu.sources.j2 | 4 +- .../setup/templates/etc/apt/sources.list.j2 | 8 ++-- .../roles/sysprep/tasks/azurelinux.yml | 2 +- .../ansible/roles/sysprep/tasks/debian.yml | 12 ++--- .../capi/ansible/roles/sysprep/tasks/main.yml | 46 ++++++++++--------- .../ansible/roles/sysprep/tasks/redhat.yml | 12 ++--- 51 files changed, 245 insertions(+), 232 deletions(-) diff --git a/images/capi/ansible.cfg b/images/capi/ansible.cfg index b086976e19..48c6d1e646 100644 --- a/images/capi/ansible.cfg +++ b/images/capi/ansible.cfg @@ -15,6 +15,7 @@ [defaults] remote_tmp = /tmp/.ansible display_skipped_hosts = False +inject_facts_as_vars = False [ssh_connection] pipelining = False diff --git a/images/capi/ansible/node.yml b/images/capi/ansible/node.yml index e6dd3b8294..09e0c2c804 100644 --- a/images/capi/ansible/node.yml +++ b/images/capi/ansible/node.yml @@ -20,7 +20,7 @@ node_custom_roles_post: "" custom_role_names: "" system: linux - arch_uname: "{{ ansible_architecture }}" + arch_uname: "{{ ansible_facts['architecture'] }}" arch: "{{ {'x86_64': 'amd64', 'amd64': 'amd64', 'aarch64': 'arm64', 'arm64': 'arm64', 'ppc64le': 'ppc64le'}.get(arch_uname, 'unsupported') }}" tasks: diff --git a/images/capi/ansible/roles/containerd/tasks/main.yml b/images/capi/ansible/roles/containerd/tasks/main.yml index 8b14835583..b76ae006db 100644 --- a/images/capi/ansible/roles/containerd/tasks/main.yml +++ b/images/capi/ansible/roles/containerd/tasks/main.yml @@ -14,15 +14,15 @@ --- - name: Import Debian containerd tasks ansible.builtin.import_tasks: debian.yml - when: ansible_os_family == "Debian" + when: 
ansible_facts['os_family'] == "Debian" - name: Import RedHat containerd tasks ansible.builtin.import_tasks: redhat.yml - when: ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux", "RedHat"] + when: ansible_facts['os_family'] in ["Common Base Linux Mariner", "Microsoft Azure Linux", "RedHat"] - name: Import Photon containerd tasks ansible.builtin.import_tasks: photon.yml - when: ansible_os_family == "VMware Photon OS" + when: ansible_facts['os_family'] == "VMware Photon OS" # TODO(vincepri): Use deb/rpm packages once available. # See https://github.com/containerd/containerd/issues/1508 for context. @@ -73,7 +73,7 @@ dest: "{{ containerd_prefix | default('/usr/local') }}" extra_opts: - --no-overwrite-dir - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Copy containerd.service to /etc/systemd/system ansible.builtin.copy: @@ -88,7 +88,7 @@ src: /tmp/runc dest: /usr/local/sbin/runc mode: "0755" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" # Install containerd Wasm shims specified in a comma-separated string. Known runtimes are 'lunatic', 'slight', 'spin', and 'wws'. 
- name: Unpack containerd-wasm-shims @@ -98,7 +98,7 @@ dest: "{{ sysusr_prefix }}/bin" extra_opts: - --no-overwrite-dir - when: ansible_os_family != "Flatcar" and (containerd_wasm_shims_runtimes | length > 0) + when: ansible_facts['os_family'] != "Flatcar" and (containerd_wasm_shims_runtimes | length > 0) loop: "{{ containerd_wasm_shims_runtimes | split(',') }}" - name: Unpack containerd for Flatcar to /opt/bin @@ -106,7 +106,7 @@ remote_src: true src: /tmp/{{ containerd_filename }} dest: "{{ containerd_prefix | default('/opt') }}" - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Copy runc to /opt/bin ansible.builtin.copy: @@ -114,7 +114,7 @@ src: /tmp/runc dest: /opt/bin/runc mode: "0755" - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" # Install containerd Wasm shims specified in a comma-separated string. Known runtimes are 'lunatic', 'slight', 'spin', and 'wws'. - name: Unpack containerd-wasm-shims for Flatcar to /opt/bin @@ -124,7 +124,7 @@ dest: "{{ sysusr_prefix }}/bin" extra_opts: - --no-overwrite-dir - when: ansible_os_family == "Flatcar" and (containerd_wasm_shims_runtimes | length > 0) + when: ansible_facts['os_family'] == "Flatcar" and (containerd_wasm_shims_runtimes | length > 0) loop: "{{ containerd_wasm_shims_runtimes | split(',') }}" - name: Create unit file directory @@ -138,7 +138,7 @@ dest: /etc/systemd/system/containerd.service.d/10-opt-bin-custom.conf src: etc/systemd/system/containerd-flatcar.conf mode: "0600" - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Create containerd memory pressure drop-in file ansible.builtin.template: @@ -157,7 +157,7 @@ dest: /etc/systemd/system/containerd.service.d/limit-nofile.conf src: etc/systemd/system/containerd.service.d/limit-nofile.conf mode: "0644" - when: ansible_os_family in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linux"] + when: ansible_facts['os_family'] 
in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linux"] - name: Create containerd http proxy conf file if needed ansible.builtin.template: @@ -210,7 +210,7 @@ - ctr - crictl - critest - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Delete containerd tarball ansible.builtin.file: @@ -237,11 +237,12 @@ - name: Download runsc for gvisor ansible.builtin.get_url: dest: "{{ sysusr_prefix }}/bin/{{ item }}" - url: https://storage.googleapis.com/gvisor/releases/release/{{ containerd_gvisor_version }}/{{ ansible_architecture }}/{{ item }} + url: https://storage.googleapis.com/gvisor/releases/release/{{ containerd_gvisor_version }}/{{ ansible_facts['architecture'] }}/{{ item }} mode: "0755" owner: root group: root - checksum: sha512:https://storage.googleapis.com/gvisor/releases/release/{{ containerd_gvisor_version }}/{{ ansible_architecture }}/{{ item }}.sha512 + checksum: >- + sha512:https://storage.googleapis.com/gvisor/releases/release/{{ containerd_gvisor_version }}/{{ ansible_facts['architecture'] }}/{{ item }}.sha512 loop: - runsc - containerd-shim-runsc-v1 diff --git a/images/capi/ansible/roles/ecr_credential_provider/tasks/main.yaml b/images/capi/ansible/roles/ecr_credential_provider/tasks/main.yaml index fd643cd97a..173cb3d8e0 100644 --- a/images/capi/ansible/roles/ecr_credential_provider/tasks/main.yaml +++ b/images/capi/ansible/roles/ecr_credential_provider/tasks/main.yaml @@ -37,7 +37,7 @@ block: - name: Ensure kubelet config exists ansible.builtin.stat: - path: "{{ '/etc/default/kubelet' if ansible_os_family == 'Debian' else '/etc/sysconfig/kubelet' }}" + path: "{{ '/etc/default/kubelet' if ansible_facts['os_family'] == 'Debian' else '/etc/sysconfig/kubelet' }}" register: kubelet_config failed_when: not kubelet_config.stat.exists @@ -45,6 +45,6 @@ when: kubelet_config.stat.exists ansible.builtin.shell: | set -e -o pipefail - sed -Ei 's|^(KUBELET_EXTRA_ARGS.*)|\1 
--image-credential-provider-config=/var/usr/ecr-credential-provider/ecr-credential-provider-config --image-credential-provider-bin-dir={{ ecr_credential_provider_install_dir }}|' {{ '/etc/default/kubelet' if ansible_os_family == 'Debian' else '/etc/sysconfig/kubelet' }} + sed -Ei 's|^(KUBELET_EXTRA_ARGS.*)|\1 --image-credential-provider-config=/var/usr/ecr-credential-provider/ecr-credential-provider-config --image-credential-provider-bin-dir={{ ecr_credential_provider_install_dir }}|' {{ '/etc/default/kubelet' if ansible_facts['os_family'] == 'Debian' else '/etc/sysconfig/kubelet' }} args: executable: /bin/bash diff --git a/images/capi/ansible/roles/firstboot/meta/main.yml b/images/capi/ansible/roles/firstboot/meta/main.yml index aad01df663..004bdd1dc4 100644 --- a/images/capi/ansible/roles/firstboot/meta/main.yml +++ b/images/capi/ansible/roles/firstboot/meta/main.yml @@ -17,7 +17,7 @@ dependencies: vars: rpms: "" debs: "" - when: ansible_os_family == "VMware Photon OS" + when: ansible_facts['os_family'] == "VMware Photon OS" - role: setup vars: diff --git a/images/capi/ansible/roles/firstboot/tasks/main.yaml b/images/capi/ansible/roles/firstboot/tasks/main.yaml index c6d0bd8bde..9cc7900fbe 100644 --- a/images/capi/ansible/roles/firstboot/tasks/main.yaml +++ b/images/capi/ansible/roles/firstboot/tasks/main.yaml @@ -15,7 +15,7 @@ - name: Include Photon firstboot tasks ansible.builtin.include_tasks: photon.yml - when: ansible_os_family == "VMware Photon OS" + when: ansible_facts['os_family'] == "VMware Photon OS" - name: Include QEMU firstboot tasks ansible.builtin.include_tasks: qemu.yml diff --git a/images/capi/ansible/roles/gpu/defaults/main.yml b/images/capi/ansible/roles/gpu/defaults/main.yml index ad75c953e9..3cba115a94 100644 --- a/images/capi/ansible/roles/gpu/defaults/main.yml +++ b/images/capi/ansible/roles/gpu/defaults/main.yml @@ -16,5 +16,5 @@ gpu_amd_usecase: dkms gpu_block_nouveau_loading: false gpu_systemd_networkd_update_initramfs: >- - {%- if 
ansible_os_family == 'VMware Photon OS' -%} dracut -f{%- elif ansible_os_family == 'Debian' -%} update-initramfs -u{%- endif -%} + {%- if ansible_facts['os_family'] == 'VMware Photon OS' -%} dracut -f{%- elif ansible_facts['os_family'] == 'Debian' -%} update-initramfs -u{%- endif -%} gpu_nvidia_ceph: false diff --git a/images/capi/ansible/roles/gpu/tasks/amd.yml b/images/capi/ansible/roles/gpu/tasks/amd.yml index eb5ef1d52a..43b21d6185 100644 --- a/images/capi/ansible/roles/gpu/tasks/amd.yml +++ b/images/capi/ansible/roles/gpu/tasks/amd.yml @@ -18,12 +18,12 @@ name: root groups: render,video append: true - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install the .deb for AMDGPU-Install ansible.builtin.apt: deb: https://repo.radeon.com/amdgpu-install/{{ amd_version }}/ubuntu/jammy/amdgpu-install_{{ amd_deb_version }}_all.deb - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Perform a cache update ansible.builtin.apt: @@ -33,19 +33,19 @@ until: apt_lock_status is not failed retries: 5 delay: 10 - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install packages required for AMD driver installation become: true ansible.builtin.apt: pkg: - - linux-headers-{{ ansible_kernel }} - - linux-modules-extra-{{ ansible_kernel }} + - linux-headers-{{ ansible_facts['kernel'] }} + - linux-modules-extra-{{ ansible_facts['kernel'] }} - build-essential - dkms - rocminfo - clinfo - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Run AMDGPU_Install binary with use-cases ansible.builtin.command: diff --git a/images/capi/ansible/roles/gpu/tasks/nvidia.yml b/images/capi/ansible/roles/gpu/tasks/nvidia.yml index 95b5b1c38d..7629ca9182 100644 --- a/images/capi/ansible/roles/gpu/tasks/nvidia.yml +++ b/images/capi/ansible/roles/gpu/tasks/nvidia.yml @@ -16,7 +16,7 @@ - name: Add NVIDIA package signing key 
ansible.builtin.apt_key: url: https://nvidia.github.io/libnvidia-container/gpgkey - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Perform a cache update ansible.builtin.apt: @@ -26,7 +26,7 @@ until: apt_lock_status is not failed retries: 5 delay: 10 - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install packages for building NVIDIA driver kernel module and interacting with s3 endpoint become: true @@ -37,7 +37,7 @@ - dkms - python3-boto3 - python3-botocore - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Make /etc/nvidia/ClientConfigToken directory become: true @@ -119,4 +119,4 @@ pkg: - python3-boto3 - python3-botocore - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml b/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml index b17f1460d9..7abc21a7fe 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/ecrpull.yml @@ -27,4 +27,4 @@ ansible.builtin.file: path: /etc/kubeadm.yml state: absent - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" diff --git a/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml b/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml index af7d370df8..b680b05a05 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml +++ b/images/capi/ansible/roles/kubernetes/tasks/kubeadmpull.yml @@ -12,4 +12,4 @@ ansible.builtin.file: path: /etc/kubeadm.yml state: absent - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" diff --git a/images/capi/ansible/roles/kubernetes/tasks/main.yml b/images/capi/ansible/roles/kubernetes/tasks/main.yml index d510b3a28a..c2e563d0fe 100644 --- a/images/capi/ansible/roles/kubernetes/tasks/main.yml +++ 
b/images/capi/ansible/roles/kubernetes/tasks/main.yml @@ -14,19 +14,19 @@ --- - name: Import Debian Kubernetes tasks ansible.builtin.import_tasks: debian.yml - when: kubernetes_source_type == "pkg" and ansible_os_family == "Debian" + when: kubernetes_source_type == "pkg" and ansible_facts['os_family'] == "Debian" - name: Import Azure Linux Kubernetes tasks ansible.builtin.import_tasks: azurelinux.yml - when: kubernetes_source_type == "pkg" and ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux"] + when: kubernetes_source_type == "pkg" and ansible_facts['os_family'] in ["Common Base Linux Mariner", "Microsoft Azure Linux"] - name: Import RedHat Kubernetes tasks ansible.builtin.import_tasks: redhat.yml - when: kubernetes_source_type == "pkg" and ansible_os_family == "RedHat" + when: kubernetes_source_type == "pkg" and ansible_facts['os_family'] == "RedHat" - name: Import Photon Kubernetes tasks ansible.builtin.import_tasks: photon.yml - when: kubernetes_source_type == "pkg" and ansible_os_family == "VMware Photon OS" + when: kubernetes_source_type == "pkg" and ansible_facts['os_family'] == "VMware Photon OS" - name: Import URL Kubernetes tasks ansible.builtin.import_tasks: url.yml @@ -40,7 +40,7 @@ - name: Create kubelet default config file ansible.builtin.template: src: etc/sysconfig/kubelet - dest: "{{ '/etc/default/kubelet' if ansible_os_family == 'Debian' else '/etc/sysconfig/kubelet' }}" + dest: "{{ '/etc/default/kubelet' if ansible_facts['os_family'] == 'Debian' else '/etc/sysconfig/kubelet' }}" owner: root group: root mode: "0644" @@ -88,19 +88,19 @@ ansible.builtin.shell: cmd: "{{ sysusr_prefix }}/bin/kubectl completion bash > {{ sysusr_prefix }}/share/bash-completion/completions/kubectl" creates: "{{ sysusr_prefix }}/share/bash-completion/completions/kubectl" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Generate kubeadm bash completion ansible.builtin.shell: cmd: "{{ sysusr_prefix 
}}/bin/kubeadm completion bash > {{ sysusr_prefix }}/share/bash-completion/completions/kubeadm" creates: "{{ sysusr_prefix }}/share/bash-completion/completions/kubeadm" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Generate crictl bash completion ansible.builtin.shell: cmd: "{{ sysusr_prefix }}/bin/crictl completion bash > {{ sysusr_prefix }}/share/bash-completion/completions/crictl" creates: "{{ sysusr_prefix }}/share/bash-completion/completions/crictl" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Set KUBECONFIG variable and alias ansible.builtin.copy: @@ -117,7 +117,7 @@ - name: Import kubeadm pull tasks ansible.builtin.import_tasks: kubeadmpull.yml - when: (kubernetes_source_type == "pkg" and not ecr) or ansible_os_family == "Flatcar" + when: (kubernetes_source_type == "pkg" and not ecr) or ansible_facts['os_family'] == "Flatcar" - name: Import ECR pull tasks ansible.builtin.import_tasks: ecrpull.yml diff --git a/images/capi/ansible/roles/load_additional_components/tasks/s3.yml b/images/capi/ansible/roles/load_additional_components/tasks/s3.yml index b2f469efb7..401c3d7d3f 100644 --- a/images/capi/ansible/roles/load_additional_components/tasks/s3.yml +++ b/images/capi/ansible/roles/load_additional_components/tasks/s3.yml @@ -18,7 +18,7 @@ pkg: - python3-boto3 - python3-botocore - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" # TODO: We should probably think of an approach to allow a loop here to prevent multiple calls # and as such, multiple installs/removals of the boto packages. 
@@ -43,4 +43,4 @@ pkg: - python3-boto3 - python3-botocore - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/node/defaults/main.yml b/images/capi/ansible/roles/node/defaults/main.yml index ed7af0748c..06bd1eb949 100644 --- a/images/capi/ansible/roles/node/defaults/main.yml +++ b/images/capi/ansible/roles/node/defaults/main.yml @@ -119,7 +119,7 @@ common_raw_photon_rpms: [] # photon and flatcar do not have backward compatibility for legacy distro behavior for sysctl.conf by default # as it uses systemd-sysctl. set this var so we can use for sysctl conf file value. sysctl_conf_file: >- - {{ '/etc/sysctl.d/99-sysctl.conf' if ansible_os_family in ['Common Base Linux Mariner', 'Flatcar', 'Microsoft Azure Linux', 'VMware Photon OS'] + {{ '/etc/sysctl.d/99-sysctl.conf' if ansible_facts['os_family'] in ['Common Base Linux Mariner', 'Flatcar', 'Microsoft Azure Linux', 'VMware Photon OS'] else '/etc/sysctl.conf' }} pause_image: registry.k8s.io/pause:3.10 @@ -127,7 +127,7 @@ containerd_additional_settings: leak_local_mdns_to_dns: false build_target: virt cloud_cfg_file: /etc/cloud/cloud.cfg -external_binary_path: "{{ '/opt/bin' if ansible_os_family == 'Flatcar' else '/usr/local/bin' }}" +external_binary_path: "{{ '/opt/bin' if ansible_facts['os_family'] == 'Flatcar' else '/usr/local/bin' }}" # Enable containerd trace audit in auditd, default: false. 
enable_containerd_audit: false diff --git a/images/capi/ansible/roles/node/meta/main.yml b/images/capi/ansible/roles/node/meta/main.yml index b798165d1c..573e70fb39 100644 --- a/images/capi/ansible/roles/node/meta/main.yml +++ b/images/capi/ansible/roles/node/meta/main.yml @@ -17,26 +17,26 @@ dependencies: vars: rpms: "{{ common_rpms + al2_rpms + lookup('vars', 'common_' + build_target + '_rpms') }}" debs: "{{ common_debs }}" - when: ansible_distribution == "Amazon" and ansible_distribution_version == "2" + when: ansible_facts['distribution'] == "Amazon" and ansible_facts['distribution_version'] == "2" - role: setup vars: rpms: "{{ common_rpms + al2023_rpms + lookup('vars', 'common_' + build_target + '_rpms') }}" debs: "{{ common_debs }}" - when: ansible_distribution == "Amazon" and ansible_distribution_version == "2023" + when: ansible_facts['distribution'] == "Amazon" and ansible_facts['distribution_version'] == "2023" - role: setup vars: rpms: "{{ common_rpms }}" debs: "{{ common_debs }}" - when: packer_builder_type == "oracle-oci" and ansible_architecture == "aarch64" + when: packer_builder_type == "oracle-oci" and ansible_facts['architecture'] == "aarch64" - role: setup vars: rpms: >- - {{ (common_photon_rpms + lookup('vars', 'photon_' + ansible_distribution_major_version + '_rpms' ) + {{ (common_photon_rpms + lookup('vars', 'photon_' + ansible_facts['distribution_major_version'] + '_rpms' ) + lookup('vars', 'common_' + build_target + '_photon_rpms')) }} - when: ansible_distribution == "VMware Photon OS" + when: ansible_facts['distribution'] == "VMware Photon OS" - role: setup vars: @@ -44,11 +44,11 @@ dependencies: {{ ( common_rpms + rh8_rpms + lookup('vars', 'common_' + build_target + '_rpms') ) }} debs: "{{ common_debs + lookup('vars', 'common_' + build_target + '_debs') }}" when: > - ansible_distribution not in ["VMware Photon OS", "Amazon"] - and not (packer_builder_type == "oracle-oci" and ansible_architecture == "aarch64") + 
ansible_facts['distribution'] not in ["VMware Photon OS", "Amazon"] + and not (packer_builder_type == "oracle-oci" and ansible_facts['architecture'] == "aarch64") and not packer_builder_type is search('qemu') - role: setup vars: rpms: "{{ common_rpms + azurelinux_rpms + lookup('vars', 'common_' + build_target + '_rpms') }}" - when: ansible_distribution in ["Common Base Linux Mariner", "Microsoft Azure Linux"] + when: ansible_facts['distribution'] in ["Common Base Linux Mariner", "Microsoft Azure Linux"] diff --git a/images/capi/ansible/roles/node/tasks/main.yml b/images/capi/ansible/roles/node/tasks/main.yml index 0eea316cc3..64e085f40e 100644 --- a/images/capi/ansible/roles/node/tasks/main.yml +++ b/images/capi/ansible/roles/node/tasks/main.yml @@ -14,23 +14,23 @@ --- - name: Import Photon node tasks ansible.builtin.import_tasks: photon.yml - when: ansible_os_family == "VMware Photon OS" + when: ansible_facts['os_family'] == "VMware Photon OS" - name: Import Amazon Linux node tasks ansible.builtin.import_tasks: amazonLinux.yml - when: ansible_distribution == "Amazon" + when: ansible_facts['distribution'] == "Amazon" # This is required until https://github.com/ansible/ansible/issues/77537 is fixed and used. 
- name: Override Flatcar's OS family ansible.builtin.set_fact: ansible_os_family: Flatcar - when: ansible_os_family == "Flatcar Container Linux by Kinvolk" + when: ansible_facts['os_family'] == "Flatcar Container Linux by Kinvolk" tags: - facts - name: Import Flatcar node tasks ansible.builtin.import_tasks: flatcar.yml - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Ensure overlay module is present community.general.modprobe: @@ -74,18 +74,18 @@ - name: Disable swap memory ansible.builtin.shell: | swapoff -a - when: ansible_memory_mb.swap.total != 0 + when: ansible_facts['memory_mb'].swap.total != 0 - name: Edit fstab file to disable swap ansible.builtin.shell: sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab - when: ansible_memory_mb.swap.total != 0 + when: ansible_facts['memory_mb'].swap.total != 0 - name: Disable conntrackd service ansible.builtin.systemd: name: conntrackd state: stopped enabled: false - when: ansible_os_family not in ["Common Base Linux Mariner", "Debian", "Flatcar", "Microsoft Azure Linux"] + when: ansible_facts['os_family'] not in ["Common Base Linux Mariner", "Debian", "Flatcar", "Microsoft Azure Linux"] - name: Ensure auditd is running and comes on at reboot ansible.builtin.service: @@ -95,7 +95,7 @@ - name: Configure auditd rules for containerd ansible.builtin.copy: - src: "etc/audit/rules.d/containerd.rules{{ '-flatcar' if ansible_os_family == 'Flatcar' else '' }}" + src: "etc/audit/rules.d/containerd.rules{{ '-flatcar' if ansible_facts['os_family'] == 'Flatcar' else '' }}" dest: /etc/audit/rules.d/containerd.rules owner: root group: root @@ -109,7 +109,7 @@ state: present sysctl_set: true reload: true - when: ansible_distribution == "Ubuntu" + when: ansible_facts['distribution'] == "Ubuntu" - name: Set transparent huge pages to madvise ansible.builtin.lineinfile: @@ -117,7 +117,7 @@ backrefs: true regexp: ^(?!.*transparent_hugepage=madvise)(GRUB_CMDLINE_LINUX=.*)("$) line: \1 
transparent_hugepage=madvise" - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Copy udev etcd network tuning rules ansible.builtin.template: diff --git a/images/capi/ansible/roles/providers/tasks/aws.yml b/images/capi/ansible/roles/providers/tasks/aws.yml index 2474d01ca8..fabe8bf813 100644 --- a/images/capi/ansible/roles/providers/tasks/aws.yml +++ b/images/capi/ansible/roles/providers/tasks/aws.yml @@ -14,7 +14,7 @@ --- - name: Include AWS CLI v2 tasks ansible.builtin.include_tasks: awscliv2.yml - when: ansible_distribution != "Amazon" + when: ansible_facts['distribution'] != "Amazon" # Remove after https://github.com/aws/amazon-ssm-agent/issues/235 is fixed. - name: Install aws agents RPM on Redhat distributions @@ -25,33 +25,33 @@ with_items: - "{{ amazon_ssm_agent_rpm }}" when: - - ansible_os_family == "RedHat" - - ansible_distribution != "Amazon" + - ansible_facts['os_family'] == "RedHat" + - ansible_facts['distribution'] != "Amazon" - name: Ensure ssm agent is running RPM ansible.builtin.service: name: amazon-ssm-agent state: started enabled: true - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Install aws agents Ubuntu ansible.builtin.command: snap install amazon-ssm-agent --classic - when: ansible_distribution == "Ubuntu" + when: ansible_facts['distribution'] == "Ubuntu" - name: Ensure ssm agent is running Ubuntu ansible.builtin.service: name: snap.amazon-ssm-agent.amazon-ssm-agent.service state: started enabled: true - when: ansible_distribution == "Ubuntu" + when: ansible_facts['distribution'] == "Ubuntu" - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Create cloud-init custom data source list ansible.builtin.copy: @@ -60,7 +60,7 @@ owner: root group: root mode: "0644" - when: 
ansible_distribution == "Ubuntu" and ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution'] == "Ubuntu" and ansible_facts['distribution_version'] is version('22.04', '>=') - name: Create custom cloud-init data source ansible.builtin.copy: @@ -69,4 +69,4 @@ owner: root group: root mode: "0644" - when: ansible_distribution == "Ubuntu" and ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution'] == "Ubuntu" and ansible_facts['distribution_version'] is version('22.04', '>=') diff --git a/images/capi/ansible/roles/providers/tasks/awscliv2.yml b/images/capi/ansible/roles/providers/tasks/awscliv2.yml index 66031097d0..4e3da2db3d 100644 --- a/images/capi/ansible/roles/providers/tasks/awscliv2.yml +++ b/images/capi/ansible/roles/providers/tasks/awscliv2.yml @@ -4,7 +4,7 @@ name: pip executable: pip3 state: latest - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Install aws clients via pip ansible.builtin.pip: @@ -13,7 +13,7 @@ vars: packages: - awscli - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Install AWS CLI prequisites ansible.builtin.dnf: @@ -21,7 +21,7 @@ - gnupg - unzip state: present - when: ansible_distribution == "RedHat" + when: ansible_facts['distribution'] == "RedHat" - name: Install AWS CLI prerequisites ansible.builtin.apt: @@ -29,7 +29,7 @@ - gnupg - unzip state: present - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Import AWS public key ansible.builtin.shell: | @@ -65,39 +65,39 @@ EOF gpg --import aws-public-key rm aws-public-key - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Download AWS CLI v2 archive signature file ansible.builtin.get_url: - url: https://awscli.amazonaws.com/awscli-exe-linux-{{ ansible_architecture }}.zip.sig + url: https://awscli.amazonaws.com/awscli-exe-linux-{{ 
ansible_facts['architecture'] }}.zip.sig dest: /tmp/awscliv2.zip.sig mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Download AWS CLI v2 archive ansible.builtin.get_url: - url: https://awscli.amazonaws.com/awscli-exe-linux-{{ ansible_architecture }}.zip + url: https://awscli.amazonaws.com/awscli-exe-linux-{{ ansible_facts['architecture'] }}.zip dest: /tmp/awscliv2.zip mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Verify AWS CLI v2 archive ansible.builtin.command: gpg --verify /tmp/awscliv2.zip.sig /tmp/awscliv2.zip - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Unzip AWS CLI v2 archive ansible.builtin.unarchive: src: /tmp/awscliv2.zip dest: /tmp remote_src: true - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Install AWS CLI v2 ansible.builtin.command: /tmp/aws/install --update -i /usr/local/aws-cli -b /usr/local/sbin - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Remove temporary files ansible.builtin.file: path: /tmp/aws* state: absent - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" diff --git a/images/capi/ansible/roles/providers/tasks/azure.yml b/images/capi/ansible/roles/providers/tasks/azure.yml index 88f1635772..3aa146ea94 100644 --- a/images/capi/ansible/roles/providers/tasks/azure.yml +++ b/images/capi/ansible/roles/providers/tasks/azure.yml @@ -23,7 +23,10 @@ line: refclock PHC /dev/ptp0 poll 3 dpoll -2 offset 0 mode: "0644" # Flatcar now includes this by default as of 3975.2.0 which causes this task to fail - when: ansible_os_family != "Flatcar" or (ansible_os_family == "Flatcar" and ansible_distribution_version is version('3975.2.0', '<')) + when: > + ansible_facts['os_family'] != "Flatcar" or + (ansible_facts['os_family'] == "Flatcar" and + 
ansible_facts['distribution_version'] is version('3975.2.0', '<')) - name: Ensure makestep parameter set as per Azure recommendation ansible.builtin.lineinfile: @@ -31,7 +34,10 @@ regexp: ^makestep line: makestep 1.0 -1 # Flatcar now includes this by default as of 3975.2.0 which causes this task to fail - when: ansible_os_family != "Flatcar" or (ansible_os_family == "Flatcar" and ansible_distribution_version is version('3975.2.0', '<')) + when: > + ansible_facts['os_family'] != "Flatcar" or + (ansible_facts['os_family'] == "Flatcar" and + ansible_facts['distribution_version'] is version('3975.2.0', '<')) - name: Install iptables persistence ansible.builtin.apt: @@ -41,7 +47,7 @@ vars: packages: - iptables-persistent - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Block traffic to 168.63.129.16 port 80 for cve-2021-27075 ansible.builtin.copy: @@ -50,13 +56,13 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Load iptable rules from file community.general.iptables_state: state: restored path: /etc/iptables/rules.v4 - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install netbase and nfs-common ansible.builtin.apt: @@ -67,7 +73,7 @@ packages: - netbase - nfs-common - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" ## refer to ../files/etc/cloud/cloud.cfg.d/15_azure-vnet.cfg ## for more context on below file addition @@ -78,7 +84,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Create the credential provider path ansible.builtin.file: diff --git a/images/capi/ansible/roles/providers/tasks/azurecli.yml b/images/capi/ansible/roles/providers/tasks/azurecli.yml index ce52812f4c..a83e682362 100644 --- a/images/capi/ansible/roles/providers/tasks/azurecli.yml +++ 
b/images/capi/ansible/roles/providers/tasks/azurecli.yml @@ -13,7 +13,7 @@ # limitations under the License. --- - name: Install Azure CLI - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" block: - name: Add Microsoft Package Repository Key ansible.builtin.apt_key: @@ -44,13 +44,13 @@ state: present - name: Install Azure CLI - when: ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux"] + when: ansible_facts['os_family'] in ["Common Base Linux Mariner", "Microsoft Azure Linux"] ansible.builtin.package: name: azure-cli state: present - name: Install Azure CLI - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" block: - name: Import the Microsoft repository key ansible.builtin.rpm_key: diff --git a/images/capi/ansible/roles/providers/tasks/cloudstack.yml b/images/capi/ansible/roles/providers/tasks/cloudstack.yml index 88adaa204b..b21d2fdd7f 100644 --- a/images/capi/ansible/roles/providers/tasks/cloudstack.yml +++ b/images/capi/ansible/roles/providers/tasks/cloudstack.yml @@ -27,7 +27,7 @@ - name: Run dracut cmd to regenerate initramfs with all drivers - needed when converting to different hypervisor templates ansible.builtin.command: dracut --force --no-hostonly - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Add draut cmd to regenerate initramfs with only necessary drivers on first boot ansible.builtin.lineinfile: @@ -36,4 +36,4 @@ line: |- bootcmd: - dracut --force - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" diff --git a/images/capi/ansible/roles/providers/tasks/googlecompute.yml b/images/capi/ansible/roles/providers/tasks/googlecompute.yml index 1e7c5ed5a9..7dc531cc1d 100644 --- a/images/capi/ansible/roles/providers/tasks/googlecompute.yml +++ b/images/capi/ansible/roles/providers/tasks/googlecompute.yml @@ -13,7 +13,7 @@ # limitations under the License. 
--- - name: Install gcloud SDK - when: ansible_os_family != "RedHat" + when: ansible_facts['os_family'] != "RedHat" block: - name: Download gcloud SDK ansible.builtin.get_url: @@ -39,7 +39,7 @@ with_items: "{{ find.files }}" - name: Install gcloud SDK - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" block: - name: Add gcloud repository info ansible.builtin.shell: | @@ -65,11 +65,11 @@ packages: - cloud-init - cloud-utils-growpart - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/hcloud.yml b/images/capi/ansible/roles/providers/tasks/hcloud.yml index 85d7358f6a..d8aa580439 100644 --- a/images/capi/ansible/roles/providers/tasks/hcloud.yml +++ b/images/capi/ansible/roles/providers/tasks/hcloud.yml @@ -23,7 +23,7 @@ - cloud-guest-utils - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install cloud-tools and tools packages ansible.builtin.apt: @@ -34,7 +34,7 @@ packages: - linux-cloud-tools-generic - linux-tools-generic - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install cloud-init packages ansible.builtin.dnf: @@ -44,7 +44,7 @@ packages: - cloud-init - cloud-utils-growpart - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Install CSI prerequisites on Ubuntu ansible.builtin.apt: @@ -57,7 +57,7 @@ - open-iscsi - lvm2 - xfsprogs - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install CSI prerequisites on RedHat ansible.builtin.dnf: @@ -69,18 +69,18 @@ - nfs-utils - lvm2 - xfsprogs - when: 
ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Enable iSCSI initiator daemon on Ubuntu or RedHat ansible.builtin.systemd: name: iscsid state: started enabled: true - when: ansible_os_family in ["Debian", "Redhat"] + when: ansible_facts['os_family'] in ["Debian", "Redhat"] - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/huaweicloud.yml b/images/capi/ansible/roles/providers/tasks/huaweicloud.yml index d7e390cce0..ee0a6d3c16 100644 --- a/images/capi/ansible/roles/providers/tasks/huaweicloud.yml +++ b/images/capi/ansible/roles/providers/tasks/huaweicloud.yml @@ -17,21 +17,21 @@ name: pip executable: pip3 state: latest - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Uninstall cloud-init pip package ansible.builtin.pip: name: "cloud-init" executable: pip3 state: absent - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Create dns default conf directory ansible.builtin.file: path: /etc/systemd/resolved.conf.d state: directory mode: "0755" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Add default dns ansible.builtin.copy: @@ -42,4 +42,4 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/maas.yml b/images/capi/ansible/roles/providers/tasks/maas.yml index 6b74724792..0c1de5baaf 100644 --- a/images/capi/ansible/roles/providers/tasks/maas.yml +++ b/images/capi/ansible/roles/providers/tasks/maas.yml @@ -3,4 +3,4 @@ - name: Include MaaS Specific configs for Ubuntu Distro ansible.builtin.include_tasks: maas-ubuntu.yml - when: ansible_os_family == "Debian" + when: 
ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/main.yml b/images/capi/ansible/roles/providers/tasks/main.yml index 95ace6c5cf..38b651856b 100644 --- a/images/capi/ansible/roles/providers/tasks/main.yml +++ b/images/capi/ansible/roles/providers/tasks/main.yml @@ -84,7 +84,7 @@ path: /etc/systemd/system/cloud-final.service.d state: directory mode: "0755" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Create cloud-final boot order drop in file ansible.builtin.copy: @@ -93,14 +93,14 @@ owner: root group: root mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Creates unit file directory for cloud-config ansible.builtin.file: path: /etc/systemd/system/cloud-config.service.d state: directory mode: "0755" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Create cloud-config boot order drop in file ansible.builtin.copy: @@ -109,7 +109,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" # Some OS might disable cloud-final service on boot (rhel 7). # Enable all cloud-init services on boot. @@ -122,7 +122,7 @@ - cloud-config - cloud-init - cloud-init-local - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Create cloud-init config file ansible.builtin.copy: @@ -131,7 +131,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" # `feature_overrides.py` only works on old cloud-init versions (removed in https://github.com/canonical/cloud-init/pull/4228)... 
- name: Set cloudinit feature flags @@ -141,7 +141,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Set cloudinit feature flags for redhat 8 ansible.builtin.copy: @@ -150,7 +150,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "RedHat" and ansible_distribution == "RedHat" and ansible_distribution_major_version == "8" + when: ansible_facts['os_family'] == "RedHat" and ansible_facts['distribution'] == "RedHat" and ansible_facts['distribution_major_version'] == "8" - name: Set cloudinit feature flags for redhat 9 ansible.builtin.copy: @@ -159,7 +159,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "RedHat" and ansible_distribution == "RedHat" and ansible_distribution_major_version == "9" + when: ansible_facts['os_family'] == "RedHat" and ansible_facts['distribution'] == "RedHat" and ansible_facts['distribution_major_version'] == "9" # ...and `features.py` must be patched instead - name: Patch cloud-init feature flags for Debian-based OS @@ -167,14 +167,14 @@ path: /usr/lib/python3/dist-packages/cloudinit/features.py marker: "# {mark} ANSIBLE MANAGED BLOCK (by image-builder)" block: "{{ lookup('file', 'cloud-init-features.patch') }}" - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Patch cloud-init feature flags for RedHat 9 ansible.builtin.blockinfile: path: /usr/lib/python3.9/site-packages/cloudinit/features.py marker: "# {mark} ANSIBLE MANAGED BLOCK (by image-builder)" block: "{{ lookup('file', 'cloud-init-features.patch') }}" - when: ansible_os_family == "RedHat" and ansible_distribution == "RedHat" and ansible_distribution_major_version == "9" + when: ansible_facts['os_family'] == "RedHat" and ansible_facts['distribution'] == "RedHat" and ansible_facts['distribution_major_version'] == "9" - name: Ensure chrony is running ansible.builtin.systemd: @@ -185,4 +185,4 @@ when: > 
(packer_builder_type.startswith('amazon') or packer_builder_type.startswith('azure') or packer_builder_type is search('vmware') or packer_builder_type is search('vsphere')) - and ansible_os_family != "Flatcar" + and ansible_facts['os_family'] != "Flatcar" diff --git a/images/capi/ansible/roles/providers/tasks/nutanix.yml b/images/capi/ansible/roles/providers/tasks/nutanix.yml index a9f7edce32..697af4b1dd 100644 --- a/images/capi/ansible/roles/providers/tasks/nutanix.yml +++ b/images/capi/ansible/roles/providers/tasks/nutanix.yml @@ -14,11 +14,11 @@ --- - name: Include Nutanix RedHat tasks ansible.builtin.include_tasks: nutanix-redhat.yml - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Include Nutanix Ubuntu tasks ansible.builtin.include_tasks: nutanix-ubuntu.yml - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Ensure ip_vs module is loaded ansible.builtin.lineinfile: diff --git a/images/capi/ansible/roles/providers/tasks/oci.yml b/images/capi/ansible/roles/providers/tasks/oci.yml index 1bd7a24c7b..a038389935 100644 --- a/images/capi/ansible/roles/providers/tasks/oci.yml +++ b/images/capi/ansible/roles/providers/tasks/oci.yml @@ -17,18 +17,18 @@ path: /etc/iptables/rules.v4 state: absent regexp: -A INPUT -j REJECT --reject-with icmp-host-prohibited - when: ansible_distribution == "Ubuntu" + when: ansible_facts['distribution'] == "Ubuntu" - name: Remove the default input reject all iptable rule ansible.builtin.lineinfile: path: /etc/iptables/rules.v4 state: absent regexp: -A FORWARD -j REJECT --reject-with icmp-host-prohibited - when: ansible_distribution == "Ubuntu" + when: ansible_facts['distribution'] == "Ubuntu" - name: Disable firewalld service ansible.builtin.systemd: name: firewalld state: stopped enabled: false - when: ansible_distribution == "OracleLinux" + when: ansible_facts['distribution'] == "OracleLinux" diff --git 
a/images/capi/ansible/roles/providers/tasks/openstack.yml b/images/capi/ansible/roles/providers/tasks/openstack.yml index 641d053619..48d1eb2abf 100644 --- a/images/capi/ansible/roles/providers/tasks/openstack.yml +++ b/images/capi/ansible/roles/providers/tasks/openstack.yml @@ -23,35 +23,35 @@ - cloud-guest-utils - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Ensure networkd-dispatcher is installed ansible.builtin.apt: name: networkd-dispatcher state: present force_apt_get: true - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Enable networkd-dispatcher service ansible.builtin.systemd: name: networkd-dispatcher state: started enabled: true - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Create directory for DHCP chrony server files ansible.builtin.file: path: /var/lib/dhcp state: directory mode: '0755' - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: @@ -65,4 +65,4 @@ - { src: files/etc/networkd-dispatcher/routable.d/20-chrony.j2, dest: /etc/networkd-dispatcher/routable.d/20-chrony } - { src: files/etc/networkd-dispatcher/off.d/20-chrony.j2, dest: /etc/networkd-dispatcher/off.d/20-chrony } - { src: files/etc/networkd-dispatcher/no-carrier.d/20-chrony.j2, dest: /etc/networkd-dispatcher/no-carrier.d/20-chrony } - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/outscale.yml b/images/capi/ansible/roles/providers/tasks/outscale.yml index 
d30be5612a..086b7d6e90 100644 --- a/images/capi/ansible/roles/providers/tasks/outscale.yml +++ b/images/capi/ansible/roles/providers/tasks/outscale.yml @@ -14,10 +14,10 @@ ansible.builtin.apt: name: cloud-initramfs-dyn-netconf state: present - when: ansible_distribution == 'Debian' + when: ansible_facts['distribution'] == 'Debian' - name: Install Ubuntu specific packages ansible.builtin.apt: name: cloud-initramfs-copymods state: present - when: ansible_distribution == 'Ubuntu' + when: ansible_facts['distribution'] == 'Ubuntu' diff --git a/images/capi/ansible/roles/providers/tasks/proxmox.yml b/images/capi/ansible/roles/providers/tasks/proxmox.yml index 8a8eb12757..85759302ec 100644 --- a/images/capi/ansible/roles/providers/tasks/proxmox.yml +++ b/images/capi/ansible/roles/providers/tasks/proxmox.yml @@ -23,7 +23,7 @@ - cloud-guest-utils - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install cloud-init packages ansible.builtin.dnf: @@ -33,21 +33,21 @@ packages: - cloud-init - cloud-utils-growpart - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Create directory for DHCP chrony server files ansible.builtin.file: path: /var/lib/dhcp state: directory mode: '0755' - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: @@ -61,16 +61,16 @@ - { src: files/etc/networkd-dispatcher/routable.d/20-chrony.j2, dest: /etc/networkd-dispatcher/routable.d/20-chrony } - { src: files/etc/networkd-dispatcher/off.d/20-chrony.j2, dest: /etc/networkd-dispatcher/off.d/20-chrony } - { src: 
files/etc/networkd-dispatcher/no-carrier.d/20-chrony.j2, dest: /etc/networkd-dispatcher/no-carrier.d/20-chrony } - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Ensure networkd-dispatcher is started ansible.builtin.systemd: name: networkd-dispatcher state: started - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Ensure networkd-dispatcher is enabled ansible.builtin.systemd: name: networkd-dispatcher enabled: true - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/qemu.yml b/images/capi/ansible/roles/providers/tasks/qemu.yml index a10d725a73..173d64a16a 100644 --- a/images/capi/ansible/roles/providers/tasks/qemu.yml +++ b/images/capi/ansible/roles/providers/tasks/qemu.yml @@ -23,7 +23,7 @@ - cloud-guest-utils - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install cloud-init packages ansible.builtin.dnf: @@ -33,21 +33,21 @@ packages: - cloud-init - cloud-utils-growpart - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Create directory for DHCP chrony server files ansible.builtin.file: path: /var/lib/dhcp state: directory mode: '0755' - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Copy networkd-dispatcher scripts to add DHCP provided NTP servers ansible.builtin.template: @@ -61,4 +61,4 @@ - { src: files/etc/networkd-dispatcher/routable.d/20-chrony.j2, dest: /etc/networkd-dispatcher/routable.d/20-chrony } - { src: files/etc/networkd-dispatcher/off.d/20-chrony.j2, dest: 
/etc/networkd-dispatcher/off.d/20-chrony } - { src: files/etc/networkd-dispatcher/no-carrier.d/20-chrony.j2, dest: /etc/networkd-dispatcher/no-carrier.d/20-chrony } - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/raw.yml b/images/capi/ansible/roles/providers/tasks/raw.yml index 26094b3346..945370e134 100644 --- a/images/capi/ansible/roles/providers/tasks/raw.yml +++ b/images/capi/ansible/roles/providers/tasks/raw.yml @@ -23,7 +23,7 @@ - cloud-guest-utils - cloud-initramfs-copymods - cloud-initramfs-dyn-netconf - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Install cloud-init packages ansible.builtin.dnf: @@ -33,11 +33,11 @@ packages: - cloud-init - cloud-utils-growpart - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Run dracut cmd to regenerate initramfs with all drivers - needed when converting to different hypervisor templates ansible.builtin.command: dracut --force --no-hostonly - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Symlink /usr/libexec/cloud-init to /usr/lib/cloud-init ansible.builtin.file: @@ -45,11 +45,11 @@ dest: /usr/lib/cloud-init mode: "0777" state: link - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Disable Hyper-V KVP protocol daemon on Ubuntu ansible.builtin.systemd: name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/scaleway.yml b/images/capi/ansible/roles/providers/tasks/scaleway.yml index 29cde0cc60..8ceb1e9c72 100644 --- a/images/capi/ansible/roles/providers/tasks/scaleway.yml +++ b/images/capi/ansible/roles/providers/tasks/scaleway.yml @@ -17,4 +17,4 @@ name: hv-kvp-daemon state: stopped enabled: false - when: ansible_os_family == 
"Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml b/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml index 82146ddb72..1757c6f5ca 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml +++ b/images/capi/ansible/roles/providers/tasks/vmware-redhat.yml @@ -37,7 +37,7 @@ vars: packages: - python2-pip - when: ansible_distribution_major_version|int <= 8 + when: ansible_facts['distribution_major_version']|int <= 8 # pip on CentOS needs to be upgraded, but since it's still # Python 2.7, need < 21.0 @@ -45,7 +45,7 @@ ansible.builtin.pip: name: pip<21.0 state: forcereinstall - when: ansible_distribution_major_version == '7' + when: ansible_facts['distribution_major_version'] == '7' # Directly installing Guestinfo datasource is needed so long as # cloud-init is < 21.3 diff --git a/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml b/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml index c5d38e2051..b63187e202 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml +++ b/images/capi/ansible/roles/providers/tasks/vmware-ubuntu.yml @@ -55,7 +55,7 @@ mode: a+x vars: server_dir: /var/lib/dhcp - chrony_helper_dir: "{{ '/usr/libexec/chrony' if ansible_distribution_version is version('22.04', '>=') else '/usr/lib/chrony' }}" + chrony_helper_dir: "{{ '/usr/libexec/chrony' if ansible_facts['distribution_version'] is version('22.04', '>=') else '/usr/lib/chrony' }}" loop: - { src: files/etc/networkd-dispatcher/routable.d/20-chrony.j2, dest: /etc/networkd-dispatcher/routable.d/20-chrony } - { src: files/etc/networkd-dispatcher/off.d/20-chrony.j2, dest: /etc/networkd-dispatcher/off.d/20-chrony } @@ -68,4 +68,4 @@ content: | datasource: VMware mode: "0644" - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is version('22.04', '>=') diff --git 
a/images/capi/ansible/roles/providers/tasks/vmware.yml b/images/capi/ansible/roles/providers/tasks/vmware.yml index 40d08b9d61..507b0ed55d 100644 --- a/images/capi/ansible/roles/providers/tasks/vmware.yml +++ b/images/capi/ansible/roles/providers/tasks/vmware.yml @@ -14,15 +14,15 @@ --- - name: Include VMware Photon tasks ansible.builtin.include_tasks: vmware-photon.yml - when: ansible_os_family == "VMware Photon OS" + when: ansible_facts['os_family'] == "VMware Photon OS" - name: Include VMware Ubuntu tasks ansible.builtin.include_tasks: vmware-ubuntu.yml - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Include VMware RedHat tasks ansible.builtin.include_tasks: vmware-redhat.yml - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Create provider vmtools config drop-in file ansible.builtin.copy: @@ -31,7 +31,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Create service to modify cloud-init config ansible.builtin.copy: @@ -40,7 +40,7 @@ owner: root group: root mode: "0644" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Copy cloud-init modification script ansible.builtin.copy: @@ -49,7 +49,7 @@ owner: root group: root mode: "0755" - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Enable modify-cloud-init-cfg.service ansible.builtin.systemd: @@ -57,4 +57,4 @@ daemon_reload: true enabled: true state: stopped - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" diff --git a/images/capi/ansible/roles/python/tasks/main.yml b/images/capi/ansible/roles/python/tasks/main.yml index 973da587ea..b112b67de4 100644 --- a/images/capi/ansible/roles/python/tasks/main.yml +++ b/images/capi/ansible/roles/python/tasks/main.yml @@ -18,6 +18,6 @@ - name: Include Flatcar Python tasks 
ansible.builtin.include_tasks: flatcar.yml - # We can't use ansible_os_family fact here for consistency, as facts gathering + # We can't use ansible_facts['os_family'] fact here for consistency, as facts gathering # is disabled in the playbook which includes this role. See playbook for more details. when: distrib_id.stdout_lines[0] is search("Flatcar") diff --git a/images/capi/ansible/roles/security/tasks/falco.yml b/images/capi/ansible/roles/security/tasks/falco.yml index ecb4aa8366..b3c129b6e2 100644 --- a/images/capi/ansible/roles/security/tasks/falco.yml +++ b/images/capi/ansible/roles/security/tasks/falco.yml @@ -15,7 +15,7 @@ --- - name: Install Falco on Debian based systems - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" block: - name: Add Falco package signing key ansible.builtin.apt_key: @@ -33,7 +33,7 @@ pkg: - dkms - make - - "linux-headers-{{ ansible_kernel }}" + - "linux-headers-{{ ansible_facts['kernel'] }}" - clang - llvm update_cache: true @@ -43,7 +43,7 @@ until: pkg_result is success - name: Install Falco on RedHat based systems - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" block: - name: Add Falco YUM repo ansible.builtin.yum_repository: @@ -60,7 +60,7 @@ pkg: - dkms - make - - "kernel-devel-{{ ansible_kernel }}" + - "kernel-devel-{{ ansible_facts['kernel'] }}" - clang - llvm - dialog @@ -73,11 +73,11 @@ ansible.builtin.package: name: falco state: present - when: ansible_os_family == "Debian" or ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "Debian" or ansible_facts['os_family'] == "RedHat" - name: Enable Falco Modern eBPF ansible.builtin.service: name: falco-modern-bpf state: started enabled: true - when: ansible_os_family == "Debian" or ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "Debian" or ansible_facts['os_family'] == "RedHat" diff --git a/images/capi/ansible/roles/security/tasks/trivy.yml 
b/images/capi/ansible/roles/security/tasks/trivy.yml index 72a22ba810..28ae28037e 100644 --- a/images/capi/ansible/roles/security/tasks/trivy.yml +++ b/images/capi/ansible/roles/security/tasks/trivy.yml @@ -15,7 +15,7 @@ --- - name: Install Trivy on Debian based systems - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" block: - name: Add Trivy package signing key ansible.builtin.apt_key: @@ -24,19 +24,19 @@ - name: Add Trivy apt repo ansible.builtin.apt_repository: - repo: "deb https://aquasecurity.github.io/trivy-repo/deb {{ ansible_distribution_release }} main" + repo: "deb https://aquasecurity.github.io/trivy-repo/deb {{ ansible_facts['distribution_release'] }} main" state: present filename: trivy - name: Install Trivy on RedHat based systems - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" block: - name: Add Trivy rpm repo ansible.builtin.yum_repository: name: Trivy repository description: Trivy YUM repo file: trivy - baseurl: https://aquasecurity.github.io/trivy-repo/rpm/releases/{{ ansible_distribution_release }}/{{ ansible_architecture }}/ + baseurl: https://aquasecurity.github.io/trivy-repo/rpm/releases/{{ ansible_facts['distribution_release'] }}/{{ ansible_facts['architecture'] }}/ gpgcheck: true enabled: true gpgkey: https://aquasecurity.github.io/trivy-repo/rpm/public.key @@ -46,10 +46,10 @@ name: trivy update_cache: true state: present - when: ansible_os_family == "Debian" or ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "Debian" or ansible_facts['os_family'] == "RedHat" - name: Update Trivy DB to ensure latest records are available as of now ansible.builtin.command: trivy rootfs --download-db-only args: creates: ~/.cache/trivy/db/trivy.db - when: ansible_os_family == "Debian" or ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "Debian" or ansible_facts['os_family'] == "RedHat" diff --git a/images/capi/ansible/roles/setup/defaults/main.yml 
b/images/capi/ansible/roles/setup/defaults/main.yml index 8f5a40710d..9a7dab0f5f 100644 --- a/images/capi/ansible/roles/setup/defaults/main.yml +++ b/images/capi/ansible/roles/setup/defaults/main.yml @@ -23,6 +23,6 @@ ubuntu_repo: "http://us.archive.ubuntu.com/ubuntu/" ubuntu_security_repo: "http://security.ubuntu.com/ubuntu/" disable_public_repos: false -external_binary_path: "{{ '/opt/bin' if ansible_os_family == 'Flatcar' else '/usr/local/bin' }}" +external_binary_path: "{{ '/opt/bin' if ansible_facts['os_family'] == 'Flatcar' else '/usr/local/bin' }}" extra_repos: "" pip_conf_file: "" diff --git a/images/capi/ansible/roles/setup/tasks/bootstrap-flatcar.yml b/images/capi/ansible/roles/setup/tasks/bootstrap-flatcar.yml index c070dcc9e9..8358f8dfcd 100644 --- a/images/capi/ansible/roles/setup/tasks/bootstrap-flatcar.yml +++ b/images/capi/ansible/roles/setup/tasks/bootstrap-flatcar.yml @@ -23,6 +23,6 @@ - name: Override Flatcar's OS family ansible.builtin.set_fact: ansible_os_family: Flatcar - when: ansible_os_family == "Flatcar Container Linux by Kinvolk" + when: ansible_facts['os_family'] == "Flatcar Container Linux by Kinvolk" tags: - facts diff --git a/images/capi/ansible/roles/setup/tasks/debian.yml b/images/capi/ansible/roles/setup/tasks/debian.yml index 5c07585c5e..f860fd09d4 100644 --- a/images/capi/ansible/roles/setup/tasks/debian.yml +++ b/images/capi/ansible/roles/setup/tasks/debian.yml @@ -21,7 +21,10 @@ # from this repo leads to build failures(especially in Arm), hence ignoring the step. 
# Ubuntu 24.04 has changed to deb822 source management # As a result the there is change in format source configurations and location - when: (packer_builder_type != "oracle-oci") and ((ansible_distribution == "Ubuntu") and (ansible_distribution_major_version is version('24', '<'))) + when: > + (packer_builder_type != "oracle-oci") and + ((ansible_facts['distribution'] == "Ubuntu") and + (ansible_facts['distribution_major_version'] is version('24', '<'))) - name: Put templated ubuntu.sources in place ansible.builtin.template: @@ -30,7 +33,7 @@ mode: "0644" # Ubuntu 24.04 has changed to deb822 source management # As a result the there is change in format source configurations and location - when: (ansible_distribution == "Ubuntu") and (ansible_distribution_major_version is version('24', '>=')) + when: (ansible_facts['distribution'] == "Ubuntu") and (ansible_facts['distribution_major_version'] is version('24', '>=')) - name: Put templated apt.conf.d/90proxy in place when defined ansible.builtin.template: diff --git a/images/capi/ansible/roles/setup/tasks/main.yml b/images/capi/ansible/roles/setup/tasks/main.yml index f73013f77f..71fd46752c 100644 --- a/images/capi/ansible/roles/setup/tasks/main.yml +++ b/images/capi/ansible/roles/setup/tasks/main.yml @@ -14,26 +14,26 @@ --- - name: Import Debian setup tasks ansible.builtin.import_tasks: debian.yml - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Import Flatcar setup tasks ansible.builtin.import_tasks: flatcar.yml - # This task overrides ansible_os_family to "Flatcar" as a workaround for + # This task overrides ansible_facts['os_family'] to "Flatcar" as a workaround for # regression between Flatcar and Ansible, so rest of the code can use just # "Flatcar" for comparison, which is the correct value. 
- when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + when: ansible_facts['os_family'] in ["Flatcar", "Flatcar Container Linux by Kinvolk"] - name: Import Azure Linux setup tasks ansible.builtin.import_tasks: azurelinux.yml - when: ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux"] + when: ansible_facts['os_family'] in ["Common Base Linux Mariner", "Microsoft Azure Linux"] - name: Import RedHat setup tasks ansible.builtin.import_tasks: redhat.yml - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Import Photon setup tasks ansible.builtin.import_tasks: photon.yml - when: ansible_os_family == "VMware Photon OS" + when: ansible_facts['os_family'] == "VMware Photon OS" # Copy in pip config file when defined - name: Install pip config file diff --git a/images/capi/ansible/roles/setup/tasks/redhat.yml b/images/capi/ansible/roles/setup/tasks/redhat.yml index 0ceb1599cc..6bf171aa2b 100644 --- a/images/capi/ansible/roles/setup/tasks/redhat.yml +++ b/images/capi/ansible/roles/setup/tasks/redhat.yml @@ -19,7 +19,7 @@ password: "{{ lookup('env', 'RHSM_PASS') }}" auto_attach: true when: - - ansible_distribution == "RedHat" + - ansible_facts['distribution'] == "RedHat" - lookup('env', 'RHSM_USER') | length > 0 - lookup('env', 'RHSM_PASS') | length > 0 diff --git a/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 b/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 index 818a454887..82dad028dd 100644 --- a/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 +++ b/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.d/ubuntu.sources.j2 @@ -1,11 +1,11 @@ Types: deb URIs: {{ ubuntu_repo }} -Suites: {{ ansible_distribution_release }} {{ ansible_distribution_release }}-updates {{ ansible_distribution_release }}-backports +Suites: {{ ansible_facts['distribution_release'] }} {{ 
ansible_facts['distribution_release'] }}-updates {{ ansible_facts['distribution_release'] }}-backports Components: main restricted universe multiverse Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg Types: deb URIs: {{ ubuntu_security_repo }} -Suites: {{ ansible_distribution_release }}-security +Suites: {{ ansible_facts['distribution_release'] }}-security Components: main restricted universe multiverse Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg \ No newline at end of file diff --git a/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.j2 b/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.j2 index eaec06f851..4ba56f1a27 100644 --- a/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.j2 +++ b/images/capi/ansible/roles/setup/templates/etc/apt/sources.list.j2 @@ -1,4 +1,4 @@ -deb {{ ubuntu_repo }} {{ ansible_distribution_release }} main restricted universe -deb {{ ubuntu_repo }} {{ ansible_distribution_release }}-updates main restricted universe -deb {{ ubuntu_repo }} {{ ansible_distribution_release }}-backports main restricted universe -deb {{ ubuntu_security_repo }} {{ ansible_distribution_release }}-security main restricted universe +deb {{ ubuntu_repo }} {{ ansible_facts['distribution_release'] }} main restricted universe +deb {{ ubuntu_repo }} {{ ansible_facts['distribution_release'] }}-updates main restricted universe +deb {{ ubuntu_repo }} {{ ansible_facts['distribution_release'] }}-backports main restricted universe +deb {{ ubuntu_security_repo }} {{ ansible_facts['distribution_release'] }}-security main restricted universe diff --git a/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml b/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml index 2a40f28f03..311121bc35 100644 --- a/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml +++ b/images/capi/ansible/roles/sysprep/tasks/azurelinux.yml @@ -37,7 +37,7 @@ name: swap.target enabled: false masked: true - when: 
ansible_memory_mb.swap.total != 0 + when: ansible_facts['memory_mb'].swap.total != 0 - name: Remove the kickstart log ansible.builtin.file: diff --git a/images/capi/ansible/roles/sysprep/tasks/debian.yml b/images/capi/ansible/roles/sysprep/tasks/debian.yml index a97472e393..fd3c4163e6 100644 --- a/images/capi/ansible/roles/sysprep/tasks/debian.yml +++ b/images/capi/ansible/roles/sysprep/tasks/debian.yml @@ -71,7 +71,7 @@ ansible.builtin.service: name: rsyslog state: stopped - when: "'rsyslog' in services" + when: "'rsyslog' in ansible_facts.services" - name: Remove apt package caches ansible.builtin.apt: @@ -136,28 +136,28 @@ ansible.builtin.file: path: /etc/cloud/cloud.cfg.d/subiquity-disable-cloudinit-networking.cfg state: absent - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is version('22.04', '>=') - name: Removing 99-installer.cfg which sets the cloud-init datasource to None ansible.builtin.file: path: /etc/cloud/cloud.cfg.d/99-installer.cfg state: absent - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is version('22.04', '>=') - name: Removing subiquity curtin preserve sources config ansible.builtin.file: path: /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg state: absent - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is version('22.04', '>=') - name: Removing cloud-init ds identify config ansible.builtin.file: path: /etc/cloud/ds-identify.cfg state: absent - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is version('22.04', '>=') - name: Removing 90-installer-network.cfg installer network configuration ansible.builtin.file: path: /etc/cloud/cloud.cfg.d/90-installer-network.cfg state: absent - when: ansible_distribution_version is version('22.04', '>=') + when: ansible_facts['distribution_version'] is 
version('22.04', '>=') diff --git a/images/capi/ansible/roles/sysprep/tasks/main.yml b/images/capi/ansible/roles/sysprep/tasks/main.yml index db3e46a97e..1c7402a1a8 100644 --- a/images/capi/ansible/roles/sysprep/tasks/main.yml +++ b/images/capi/ansible/roles/sysprep/tasks/main.yml @@ -14,23 +14,23 @@ --- - name: Import Debian sysprep tasks ansible.builtin.import_tasks: debian.yml - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Import Flatcar sysprep tasks ansible.builtin.import_tasks: flatcar.yml - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Import RedHat sysprep tasks ansible.builtin.import_tasks: redhat.yml - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Import Azure Linux sysprep tasks ansible.builtin.import_tasks: azurelinux.yml - when: ansible_os_family in ["Common Base Linux Mariner", "Microsoft Azure Linux"] + when: ansible_facts['os_family'] in ["Common Base Linux Mariner", "Microsoft Azure Linux"] - name: Import Photon sysprep tasks ansible.builtin.import_tasks: photon.yml - when: ansible_os_family == "VMware Photon OS" + when: ansible_facts['os_family'] == "VMware Photon OS" - name: Remove containerd http proxy conf file if needed ansible.builtin.file: @@ -56,7 +56,7 @@ loop: - { path: /etc/machine-id, state: absent, mode: "{{ machine_id_mode }}" } - { path: /etc/machine-id, state: touch, mode: "{{ machine_id_mode }}" } - when: ansible_os_family not in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linuz"] + when: ansible_facts['os_family'] not in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linux"] - name: Truncate hostname file ansible.builtin.file: @@ -72,7 +72,9 @@ - name: Set hostname ansible.builtin.hostname: name: localhost.local - when: ansible_os_family not in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linux", "VMware Photon OS"] and packer_build_name != "nutanix" + when: 
> + ansible_facts['os_family'] not in ["Common Base Linux Mariner", "Flatcar", "Microsoft Azure Linux", "VMware Photon OS"] + and packer_build_name != "nutanix" - name: Reset hosts file ansible.builtin.copy: @@ -109,13 +111,13 @@ ansible.builtin.shell: cmd: | cloud-init clean --machine-id - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Reset cloud-init ansible.builtin.shell: cmd: | cloud-init clean - when: ansible_os_family == "RedHat" + when: ansible_facts['os_family'] == "RedHat" - name: Remove cloud-init.disabled ansible.builtin.file: @@ -169,7 +171,7 @@ src: files/etc/netplan/51-kubevirt-netplan.yaml dest: /etc/netplan/51-kubevirt-netplan.yaml mode: "0644" - when: ansible_os_family == "Debian" and kubevirt == "true" + when: ansible_facts['os_family'] == "Debian" and kubevirt == "true" - name: Find SSH host keys ansible.builtin.find: @@ -189,8 +191,8 @@ path: "{{ item.path }}" loop: - { path: /root/.ssh/authorized_keys } - - { path: "/home/{{ ansible_env.SUDO_USER | default(ansible_user_id) }}/.ssh/authorized_keys" } - when: ansible_os_family != "Flatcar" + - { path: "/home/{{ ansible_facts['env'].SUDO_USER | default(ansible_facts['user_id']) }}/.ssh/authorized_keys" } + when: ansible_facts['os_family'] != "Flatcar" - name: Remove SSH authorized users for Flatcar ansible.builtin.file: @@ -198,7 +200,7 @@ path: "{{ item.path }}" loop: - { path: /root/.ssh/authorized_keys } - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Truncate all remaining log files in /var/log ansible.builtin.shell: | @@ -207,13 +209,13 @@ args: executable: /bin/bash - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Delete all logrotated logs ansible.builtin.shell: cmd: | find /var/log -type f -regex '.*[0-9z]$' -exec rm {} + - when: ansible_os_family != "Flatcar" + when: ansible_facts['os_family'] != "Flatcar" - name: Remove swapfile ansible.builtin.file: 
@@ -223,7 +225,7 @@ - /swap.img - /swapfile - /mnt/resource/swapfile - when: ansible_memory_mb.swap.total != 0 + when: ansible_facts['memory_mb'].swap.total != 0 - name: Truncate shell history ansible.builtin.file: @@ -231,13 +233,13 @@ path: "{{ item.path }}" loop: - { path: /root/.bash_history } - - { path: "/home/{{ ansible_env.SUDO_USER | default(ansible_user_id) }}/.bash_history" } + - { path: "/home/{{ ansible_facts['env'].SUDO_USER | default(ansible_facts['user_id']) }}/.bash_history" } - name: Rotate journalctl to archive logs ansible.builtin.shell: cmd: | journalctl --rotate - when: not ( ansible_os_family == "RedHat" and ansible_distribution_major_version|int <= 7 ) + when: not ( ansible_facts['os_family'] == "RedHat" and ansible_facts['distribution_major_version']|int <= 7 ) - name: Remove archived journalctl logs ansible.builtin.shell: @@ -251,22 +253,22 @@ owner: root group: root mode: "0644" - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Remove any default Ignition files used by Packer ansible.builtin.file: state: absent path: /usr/share/oem/config.ign - when: ansible_os_family == "Flatcar" + when: ansible_facts['os_family'] == "Flatcar" - name: Start fstrim ansible.builtin.systemd: name: fstrim.service state: started - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" - name: Start ssh ansible.builtin.systemd: name: ssh enabled: true - when: ansible_os_family == "Debian" + when: ansible_facts['os_family'] == "Debian" diff --git a/images/capi/ansible/roles/sysprep/tasks/redhat.yml b/images/capi/ansible/roles/sysprep/tasks/redhat.yml index fcef2a175a..78ab8670c4 100644 --- a/images/capi/ansible/roles/sysprep/tasks/redhat.yml +++ b/images/capi/ansible/roles/sysprep/tasks/redhat.yml @@ -29,14 +29,14 @@ path: /etc/yum.conf regexp: ^exclude= line: exclude={{ package_list }} - when: ansible_distribution != "Amazon" or ansible_distribution_version != "2023" + when: 
ansible_facts['distribution'] != "Amazon" or ansible_facts['distribution_version'] != "2023" - name: Exclude packages from upgrade ansible.builtin.lineinfile: path: /etc/dnf/dnf.conf regexp: ^excludepkgs= line: excludepkgs={{ package_list }} - when: ansible_distribution == "Amazon" and ansible_distribution_version == "2023" + when: ansible_facts['distribution'] == "Amazon" and ansible_facts['distribution_version'] == "2023" - name: Import RPM repository tasks ansible.builtin.import_tasks: rpm_repos.yml @@ -44,7 +44,7 @@ - name: Remove RHEL subscription when: - - ansible_distribution == "RedHat" + - ansible_facts['distribution'] == "RedHat" - lookup('env', 'RHSM_USER') | length > 0 - lookup('env', 'RHSM_PASS') | length > 0 block: @@ -72,15 +72,15 @@ ansible.builtin.shell: | set -o pipefail sed -i '/^\(HWADDR\|UUID\)=/d' /etc/sysconfig/network-scripts/ifcfg-* - when: packer_builder_type != "googlecompute" and ansible_distribution_major_version|int != 9 + when: packer_builder_type != "googlecompute" and ansible_facts['distribution_major_version']|int != 9 - name: Migrate interface configuration files to NetworkManager keyfiles ansible.builtin.command: nmcli connection migrate - when: packer_builder_type != "googlecompute" and ansible_distribution_major_version|int == 9 + when: packer_builder_type != "googlecompute" and ansible_facts['distribution_major_version']|int == 9 - name: Reset network interface IDs ansible.builtin.shell: sed -i '/^\(uuid\)=/d' /etc/NetworkManager/system-connections/*.nmconnection - when: packer_builder_type != "googlecompute" and ansible_distribution_major_version|int == 9 + when: packer_builder_type != "googlecompute" and ansible_facts['distribution_major_version']|int == 9 - name: Remove the kickstart log ansible.builtin.file: From 3cdeadc49551d39fd71c8936c958fec384d3a1be Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Tue, 21 Apr 2026 09:42:04 -0600 Subject: [PATCH 84/90] Pin GitHub Actions to SHAs and add dependabot config Pin all 
third-party GitHub Actions in build-azure-sig.yaml to their commit SHAs to comply with the Kubernetes org policy requiring pinned actions. The version tag is preserved as a trailing comment for readability. Add a dependabot.yml configuration to keep the pinned actions up to date automatically via weekly grouped pull requests. --- .github/dependabot.yml | 24 +++++++++++++++++++++ .github/workflows/build-azure-sig.yaml | 30 +++++++++++++------------- 2 files changed, 39 insertions(+), 15 deletions(-) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..0abcead57b --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,24 @@ +# Please see the documentation for all configuration options: https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + +# github-actions +- directory: "/" + package-ecosystem: "github-actions" + open-pull-requests-limit: 5 + schedule: + interval: "weekly" + time: "09:00" + # Use America/New_York Standard Time (UTC -05:00) + timezone: "America/New_York" + groups: + all-github-actions: + patterns: [ "*" ] + commit-message: + prefix: "dependabot" + include: scope + labels: + - "ok-to-test" + - "kind/cleanup" + - "release-note-none" diff --git a/.github/workflows/build-azure-sig.yaml b/.github/workflows/build-azure-sig.yaml index 078a98cfd1..374e032a55 100644 --- a/.github/workflows/build-azure-sig.yaml +++ b/.github/workflows/build-azure-sig.yaml @@ -132,7 +132,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - name: Configure Kubernetes version uses: ./.github/actions/configure-k8s-version @@ -162,14 +162,14 @@ jobs: echo "kube-proxy image ${IMAGE}:${TAG} exists" - name: Azure Login - uses: azure/login@v2 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2 with: client-id: ${{ 
secrets.AZURE_CLIENT_ID }} tenant-id: ${{ secrets.AZURE_TENANT_ID }} subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: python-version: '3.12' @@ -262,7 +262,7 @@ jobs: echo "${PUBLISHING_INFO}" > packer/azure/sig-publishing-info.json - name: Upload publishing info artifact - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: publishing-info path: images/capi/packer/azure/sig-publishing-info.json @@ -287,10 +287,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - name: Download publishing info artifact - uses: actions/download-artifact@v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 with: name: publishing-info path: images/capi/packer/azure/sig/ @@ -314,7 +314,7 @@ jobs: echo "CONTAINERD_VERSION=${CONTAINERD_VERSION}" >> $GITHUB_OUTPUT - name: Azure Login - uses: azure/login@v2 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2 with: client-id: ${{ secrets.AZURE_CLIENT_ID }} tenant-id: ${{ secrets.AZURE_TENANT_ID }} @@ -598,7 +598,7 @@ jobs: - name: Upload diagnostics if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: test-diagnostics path: _artifacts/ @@ -646,10 +646,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - name: Download publishing info artifact - uses: actions/download-artifact@v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 with: name: publishing-info path: images/capi/packer/azure/sig/ @@ -677,7 +677,7 @@ jobs: echo "TAGS=$(echo "$PUBLISHING_INFO" | jq -r .tags)" >> $GITHUB_OUTPUT - 
name: Azure Login - uses: azure/login@v2 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2 with: client-id: ${{ secrets.AZURE_CLIENT_ID }} tenant-id: ${{ secrets.AZURE_TENANT_ID }} @@ -778,7 +778,7 @@ jobs: | tee sig-publishing.json - name: Upload publishing artifact - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: sig-publishing path: images/capi/sig-publishing.json @@ -799,10 +799,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - name: Download publishing info artifact - uses: actions/download-artifact@v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 with: name: publishing-info path: images/capi/packer/azure/sig/ @@ -820,7 +820,7 @@ jobs: echo "SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_version)" >> $GITHUB_OUTPUT - name: Azure Login - uses: azure/login@v2 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2 with: client-id: ${{ secrets.AZURE_CLIENT_ID }} tenant-id: ${{ secrets.AZURE_TENANT_ID }} From 577469d3b698b4fb4b6629109c89a9d5ff40f34e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Apr 2026 20:04:48 +0000 Subject: [PATCH 85/90] dependabot(deps): bump the all-github-actions group with 5 updates Bumps the all-github-actions group with 5 updates: | Package | From | To | | --- | --- | --- | | [actions/checkout](https://github.com/actions/checkout) | `4.3.1` | `6.0.2` | | [azure/login](https://github.com/azure/login) | `2.3.0` | `3.0.0` | | [actions/setup-python](https://github.com/actions/setup-python) | `5.6.0` | `6.2.0` | | [actions/upload-artifact](https://github.com/actions/upload-artifact) | `4.6.2` | `7.0.1` | | [actions/download-artifact](https://github.com/actions/download-artifact) | `4.3.0` | 
`8.0.1` | Updates `actions/checkout` from 4.3.1 to 6.0.2 - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/34e114876b0b11c390a56381ad16ebd13914f8d5...de0fac2e4500dabe0009e67214ff5f5447ce83dd) Updates `azure/login` from 2.3.0 to 3.0.0 - [Release notes](https://github.com/azure/login/releases) - [Commits](https://github.com/azure/login/compare/a457da9ea143d694b1b9c7c869ebb04ebe844ef5...532459ea530d8321f2fb9bb10d1e0bcf23869a43) Updates `actions/setup-python` from 5.6.0 to 6.2.0 - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/a26af69be951a213d495a4c3e4e4022e16d87065...a309ff8b426b58ec0e2a45f0f869d46889d02405) Updates `actions/upload-artifact` from 4.6.2 to 7.0.1 - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/ea165f8d65b6e75b540449e92b4886f43607fa02...043fb46d1a93c77aae656e7c1c64a875d1fc6a0a) Updates `actions/download-artifact` from 4.3.0 to 8.0.1 - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/d3f86a106a0bac45b974a628896c90dbdf5c8093...3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 6.0.2 dependency-type: direct:production update-type: version-update:semver-major dependency-group: all-github-actions - dependency-name: azure/login dependency-version: 3.0.0 dependency-type: direct:production update-type: version-update:semver-major dependency-group: all-github-actions - dependency-name: actions/setup-python dependency-version: 6.2.0 dependency-type: direct:production update-type: version-update:semver-major dependency-group: all-github-actions - dependency-name: actions/upload-artifact 
dependency-version: 7.0.1 dependency-type: direct:production update-type: version-update:semver-major dependency-group: all-github-actions - dependency-name: actions/download-artifact dependency-version: 8.0.1 dependency-type: direct:production update-type: version-update:semver-major dependency-group: all-github-actions ... Signed-off-by: dependabot[bot] --- .github/workflows/build-azure-sig.yaml | 30 +++++++++++++------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build-azure-sig.yaml b/.github/workflows/build-azure-sig.yaml index 374e032a55..1c86351907 100644 --- a/.github/workflows/build-azure-sig.yaml +++ b/.github/workflows/build-azure-sig.yaml @@ -132,7 +132,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Configure Kubernetes version uses: ./.github/actions/configure-k8s-version @@ -162,14 +162,14 @@ jobs: echo "kube-proxy image ${IMAGE}:${TAG} exists" - name: Azure Login - uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2 + uses: azure/login@532459ea530d8321f2fb9bb10d1e0bcf23869a43 # v3.0.0 with: client-id: ${{ secrets.AZURE_CLIENT_ID }} tenant-id: ${{ secrets.AZURE_TENANT_ID }} subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' @@ -262,7 +262,7 @@ jobs: echo "${PUBLISHING_INFO}" > packer/azure/sig-publishing-info.json - name: Upload publishing info artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: publishing-info path: images/capi/packer/azure/sig-publishing-info.json @@ -287,10 +287,10 @@ jobs: steps: - name: Checkout 
repository - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Download publishing info artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 + uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 with: name: publishing-info path: images/capi/packer/azure/sig/ @@ -314,7 +314,7 @@ jobs: echo "CONTAINERD_VERSION=${CONTAINERD_VERSION}" >> $GITHUB_OUTPUT - name: Azure Login - uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2 + uses: azure/login@532459ea530d8321f2fb9bb10d1e0bcf23869a43 # v3.0.0 with: client-id: ${{ secrets.AZURE_CLIENT_ID }} tenant-id: ${{ secrets.AZURE_TENANT_ID }} @@ -598,7 +598,7 @@ jobs: - name: Upload diagnostics if: failure() - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: test-diagnostics path: _artifacts/ @@ -646,10 +646,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Download publishing info artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 + uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 with: name: publishing-info path: images/capi/packer/azure/sig/ @@ -677,7 +677,7 @@ jobs: echo "TAGS=$(echo "$PUBLISHING_INFO" | jq -r .tags)" >> $GITHUB_OUTPUT - name: Azure Login - uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2 + uses: azure/login@532459ea530d8321f2fb9bb10d1e0bcf23869a43 # v3.0.0 with: client-id: ${{ secrets.AZURE_CLIENT_ID }} tenant-id: ${{ secrets.AZURE_TENANT_ID }} @@ -778,7 +778,7 @@ jobs: | tee sig-publishing.json - name: Upload publishing artifact - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: sig-publishing path: images/capi/sig-publishing.json @@ -799,10 +799,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Download publishing info artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 + uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 with: name: publishing-info path: images/capi/packer/azure/sig/ @@ -820,7 +820,7 @@ jobs: echo "SHARED_IMAGE_GALLERY_IMAGE_VERSION=$(echo "$PUBLISHING_INFO" | jq -r .shared_image_gallery_image_version)" >> $GITHUB_OUTPUT - name: Azure Login - uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2 + uses: azure/login@532459ea530d8321f2fb9bb10d1e0bcf23869a43 # v3.0.0 with: client-id: ${{ secrets.AZURE_CLIENT_ID }} tenant-id: ${{ secrets.AZURE_TENANT_ID }} From 2efcb578bd68d4f0c434ae82b71bbdd578da20ba Mon Sep 17 00:00:00 2001 From: zylxjtu Date: Tue, 21 Apr 2026 22:48:26 +0000 Subject: [PATCH 86/90] Update pause image to support windows 2025 --- docs/book/src/capi/capi.md | 2 +- images/capi/ansible/roles/node/defaults/main.yml | 2 +- images/capi/ansible/windows/example.vars.yml | 2 +- images/capi/ansible/windows/roles/runtimes/defaults/main.yml | 2 +- images/capi/packer/config/common.json | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/book/src/capi/capi.md b/docs/book/src/capi/capi.md index 6fd42127c1..1a4f2f1569 100644 --- a/docs/book/src/capi/capi.md +++ b/docs/book/src/capi/capi.md @@ -74,7 +74,7 @@ Several variables can be used to customize the image build. 
| `no_proxy` | This can be set to a comma-delimited list of domains that should be excluded from proxying during the Ansible stage of building | `""` | | `reenable_public_repos` | If set to `"false"`, the package repositories disabled by setting `disable_public_repos` will remain disabled at the end of the build. | `"true"` | | `remove_extra_repos` | If set to `"true"`, the package repositories added to the OS through the use of `extra_repos` will be removed at the end of the build. | `"false"` | -| `pause_image` | This can be used to override the default pause image used to hold the network namespace and IP for the pod. | `"registry.k8s.io/pause:3.10"` | +| `pause_image` | This can be used to override the default pause image used to hold the network namespace and IP for the pod. | `"registry.k8s.io/pause:3.10.2"` | | `pip_conf_file` | The path to a file to be copied into the image at `/etc/pip.conf` for use as a global config file. This file will be removed at the end of the build if `remove_extra_repos` is `true`. | `""` | | `containerd_additional_settings` | This is a string, base64 encoded, that contains additional configuration for containerd. Version 2 and 3 are supported, please use the appropriate version based on your containerd version. It must not contain the pause image configuration block. See `image-builder/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml` for the template. | `null` | | `load_additional_components` | If set to `"true"`, the `load_additional_components` role will be executed. 
This needs to be set to `"true"` if any of `additional_url_images`, `additional_registry_images` or `additional_executables` are set to `"true"` | `"false"` | diff --git a/images/capi/ansible/roles/node/defaults/main.yml b/images/capi/ansible/roles/node/defaults/main.yml index 06bd1eb949..789af2027e 100644 --- a/images/capi/ansible/roles/node/defaults/main.yml +++ b/images/capi/ansible/roles/node/defaults/main.yml @@ -122,7 +122,7 @@ sysctl_conf_file: >- {{ '/etc/sysctl.d/99-sysctl.conf' if ansible_facts['os_family'] in ['Common Base Linux Mariner', 'Flatcar', 'Microsoft Azure Linux', 'VMware Photon OS'] else '/etc/sysctl.conf' }} -pause_image: registry.k8s.io/pause:3.10 +pause_image: registry.k8s.io/pause:3.10.2 containerd_additional_settings: leak_local_mdns_to_dns: false build_target: virt diff --git a/images/capi/ansible/windows/example.vars.yml b/images/capi/ansible/windows/example.vars.yml index 1c63e24901..397669fb82 100644 --- a/images/capi/ansible/windows/example.vars.yml +++ b/images/capi/ansible/windows/example.vars.yml @@ -20,7 +20,7 @@ gmsa_keyvault_url: https://kubernetesartifacts.azureedge.net/ccgakvplugin/v1.1.4 runtime: containerd kubernetes_install_path: c:\k windows_service_manager: nssm -pause_image: registry.k8s.io/pause:3.10 +pause_image: registry.k8s.io/pause:3.10.2 load_additional_components: true additional_registry_images: true additional_registry_images_list: sigwindowstools/kube-proxy:v1.28.4-calico-hostprocess, sigwindowstools/calico-node:v3.26.1-hostprocess diff --git a/images/capi/ansible/windows/roles/runtimes/defaults/main.yml b/images/capi/ansible/windows/roles/runtimes/defaults/main.yml index e5056ef2ac..407480bdb9 100644 --- a/images/capi/ansible/windows/roles/runtimes/defaults/main.yml +++ b/images/capi/ansible/windows/roles/runtimes/defaults/main.yml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -pause_image: mcr.microsoft.com/oss/kubernetes/pause:3.10 +pause_image: mcr.microsoft.com/oss/v2/kubernetes/pause:3.10.1-3 containerd_additional_settings: "" containerd_config_file: config.toml diff --git a/images/capi/packer/config/common.json b/images/capi/packer/config/common.json index 1b2e649b30..f8224ea962 100644 --- a/images/capi/packer/config/common.json +++ b/images/capi/packer/config/common.json @@ -15,7 +15,7 @@ "node_custom_roles_post": "", "node_custom_roles_post_sysprep": "", "node_custom_roles_pre": "", - "pause_image": "registry.k8s.io/pause:3.10", + "pause_image": "registry.k8s.io/pause:3.10.2", "pip_conf_file": "", "redhat_epel_rpm": "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm", "reenable_public_repos": "true", From e6ee1fefa3e69b438a027c5088e7108ced20e051 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nick=20J=C3=BCttner?= Date: Wed, 29 Apr 2026 12:13:17 +0200 Subject: [PATCH 87/90] fix: create /etc/sysctl.d directory for Flatcar Flatcar Stable 4593.2.0+ ships without /etc/sysctl.d/ pre-created (part of the broader filesystem reshuffle in that release). The node role's 'Set and persist kernel params' task writes to /etc/sysctl.d/99-sysctl.conf via ansible.posix.sysctl, which uses mkstemp in the parent directory and fails with FileNotFoundError when the directory is missing. Add a file task in the setup role's flatcar.yml to ensure the directory exists before any sysctl tasks run. --- images/capi/ansible/roles/setup/tasks/flatcar.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/images/capi/ansible/roles/setup/tasks/flatcar.yml b/images/capi/ansible/roles/setup/tasks/flatcar.yml index 019783590e..08b71fd724 100644 --- a/images/capi/ansible/roles/setup/tasks/flatcar.yml +++ b/images/capi/ansible/roles/setup/tasks/flatcar.yml @@ -21,6 +21,16 @@ state: directory mode: "0755" +# Flatcar Stable 4593.2.0+ ships without /etc/sysctl.d/ pre-created. 
The +# node role's sysctl tasks write to /etc/sysctl.d/99-sysctl.conf via the +# ansible.posix.sysctl module, which uses mkstemp in the parent directory +# and fails with FileNotFoundError when the directory is missing. +- name: Create /etc/sysctl.d directory + ansible.builtin.file: + path: /etc/sysctl.d + state: directory + mode: "0755" + - name: Add env generator that includes system PATH on service path ansible.builtin.copy: src: etc/systemd/system-environment-generators/10-flatcar-path From 90cb9f625e9aac5fcf83c210fd7a50f6577b5e32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nick=20J=C3=BCttner?= Date: Wed, 29 Apr 2026 12:14:34 +0200 Subject: [PATCH 88/90] fix: bump Flatcar AMI volume_size to 15 GB Flatcar Stable 4593.2.0 increased baseline partition sizes (/boot to 1 GB, both /usr partitions to 2 GB, /oem to 1 GB), which pushes the source AMI snapshot above the global default volume_size of 8 GB defined in packer/ami/packer.json. EC2 rejects launch with: InvalidBlockDeviceMapping: Volume of size 8GB is smaller than snapshot, expect size >= 13GB Override volume_size to 15 in the Flatcar AMI configs so the launch instance has headroom for the new layout. 
--- images/capi/packer/ami/flatcar-arm64.json | 3 ++- images/capi/packer/ami/flatcar.json | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/images/capi/packer/ami/flatcar-arm64.json b/images/capi/packer/ami/flatcar-arm64.json index 1fc11c3c92..76f3823073 100644 --- a/images/capi/packer/ami/flatcar-arm64.json +++ b/images/capi/packer/ami/flatcar-arm64.json @@ -17,5 +17,6 @@ "sysusr_prefix": "/opt", "sysusrlocal_prefix": "/opt", "user_data": "", - "user_data_file": "packer/files/flatcar/ignition/bootstrap-cloud.json" + "user_data_file": "packer/files/flatcar/ignition/bootstrap-cloud.json", + "volume_size": "15" } diff --git a/images/capi/packer/ami/flatcar.json b/images/capi/packer/ami/flatcar.json index 7a7aa51a9e..71f6c56fdf 100644 --- a/images/capi/packer/ami/flatcar.json +++ b/images/capi/packer/ami/flatcar.json @@ -15,5 +15,6 @@ "sysusr_prefix": "/opt", "sysusrlocal_prefix": "/opt", "user_data": "", - "user_data_file": "packer/files/flatcar/ignition/bootstrap-cloud.json" + "user_data_file": "packer/files/flatcar/ignition/bootstrap-cloud.json", + "volume_size": "15" } From 6517d9e4ad86a5cc2e99d7a2fda535ce70496ba6 Mon Sep 17 00:00:00 2001 From: Drew Hudson-Viles Date: Tue, 5 May 2026 16:38:01 +0100 Subject: [PATCH 89/90] feat(containerd): adding ability to override the default image_pull_progress_timeout for containerd. 
Defaults to containerd defaults --- .../src/capi/containerd/customizing-containerd.md | 11 +++++++++++ .../templates/etc/containerd/config.toml | 4 ++++ .../windows/roles/runtimes/templates/config.toml | 4 ++++ images/capi/packer/ami/packer-windows.json | 2 ++ images/capi/packer/ami/packer.json | 2 ++ images/capi/packer/azure/packer-windows.json | 2 ++ images/capi/packer/azure/packer.json | 2 ++ images/capi/packer/config/ansible-args.json | 2 +- images/capi/packer/config/containerd.json | 1 + images/capi/packer/config/ppc64le/containerd.json | 1 + .../config/windows/ansible-args-windows.json | 2 +- images/capi/packer/config/windows/containerd.json | 1 + images/capi/packer/digitalocean/packer.json | 1 + images/capi/packer/gce/packer.json | 2 ++ images/capi/packer/goss/goss-command.yaml | 15 +++++++++++++++ images/capi/packer/goss/goss-vars.yaml | 1 + images/capi/packer/hcloud/packer-flatcar.json | 2 ++ images/capi/packer/hcloud/packer.json | 2 ++ images/capi/packer/huaweicloud/packer.json | 2 ++ images/capi/packer/maas/packer.json.tmpl | 2 ++ images/capi/packer/nutanix/packer-windows.json | 1 + images/capi/packer/nutanix/packer.json.tmpl | 2 ++ images/capi/packer/oci/packer-windows.json | 2 ++ images/capi/packer/oci/packer.json | 2 ++ images/capi/packer/openstack/packer.json | 2 ++ images/capi/packer/outscale/packer.json | 2 ++ images/capi/packer/ova/packer-node.json | 2 ++ images/capi/packer/ova/packer-windows.json | 2 ++ images/capi/packer/powervs/packer.json | 1 + images/capi/packer/proxmox/packer.json.tmpl | 3 +++ images/capi/packer/qemu/packer.json.tmpl | 2 ++ images/capi/packer/raw/packer.json.tmpl | 2 ++ images/capi/packer/scaleway/packer.json | 2 ++ images/capi/packer/vultr/packer.json | 1 + 34 files changed, 85 insertions(+), 2 deletions(-) diff --git a/docs/book/src/capi/containerd/customizing-containerd.md b/docs/book/src/capi/containerd/customizing-containerd.md index 713244789a..e6baaf9c35 100644 --- 
a/docs/book/src/capi/containerd/customizing-containerd.md +++ b/docs/book/src/capi/containerd/customizing-containerd.md @@ -73,3 +73,14 @@ root@sandboxed-container:/# dmesg ``` You are running a sandboxed container. + +## Additional Customizations + +Containerd can be further customized in a couple of ways. One option that is directly inserted into the containerd +[`config.toml`](https://github.com/kubernetes-sigs/image-builder/blob/main/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml#L14) +is to override the image pull progress timeout. This can be done using `containerd_image_pull_progress_timeout`. + +You can also add further configuration by adding values for `containerd_additional_settings`. This is rendered at the +end of the +[`config.toml`](https://github.com/kubernetes-sigs/image-builder/blob/main/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml#L86) +default template. diff --git a/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml b/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml index 2625d6e586..4907a601d8 100644 --- a/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml +++ b/images/capi/ansible/roles/containerd/templates/etc/containerd/config.toml @@ -10,6 +10,10 @@ imports = ["/etc/containerd/conf.d/*.toml"] [plugins] {% if containerd_version is version('2.0.0', '>=') %} +{% if containerd_image_pull_progress_timeout | default('') | length > 0 %} + [plugins.'io.containerd.cri.v1.images'] + image_pull_progress_timeout = "{{ containerd_image_pull_progress_timeout }}" +{% endif %} [plugins.'io.containerd.cri.v1.images'.pinned_images] sandbox = "{{ pause_image }}" [plugins.'io.containerd.cri.v1.images'.registry] diff --git a/images/capi/ansible/windows/roles/runtimes/templates/config.toml b/images/capi/ansible/windows/roles/runtimes/templates/config.toml index 87d306362b..dcb70ded14 100644 --- 
a/images/capi/ansible/windows/roles/runtimes/templates/config.toml +++ b/images/capi/ansible/windows/roles/runtimes/templates/config.toml @@ -25,6 +25,10 @@ imports = ["{{ containerd_conf_dir }}\\conf.d\\*.toml"] [plugins] {% if containerd_version is version('2.0.0', '>=') %} +{% if containerd_image_pull_progress_timeout | default('') | length > 0 %} + [plugins.'io.containerd.cri.v1.images'] + image_pull_progress_timeout = "{{ containerd_image_pull_progress_timeout }}" +{% endif %} [plugins.'io.containerd.cri.v1.images'.pinned_images] sandbox = "{{ pause_image }}" [plugins.'io.containerd.cri.v1.runtime'.cni] diff --git a/images/capi/packer/ami/packer-windows.json b/images/capi/packer/ami/packer-windows.json index e393817220..bdc6723098 100644 --- a/images/capi/packer/ami/packer-windows.json +++ b/images/capi/packer/ami/packer-windows.json @@ -126,6 +126,7 @@ "vars_inline": { "OS": "{{user `distribution` | lower}}", "PROVIDER": "amazon", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "distribution_version": "{{user `distribution_version`}}", "kubernetes_version": "{{user `kubernetes_semver`}}", @@ -177,6 +178,7 @@ "cloudbase_metadata_services_unattend": "cloudbaseinit.metadata.services.base.EmptyMetadataService", "cloudbase_plugins": "cloudbaseinit.plugins.windows.createuser.CreateUserPlugin, cloudbaseinit.plugins.common.setuserpassword.SetUserPasswordPlugin, cloudbaseinit.plugins.windows.extendvolumes.ExtendVolumesPlugin, cloudbaseinit.plugins.common.userdata.UserDataPlugin, cloudbaseinit.plugins.common.ephemeraldisk.EphemeralDiskPlugin, cloudbaseinit.plugins.common.mtu.MTUPlugin, cloudbaseinit.plugins.common.sethostname.SetHostNamePlugin, cloudbaseinit.plugins.common.sshpublickeys.SetUserSSHPublicKeysPlugin", "cloudbase_plugins_unattend": "cloudbaseinit.plugins.common.mtu.MTUPlugin", + "containerd_image_pull_progress_timeout": null, "containerd_version": null, 
"encrypted": "false", "iam_instance_profile": "", diff --git a/images/capi/packer/ami/packer.json b/images/capi/packer/ami/packer.json index 04f6b3054d..1ca76fe352 100644 --- a/images/capi/packer/ami/packer.json +++ b/images/capi/packer/ami/packer.json @@ -131,6 +131,7 @@ "PROVIDER": "amazon", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -164,6 +165,7 @@ "builder_instance_type": "t3.small", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/azure/packer-windows.json b/images/capi/packer/azure/packer-windows.json index a1f0361ffe..2acc35d04c 100644 --- a/images/capi/packer/azure/packer-windows.json +++ b/images/capi/packer/azure/packer-windows.json @@ -177,6 +177,7 @@ "vars_inline": { "OS": "{{user `distribution` | lower}}", "PROVIDER": "azure", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "distribution_version": "{{user `distribution_version`}}", "kubernetes_version": "{{user `kubernetes_semver`}}", @@ -217,6 +218,7 @@ "cloudbase_plugins": "cloudbaseinit.plugins.windows.createuser.CreateUserPlugin, cloudbaseinit.plugins.common.setuserpassword.SetUserPasswordPlugin, cloudbaseinit.plugins.windows.extendvolumes.ExtendVolumesPlugin, 
cloudbaseinit.plugins.common.userdata.UserDataPlugin, cloudbaseinit.plugins.common.ephemeraldisk.EphemeralDiskPlugin, cloudbaseinit.plugins.windows.azureguestagent.AzureGuestAgentPlugin, cloudbaseinit.plugins.common.mtu.MTUPlugin, cloudbaseinit.plugins.common.sethostname.SetHostNamePlugin", "cloudbase_plugins_unattend": "cloudbaseinit.plugins.common.mtu.MTUPlugin", "community_gallery_image_id": "", + "containerd_image_pull_progress_timeout": null, "containerd_version": null, "direct_shared_gallery_image_id": "", "exclude_from_latest": "false", diff --git a/images/capi/packer/azure/packer.json b/images/capi/packer/azure/packer.json index f4de234784..84c55d70b8 100644 --- a/images/capi/packer/azure/packer.json +++ b/images/capi/packer/azure/packer.json @@ -180,6 +180,7 @@ "PROVIDER": "azure", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "containerd_wasm_shims_runtimes": "{{user `containerd_wasm_shims_runtimes` }}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", @@ -221,6 +222,7 @@ "community_gallery_image_id": "", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "containerd_wasm_shims_runtimes": null, diff --git a/images/capi/packer/config/ansible-args.json b/images/capi/packer/config/ansible-args.json index 487412c37f..b4f74f877f 100644 --- a/images/capi/packer/config/ansible-args.json +++ b/images/capi/packer/config/ansible-args.json @@ -1,5 +1,5 @@ { "ansible_common_ssh_args": "-o IdentitiesOnly=yes", - "ansible_common_vars": 
"containerd_gvisor_runtime={{user `containerd_gvisor_runtime`}} containerd_gvisor_version={{user `containerd_gvisor_version`}} containerd_sha256={{user `containerd_sha256`}} pause_image={{user `pause_image`}} containerd_additional_settings={{user `containerd_additional_settings`}} containerd_cri_socket={{user `containerd_cri_socket`}} containerd_version={{user `containerd_version`}} containerd_wasm_shims_url={{user `containerd_wasm_shims_url`}} containerd_wasm_shims_version={{user `containerd_wasm_shims_version`}} containerd_wasm_shims_sha256={{user `containerd_wasm_shims_sha256`}} containerd_wasm_shims_runtimes=\"{{user `containerd_wasm_shims_runtimes`}}\" containerd_wasm_shims_runtime_versions=\"{{user `containerd_wasm_shims_runtime_versions`}}\" crictl_version={{user `crictl_version`}} custom_role_names=\"{{user `custom_role_names`}}\" firstboot_custom_roles_pre=\"{{user `firstboot_custom_roles_pre`}}\" firstboot_custom_roles_post=\"{{user `firstboot_custom_roles_post`}}\" node_custom_roles_pre=\"{{user `node_custom_roles_pre`}}\" node_custom_roles_post=\"{{user `node_custom_roles_post`}}\" node_custom_roles_post_sysprep=\"{{user `node_custom_roles_post_sysprep`}}\" disable_public_repos={{user `disable_public_repos`}} extra_debs=\"{{user `extra_debs`}}\" extra_kernel_boot_params=\"{{user `extra_kernel_boot_params`}}\" extra_repos=\"{{user `extra_repos`}}\" extra_rpms=\"{{user `extra_rpms`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} kubeadm_template={{user `kubeadm_template`}} kubernetes_apiserver_port={{user `kubernetes_apiserver_port`}} kubernetes_cni_http_source={{user `kubernetes_cni_http_source`}} kubernetes_http_source={{user `kubernetes_http_source`}} kubernetes_container_registry={{user `kubernetes_container_registry`}} kubernetes_rpm_repo={{user `kubernetes_rpm_repo`}} kubernetes_rpm_gpg_key={{user `kubernetes_rpm_gpg_key`}} kubernetes_rpm_gpg_check={{user `kubernetes_rpm_gpg_check`}} kubernetes_deb_repo={{user 
`kubernetes_deb_repo`}} kubernetes_deb_gpg_key={{user `kubernetes_deb_gpg_key`}} kubernetes_cni_deb_version={{user `kubernetes_cni_deb_version`}} kubernetes_cni_rpm_version={{user `kubernetes_cni_rpm_version`}} kubernetes_cni_semver={{user `kubernetes_cni_semver`}} kubernetes_cni_source_type={{user `kubernetes_cni_source_type`}} kubernetes_semver={{user `kubernetes_semver`}} kubernetes_source_type={{user `kubernetes_source_type`}} kubernetes_load_additional_imgs={{user `kubernetes_load_additional_imgs`}} kubernetes_deb_version={{user `kubernetes_deb_version`}} kubernetes_rpm_version={{user `kubernetes_rpm_version`}} no_proxy={{user `no_proxy`}} pip_conf_file={{user `pip_conf_file`}} python_path={{user `python_path`}} redhat_epel_rpm={{user `redhat_epel_rpm`}} epel_rpm_gpg_key={{user `epel_rpm_gpg_key`}} reenable_public_repos={{user `reenable_public_repos`}} remove_extra_repos={{user `remove_extra_repos`}} systemd_prefix={{user `systemd_prefix`}} sysusr_prefix={{user `sysusr_prefix`}} sysusrlocal_prefix={{user `sysusrlocal_prefix`}} load_additional_components={{ user `load_additional_components`}} additional_registry_images={{ user `additional_registry_images`}} additional_registry_images_list={{ user `additional_registry_images_list`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_url_images={{ user `additional_url_images`}} additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} additional_executables_destination_path={{ user `additional_executables_destination_path`}} additional_s3={{ user `additional_s3`}} build_target={{ user `build_target`}} amazon_ssm_agent_rpm={{ user `amazon_ssm_agent_rpm` }} enable_containerd_audit={{ user `enable_containerd_audit` }} kubernetes_enable_automatic_resource_sizing={{ user `kubernetes_enable_automatic_resource_sizing` }} debug_tools={{user `debug_tools`}} 
ubuntu_repo={{user `ubuntu_repo`}} ubuntu_security_repo={{user `ubuntu_security_repo`}} gpu_block_nouveau_loading={{user `block_nouveau_loading`}} runc_version={{user `runc_version`}} containerd_service_url={{user `containerd_service_url`}} netplan_removal_excludes=\"{{user `netplan_removal_excludes`}}\"", + "ansible_common_vars": "containerd_gvisor_runtime={{user `containerd_gvisor_runtime`}} containerd_gvisor_version={{user `containerd_gvisor_version`}} containerd_sha256={{user `containerd_sha256`}} pause_image={{user `pause_image`}} containerd_additional_settings={{user `containerd_additional_settings`}} containerd_cri_socket={{user `containerd_cri_socket`}} containerd_version={{user `containerd_version`}} containerd_image_pull_progress_timeout={{user `containerd_image_pull_progress_timeout`}} containerd_wasm_shims_url={{user `containerd_wasm_shims_url`}} containerd_wasm_shims_version={{user `containerd_wasm_shims_version`}} containerd_wasm_shims_sha256={{user `containerd_wasm_shims_sha256`}} containerd_wasm_shims_runtimes=\"{{user `containerd_wasm_shims_runtimes`}}\" containerd_wasm_shims_runtime_versions=\"{{user `containerd_wasm_shims_runtime_versions`}}\" crictl_version={{user `crictl_version`}} custom_role_names=\"{{user `custom_role_names`}}\" firstboot_custom_roles_pre=\"{{user `firstboot_custom_roles_pre`}}\" firstboot_custom_roles_post=\"{{user `firstboot_custom_roles_post`}}\" node_custom_roles_pre=\"{{user `node_custom_roles_pre`}}\" node_custom_roles_post=\"{{user `node_custom_roles_post`}}\" node_custom_roles_post_sysprep=\"{{user `node_custom_roles_post_sysprep`}}\" disable_public_repos={{user `disable_public_repos`}} extra_debs=\"{{user `extra_debs`}}\" extra_kernel_boot_params=\"{{user `extra_kernel_boot_params`}}\" extra_repos=\"{{user `extra_repos`}}\" extra_rpms=\"{{user `extra_rpms`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} kubeadm_template={{user `kubeadm_template`}} kubernetes_apiserver_port={{user 
`kubernetes_apiserver_port`}} kubernetes_cni_http_source={{user `kubernetes_cni_http_source`}} kubernetes_http_source={{user `kubernetes_http_source`}} kubernetes_container_registry={{user `kubernetes_container_registry`}} kubernetes_rpm_repo={{user `kubernetes_rpm_repo`}} kubernetes_rpm_gpg_key={{user `kubernetes_rpm_gpg_key`}} kubernetes_rpm_gpg_check={{user `kubernetes_rpm_gpg_check`}} kubernetes_deb_repo={{user `kubernetes_deb_repo`}} kubernetes_deb_gpg_key={{user `kubernetes_deb_gpg_key`}} kubernetes_cni_deb_version={{user `kubernetes_cni_deb_version`}} kubernetes_cni_rpm_version={{user `kubernetes_cni_rpm_version`}} kubernetes_cni_semver={{user `kubernetes_cni_semver`}} kubernetes_cni_source_type={{user `kubernetes_cni_source_type`}} kubernetes_semver={{user `kubernetes_semver`}} kubernetes_source_type={{user `kubernetes_source_type`}} kubernetes_load_additional_imgs={{user `kubernetes_load_additional_imgs`}} kubernetes_deb_version={{user `kubernetes_deb_version`}} kubernetes_rpm_version={{user `kubernetes_rpm_version`}} no_proxy={{user `no_proxy`}} pip_conf_file={{user `pip_conf_file`}} python_path={{user `python_path`}} redhat_epel_rpm={{user `redhat_epel_rpm`}} epel_rpm_gpg_key={{user `epel_rpm_gpg_key`}} reenable_public_repos={{user `reenable_public_repos`}} remove_extra_repos={{user `remove_extra_repos`}} systemd_prefix={{user `systemd_prefix`}} sysusr_prefix={{user `sysusr_prefix`}} sysusrlocal_prefix={{user `sysusrlocal_prefix`}} load_additional_components={{ user `load_additional_components`}} additional_registry_images={{ user `additional_registry_images`}} additional_registry_images_list={{ user `additional_registry_images_list`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_url_images={{ user `additional_url_images`}} additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} 
additional_executables_destination_path={{ user `additional_executables_destination_path`}} additional_s3={{ user `additional_s3`}} build_target={{ user `build_target`}} amazon_ssm_agent_rpm={{ user `amazon_ssm_agent_rpm` }} enable_containerd_audit={{ user `enable_containerd_audit` }} kubernetes_enable_automatic_resource_sizing={{ user `kubernetes_enable_automatic_resource_sizing` }} debug_tools={{user `debug_tools`}} ubuntu_repo={{user `ubuntu_repo`}} ubuntu_security_repo={{user `ubuntu_security_repo`}} gpu_block_nouveau_loading={{user `block_nouveau_loading`}} runc_version={{user `runc_version`}} containerd_service_url={{user `containerd_service_url`}} netplan_removal_excludes=\"{{user `netplan_removal_excludes`}}\"", "ansible_scp_extra_args": "{{env `ANSIBLE_SCP_EXTRA_ARGS`}}" } diff --git a/images/capi/packer/config/containerd.json b/images/capi/packer/config/containerd.json index 56f4590d38..08b1d789da 100644 --- a/images/capi/packer/config/containerd.json +++ b/images/capi/packer/config/containerd.json @@ -3,6 +3,7 @@ "containerd_cri_socket": "/var/run/containerd/containerd.sock", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_version": "2.2.2", "runc_version": "1.3.4" } diff --git a/images/capi/packer/config/ppc64le/containerd.json b/images/capi/packer/config/ppc64le/containerd.json index 6e04e0334f..78d4944db8 100644 --- a/images/capi/packer/config/ppc64le/containerd.json +++ b/images/capi/packer/config/ppc64le/containerd.json @@ -1,4 +1,5 @@ { + "containerd_image_pull_progress_timeout": null, "containerd_sha256": "8f7a8190f2a635cd0e5580a131408a275ba277f7a04edffba4a4005960093987", "containerd_version": "2.2.2" } diff --git a/images/capi/packer/config/windows/ansible-args-windows.json b/images/capi/packer/config/windows/ansible-args-windows.json index 02120012fe..f9a3231092 100644 --- a/images/capi/packer/config/windows/ansible-args-windows.json +++ 
b/images/capi/packer/config/windows/ansible-args-windows.json @@ -1,3 +1,3 @@ { - "ansible_common_vars": "runtime={{user `runtime`}} containerd_url={{user `containerd_url`}} containerd_sha256={{user `containerd_sha256_windows`}} containerd_version={{user `containerd_version`}} pause_image={{user `pause_image`}} additional_debug_files=\"{{user `additional_debug_files`}}\" containerd_additional_settings={{user `containerd_additional_settings`}} custom_role_names=\"{{user `custom_role_names`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} no_proxy={{user `no_proxy`}} kubernetes_base_url={{user `kubernetes_base_url`}} kubernetes_semver={{user `kubernetes_semver`}} kubernetes_install_path={{user `kubernetes_install_path`}} cloudbase_init_url=\"{{user `cloudbase_init_url`}}\" cloudbase_plugins=\"{{user `cloudbase_plugins`}}\" cloudbase_metadata_services=\"{{user `cloudbase_metadata_services`}}\" cloudbase_plugins_unattend=\"{{user `cloudbase_plugins_unattend`}}\" cloudbase_metadata_services_unattend=\"{{user `cloudbase_metadata_services_unattend`}}\" prepull={{user `prepull`}} windows_updates_kbs=\"{{user `windows_updates_kbs`}}\" windows_updates_categories=\"{{user `windows_updates_categories`}}\" windows_service_manager={{user `windows_service_manager`}} nssm_url={{user `nssm_url`}} distribution_version={{user `distribution_version`}} netbios_host_name_compatibility={{user `netbios_host_name_compatibility`}} disable_hypervisor={{ user `disable_hypervisor` }} cloudbase_logging_serial_port={{ user `cloudbase_logging_serial_port` }} cloudbase_real_time_clock_utc={{ user `cloudbase_real_time_clock_utc` }} load_additional_components={{ user `load_additional_components`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_registry_images={{ user `additional_registry_images`}} additional_registry_images_list={{ user `additional_registry_images_list`}} additional_url_images={{ user `additional_url_images`}} 
additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} additional_executables_destination_path={{ user `additional_executables_destination_path`}} ssh_source_url={{user `ssh_source_url` }} debug_tools={{user `debug_tools`}}" + "ansible_common_vars": "runtime={{user `runtime`}} containerd_url={{user `containerd_url`}} containerd_sha256={{user `containerd_sha256_windows`}} containerd_version={{user `containerd_version`}} containerd_image_pull_progress_timeout={{user `containerd_image_pull_progress_timeout`}} pause_image={{user `pause_image`}} additional_debug_files=\"{{user `additional_debug_files`}}\" containerd_additional_settings={{user `containerd_additional_settings`}} custom_role_names=\"{{user `custom_role_names`}}\" http_proxy={{user `http_proxy`}} https_proxy={{user `https_proxy`}} no_proxy={{user `no_proxy`}} kubernetes_base_url={{user `kubernetes_base_url`}} kubernetes_semver={{user `kubernetes_semver`}} kubernetes_install_path={{user `kubernetes_install_path`}} cloudbase_init_url=\"{{user `cloudbase_init_url`}}\" cloudbase_plugins=\"{{user `cloudbase_plugins`}}\" cloudbase_metadata_services=\"{{user `cloudbase_metadata_services`}}\" cloudbase_plugins_unattend=\"{{user `cloudbase_plugins_unattend`}}\" cloudbase_metadata_services_unattend=\"{{user `cloudbase_metadata_services_unattend`}}\" prepull={{user `prepull`}} windows_updates_kbs=\"{{user `windows_updates_kbs`}}\" windows_updates_categories=\"{{user `windows_updates_categories`}}\" windows_service_manager={{user `windows_service_manager`}} nssm_url={{user `nssm_url`}} distribution_version={{user `distribution_version`}} netbios_host_name_compatibility={{user `netbios_host_name_compatibility`}} disable_hypervisor={{ user `disable_hypervisor` }} cloudbase_logging_serial_port={{ user `cloudbase_logging_serial_port` }} cloudbase_real_time_clock_utc={{ user 
`cloudbase_real_time_clock_utc` }} load_additional_components={{ user `load_additional_components`}} ecr_credential_provider={{ user `ecr_credential_provider` }} additional_registry_images={{ user `additional_registry_images`}} additional_registry_images_list={{ user `additional_registry_images_list`}} additional_url_images={{ user `additional_url_images`}} additional_url_images_list={{ user `additional_url_images_list`}} additional_executables={{ user `additional_executables`}} additional_executables_list={{ user `additional_executables_list`}} additional_executables_destination_path={{ user `additional_executables_destination_path`}} ssh_source_url={{user `ssh_source_url` }} debug_tools={{user `debug_tools`}}" } diff --git a/images/capi/packer/config/windows/containerd.json b/images/capi/packer/config/windows/containerd.json index ad43316008..7120a37b18 100644 --- a/images/capi/packer/config/windows/containerd.json +++ b/images/capi/packer/config/windows/containerd.json @@ -1,4 +1,5 @@ { "containerd_additional_settings": null, + "containerd_image_pull_progress_timeout": null, "containerd_url": "https://github.com/containerd/containerd/releases/download/v{{user `containerd_version`}}/containerd-{{user `containerd_version`}}-windows-amd64.tar.gz" } diff --git a/images/capi/packer/digitalocean/packer.json b/images/capi/packer/digitalocean/packer.json index 73e4051d25..35750d72ea 100644 --- a/images/capi/packer/digitalocean/packer.json +++ b/images/capi/packer/digitalocean/packer.json @@ -45,6 +45,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/gce/packer.json b/images/capi/packer/gce/packer.json index 6464ee07de..498e57e580 
100644 --- a/images/capi/packer/gce/packer.json +++ b/images/capi/packer/gce/packer.json @@ -67,6 +67,7 @@ "PROVIDER": "gcp", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -87,6 +88,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/goss/goss-command.yaml b/images/capi/packer/goss/goss-command.yaml index 7bb05f898e..d82a96449d 100644 --- a/images/capi/packer/goss/goss-command.yaml +++ b/images/capi/packer/goss/goss-command.yaml @@ -11,6 +11,13 @@ command: stdout: [] stderr: [] timeout: 0 +{{if and (semverCompare ">=2.0.0" .Vars.containerd_version) (ne .Vars.containerd_image_pull_progress_timeout "")}} + grep -E 'image_pull_progress_timeout = "{{.Vars.containerd_image_pull_progress_timeout}}"' /etc/containerd/config.toml: + exit-status: 0 + stdout: [ ] + stderr: [ ] + timeout: 0 +{{end}} {{if contains "lunatic" .Vars.containerd_wasm_shims_runtimes}} containerd-shim-lunatic-v1: exit-status: 1 @@ -205,6 +212,14 @@ command: stdout: - "sandbox = \"{{.Vars.pause_image}}\"" timeout: 30000 + {{ if ne .Vars.containerd_image_pull_progress_timeout "" }} + Correct Containerd image pull timeout config: + exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 
'image_pull_progress_timeout = \\\"{{.Vars.containerd_image_pull_progress_timeout}}\\\"'\"" + exit-status: 0 + stdout: + - "image_pull_progress_timeout = \"{{.Vars.containerd_image_pull_progress_timeout}}\"" + timeout: 30000 + {{ end }} Correct Containerd CNI conf_dir: exec: "powershell -command \"Get-Content 'C:\\Program Files\\containerd\\config.toml' | Select-String -SimpleMatch 'conf_dir = \\\"C:/etc/cni/net.d\\\"'\"" exit-status: 0 diff --git a/images/capi/packer/goss/goss-vars.yaml b/images/capi/packer/goss/goss-vars.yaml index 85c406031a..d0272ee97a 100644 --- a/images/capi/packer/goss/goss-vars.yaml +++ b/images/capi/packer/goss/goss-vars.yaml @@ -91,6 +91,7 @@ photon_5_rpms: &photon_5_rpms arch: "amd64" containerd_gvisor_runtime: "" containerd_gvisor_version: "" +containerd_image_pull_progress_timeout: "" containerd_version: "" containerd_wasm_shims_runtimes: "" extra_kernel_boot_params: "" diff --git a/images/capi/packer/hcloud/packer-flatcar.json b/images/capi/packer/hcloud/packer-flatcar.json index de227d846a..1430cfcdbd 100644 --- a/images/capi/packer/hcloud/packer-flatcar.json +++ b/images/capi/packer/hcloud/packer-flatcar.json @@ -29,6 +29,7 @@ "build_name": "{{user `build_name`}}", "build_timestamp": "{{user `build_timestamp`}}", "build_type": "node", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_semver": "{{user `kubernetes_cni_semver`}}", "kubernetes_semver": "{{user `kubernetes_semver`}}", @@ -137,6 +138,7 @@ "ansible_user_vars": "", "build_name": null, "build_timestamp": "{{timestamp}}", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "containerd_wasm_shims_runtimes": null, diff --git a/images/capi/packer/hcloud/packer.json 
b/images/capi/packer/hcloud/packer.json index 3cc8d51dd2..19308d02c5 100644 --- a/images/capi/packer/hcloud/packer.json +++ b/images/capi/packer/hcloud/packer.json @@ -104,6 +104,7 @@ "PROVIDER": "hcloud", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "containerd_wasm_shims_runtimes": "{{user `containerd_wasm_shims_runtimes` }}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", @@ -127,6 +128,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "containerd_wasm_shims_runtimes": null, diff --git a/images/capi/packer/huaweicloud/packer.json b/images/capi/packer/huaweicloud/packer.json index c12ff3ebe9..abe9129f04 100644 --- a/images/capi/packer/huaweicloud/packer.json +++ b/images/capi/packer/huaweicloud/packer.json @@ -101,6 +101,7 @@ "PROVIDER": "huaweicloud", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -125,6 +126,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": 
"https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/maas/packer.json.tmpl b/images/capi/packer/maas/packer.json.tmpl index f51472147f..48734e1bb5 100644 --- a/images/capi/packer/maas/packer.json.tmpl +++ b/images/capi/packer/maas/packer.json.tmpl @@ -139,6 +139,7 @@ "OS": "{{user `distro_name` | lower}}", "OS_VERSION": "{{user `distribution_version` | lower}}", "PROVIDER": "qemu", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -165,6 +166,7 @@ "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_url": "https://github.com/containerd/containerd/releases/download/v{{user `containerd_version`}}/containerd-{{user `containerd_version`}}-linux-amd64.tar.gz", "containerd_version": null, + "containerd_image_pull_progress_timeout": null, "cpus": "1", "crictl_version": null, "crictl_url": "https://github.com/kubernetes-sigs/cri-tools/releases/download/v{{user `crictl_version`}}/crictl-v{{user `crictl_version`}}-linux-amd64.tar.gz", diff --git a/images/capi/packer/nutanix/packer-windows.json b/images/capi/packer/nutanix/packer-windows.json index 070196e294..322481d096 100644 --- a/images/capi/packer/nutanix/packer-windows.json +++ b/images/capi/packer/nutanix/packer-windows.json @@ -106,6 +106,7 @@ "vars_inline": { "OS": "{{user `distro_name` | lower}}", "PROVIDER": "nutanix", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "distribution_version": "{{user 
`distro_version`}}", "kubernetes_version": "{{user `kubernetes_semver`}}", diff --git a/images/capi/packer/nutanix/packer.json.tmpl b/images/capi/packer/nutanix/packer.json.tmpl index 22cc1e4267..b71042195b 100644 --- a/images/capi/packer/nutanix/packer.json.tmpl +++ b/images/capi/packer/nutanix/packer.json.tmpl @@ -97,6 +97,7 @@ "PROVIDER": "nutanix", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -118,6 +119,7 @@ "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", "containerd_version": null, + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "runc_version": null, "cpus": "1", diff --git a/images/capi/packer/oci/packer-windows.json b/images/capi/packer/oci/packer-windows.json index 123f1181b7..fb395cb16b 100644 --- a/images/capi/packer/oci/packer-windows.json +++ b/images/capi/packer/oci/packer-windows.json @@ -35,6 +35,7 @@ "build_name": "{{user `build_name`}}", "build_timestamp": "{{user `build_timestamp`}}", "build_type": "node", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_semver": "{{user `kubernetes_cni_semver`}}", "kubernetes_semver": "{{user `kubernetes_semver`}}", @@ -123,6 +124,7 @@ "cloudbase_metadata_services_unattend": "cloudbaseinit.metadata.services.httpservice.HttpService", "cloudbase_plugins": "cloudbaseinit.plugins.common.userdata.UserDataPlugin, 
cloudbaseinit.plugins.common.ephemeraldisk.EphemeralDiskPlugin, cloudbaseinit.plugins.common.mtu.MTUPlugin, cloudbaseinit.plugins.common.sethostname.SetHostNamePlugin", "cloudbase_plugins_unattend": "cloudbaseinit.plugins.common.mtu.MTUPlugin", + "containerd_image_pull_progress_timeout": null, "containerd_url": "", "containerd_version": null, "ib_version": "{{env `IB_VERSION`}}", diff --git a/images/capi/packer/oci/packer.json b/images/capi/packer/oci/packer.json index 3b7160a0cb..73fdcc0750 100644 --- a/images/capi/packer/oci/packer.json +++ b/images/capi/packer/oci/packer.json @@ -78,6 +78,7 @@ "PROVIDER": "oci", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -101,6 +102,7 @@ "compartment_ocid": "", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/openstack/packer.json b/images/capi/packer/openstack/packer.json index 44ed7bbfc6..04a7bb60f1 100644 --- a/images/capi/packer/openstack/packer.json +++ b/images/capi/packer/openstack/packer.json @@ -95,6 +95,7 @@ "PROVIDER": "openstack", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user 
`containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -116,6 +117,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/outscale/packer.json b/images/capi/packer/outscale/packer.json index ad4bd5e740..1343cc198a 100644 --- a/images/capi/packer/outscale/packer.json +++ b/images/capi/packer/outscale/packer.json @@ -78,6 +78,7 @@ "PROVIDER": "outscale", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -99,6 +100,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/ova/packer-node.json b/images/capi/packer/ova/packer-node.json index f189b2c934..cfd3f2d37e 100644 --- a/images/capi/packer/ova/packer-node.json +++ b/images/capi/packer/ova/packer-node.json @@ -463,6 +463,7 @@ "block_nouveau_loading": "{{user `block_nouveau_loading`}}", "containerd_gvisor_runtime": 
"{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -492,6 +493,7 @@ "cdrom_adapter_type": "ide", "cdrom_type": "ide", "cluster": "", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/ova/packer-windows.json b/images/capi/packer/ova/packer-windows.json index 065b0bfdc8..566c4c61e2 100644 --- a/images/capi/packer/ova/packer-windows.json +++ b/images/capi/packer/ova/packer-windows.json @@ -212,6 +212,7 @@ "vars_inline": { "OS": "{{user `distro_name` | lower}}", "PROVIDER": "ova", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "distribution_version": "{{user `distro_version`}}", "kubernetes_version": "{{user `kubernetes_semver`}}", @@ -243,6 +244,7 @@ "cloudbase_plugins": "cloudbaseinit.plugins.windows.createuser.CreateUserPlugin, cloudbaseinit.plugins.common.setuserpassword.SetUserPasswordPlugin, cloudbaseinit.plugins.common.mtu.MTUPlugin, cloudbaseinit.plugins.common.sethostname.SetHostNamePlugin, cloudbaseinit.plugins.common.networkconfig.NetworkConfigPlugin, cloudbaseinit.plugins.windows.extendvolumes.ExtendVolumesPlugin, cloudbaseinit.plugins.common.ephemeraldisk.EphemeralDiskPlugin, cloudbaseinit.plugins.common.sshpublickeys.SetUserSSHPublicKeysPlugin, cloudbaseinit.plugins.common.userdata.UserDataPlugin, cloudbaseinit.plugins.common.localscripts.LocalScriptsPlugin, 
cloudbaseinit.plugins.windows.createuser.CreateUserPlugin, cloudbaseinit.plugins.windows.extendvolumes.ExtendVolumesPlugin", "cloudbase_plugins_unattend": "cloudbaseinit.plugins.common.mtu.MTUPlugin", "cloudbase_real_time_clock_utc": "true", + "containerd_image_pull_progress_timeout": null, "containerd_url": "", "containerd_version": null, "disable_hypervisor": null, diff --git a/images/capi/packer/powervs/packer.json b/images/capi/packer/powervs/packer.json index 5b87d8c5ac..e8e8c2a5a6 100644 --- a/images/capi/packer/powervs/packer.json +++ b/images/capi/packer/powervs/packer.json @@ -74,6 +74,7 @@ "capture_cos_secret_key": "", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/proxmox/packer.json.tmpl b/images/capi/packer/proxmox/packer.json.tmpl index 8a03445114..e26a764da4 100644 --- a/images/capi/packer/proxmox/packer.json.tmpl +++ b/images/capi/packer/proxmox/packer.json.tmpl @@ -154,6 +154,7 @@ "OS": "{{user `distro_name` | lower}}", "OS_VERSION": "{{user `distribution_version` | lower}}", "PROVIDER": "qemu", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -186,6 +187,7 @@ "bridge": "{{env `PROXMOX_BRIDGE`}}", "build_timestamp": "{{timestamp}}", "containerd_version": null, + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "runc_version": null, "cores": 
"2", @@ -231,3 +233,4 @@ "scsi_controller": "virtio-scsi-pci" } } + diff --git a/images/capi/packer/qemu/packer.json.tmpl b/images/capi/packer/qemu/packer.json.tmpl index 31434039fa..76524d47aa 100644 --- a/images/capi/packer/qemu/packer.json.tmpl +++ b/images/capi/packer/qemu/packer.json.tmpl @@ -142,6 +142,7 @@ "OS": "{{user `distro_name` | lower}}", "OS_VERSION": "{{user `distribution_version` | lower}}", "PROVIDER": "qemu", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -183,6 +184,7 @@ "build_timestamp": "{{timestamp}}", "cd_files": "linux/base/*.nothing", "containerd_version": null, + "containerd_image_pull_progress_timeout": null, "cpu_model": "host", "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "runc_version": null, diff --git a/images/capi/packer/raw/packer.json.tmpl b/images/capi/packer/raw/packer.json.tmpl index 7199703d87..652322341a 100644 --- a/images/capi/packer/raw/packer.json.tmpl +++ b/images/capi/packer/raw/packer.json.tmpl @@ -134,6 +134,7 @@ "PROVIDER": "raw", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -160,6 +161,7 @@ "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", "containerd_version": null, + "containerd_image_pull_progress_timeout": null, 
"containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "runc_version": null, "cpus": "1", diff --git a/images/capi/packer/scaleway/packer.json b/images/capi/packer/scaleway/packer.json index e2b84755a3..c091dfef36 100644 --- a/images/capi/packer/scaleway/packer.json +++ b/images/capi/packer/scaleway/packer.json @@ -58,6 +58,7 @@ "PROVIDER": "scaleway", "containerd_gvisor_runtime": "{{user `containerd_gvisor_runtime`}}", "containerd_gvisor_version": "{{user `containerd_gvisor_version`}}", + "containerd_image_pull_progress_timeout": "{{user `containerd_image_pull_progress_timeout`}}", "containerd_version": "{{user `containerd_version`}}", "kubernetes_cni_deb_version": "{{ user `kubernetes_cni_deb_version` }}", "kubernetes_cni_rpm_version": "{{ split (user `kubernetes_cni_rpm_version`) \"-\" 0 }}", @@ -80,6 +81,7 @@ "commercial_type": "DEV1-S", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, diff --git a/images/capi/packer/vultr/packer.json b/images/capi/packer/vultr/packer.json index c311927a51..208c4353a7 100644 --- a/images/capi/packer/vultr/packer.json +++ b/images/capi/packer/vultr/packer.json @@ -39,6 +39,7 @@ "build_timestamp": "{{timestamp}}", "containerd_gvisor_runtime": "false", "containerd_gvisor_version": "latest", + "containerd_image_pull_progress_timeout": null, "containerd_service_url": "https://raw.githubusercontent.com/containerd/containerd/refs/tags/v{{user `containerd_version`}}/containerd.service", "containerd_version": null, "crictl_version": null, From 598fb620a8e3c75999cddb3bb3040a66f12350e8 Mon Sep 17 00:00:00 2001 From: Drew Hudson-Viles Date: Wed, 6 May 2026 16:44:12 +0100 Subject: 
[PATCH 90/90] chore(ci): updating the cloudbuild gcb-docker-gcloud image to the latest release --- cloudbuild.yaml | 2 +- docs/book/src/capi/releasing.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudbuild.yaml b/cloudbuild.yaml index e872db977a..9ad4a1e13a 100644 --- a/cloudbuild.yaml +++ b/cloudbuild.yaml @@ -3,7 +3,7 @@ timeout: 3000s options: substitution_option: ALLOW_LOOSE steps: - - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud@sha256:63840f133e0dfeea0af9ef391210da7fab9d2676172e2967fccab0cd6110c4e7' # v20250513-9264efb079 + - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud@sha256:ff388e0dc16351e96f8464e2e185b74a7578a5ccb7a112cf3393468e59e6e2d2' # v20260205-38cfa9523f entrypoint: make env: - DOCKER_CLI_EXPERIMENTAL=enabled diff --git a/docs/book/src/capi/releasing.md b/docs/book/src/capi/releasing.md index f9ba7fbaa3..2d5d4b4e2b 100644 --- a/docs/book/src/capi/releasing.md +++ b/docs/book/src/capi/releasing.md @@ -5,6 +5,7 @@ The current release of Image Builder is [v0.1.50][] (April 1, 2026). The corresp ## Release Process Releasing image-builder is a simple process: project maintainers should be able to follow the steps below in order to create a new release. +Before proceeding, make sure the current SHA being used in [`cloudbuild.yaml`](../../../../cloudbuild.yaml) is [still valid](https://console.cloud.google.com/artifacts/docker/k8s-staging-test-infra/us/gcr.io/gcb-docker-gcloud). If it is not, then the job that runs on tag will fail. ### Create a tag