Remove github actions

Change-Id: I7e85168f402c7808ab1f7655d9ce573e853c9b40
This commit is contained in:
Jake Yip 2024-05-06 19:23:47 +10:00
parent af14e916f0
commit f0717f610c
15 changed files with 0 additions and 1555 deletions

View File

@ -1,37 +0,0 @@
# Composite action: ensures the specified image exists in the target
# OpenStack cloud, delegating the actual work to scripts/ensure-image.sh.
# NOTE(review): indentation reconstructed from flattened diff text.
name: Ensure OpenStack image
description: >-
  Ensures that the specified image exists in the target OpenStack cloud.
inputs:
  os-client-config-file:
    description: The path of the OpenStack clouds file
    required: true
    default: ./clouds.yml
  os-cloud:
    description: The name of the cloud within the OpenStack clouds file
    required: true
    default: openstack
  image-name:
    description: The name of the image to use
    required: true
  image-url:
    description: The URL of the image
    required: true
outputs:
  image-id:
    description: The ID of the image
    value: ${{ steps.ensure-image.outputs.image-id }}
runs:
  using: "composite"
  steps:
    # The script reads its configuration entirely from the environment
    - id: ensure-image
      run: ./scripts/ensure-image.sh
      shell: bash
      env:
        OS_CLIENT_CONFIG_FILE: ${{ inputs.os-client-config-file }}
        OS_CLOUD: ${{ inputs.os-cloud }}
        IMAGE_NAME: ${{ inputs.image-name }}
        IMAGE_URL: ${{ inputs.image-url }}

View File

@ -1,115 +0,0 @@
# Composite action: sets up a Cluster API management cluster for a test.
# Installs sonobuoy, Helm, cert-manager, clusterctl, the Cluster API
# controllers and the StackHPC addon/janitor providers, with versions
# pinned by the dependencies file.
# NOTE(review): indentation reconstructed from flattened diff text.
name: Set up test environment
description: >-
  Sets up a Cluster API management cluster for a test.
inputs:
  dependencies-path:
    description: Path to the dependencies file to use.
    default: dependencies.json
runs:
  using: "composite"
  steps:
    # Extract pinned component versions from the dependencies JSON file
    - name: Read dependencies
      id: deps
      shell: bash
      run: |
        echo "addon-provider=$(jq -r '.["addon-provider"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
        echo "cluster-api=$(jq -r '.["cluster-api"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
        echo "cluster-api-janitor-openstack=$(jq -r '.["cluster-api-janitor-openstack"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
        echo "cluster-api-provider-openstack=$(jq -r '.["cluster-api-provider-openstack"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
        echo "cert-manager=$(jq -r '.["cert-manager"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
        echo "helm=$(jq -r '.["helm"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
        echo "sonobuoy=$(jq -r '.["sonobuoy"]' "$DEPENDENCIES_PATH")" >> $GITHUB_OUTPUT
      env:
        DEPENDENCIES_PATH: ${{ inputs.dependencies-path }}
    - name: Install tools
      shell: bash
      run: sudo apt install -y zip unzip
    # SONOBUOY_VERSION is a "vX.Y.Z" tag; ${SONOBUOY_VERSION:1} strips the "v"
    # to match the release tarball naming
    - name: Install sonobuoy
      shell: bash
      run: >
        wget https://github.com/vmware-tanzu/sonobuoy/releases/download/${SONOBUOY_VERSION}/sonobuoy_${SONOBUOY_VERSION:1}_linux_amd64.tar.gz &&
        tar -xf sonobuoy_${SONOBUOY_VERSION:1}_linux_amd64.tar.gz &&
        sudo install -o root -g root -m 0755 sonobuoy /usr/local/bin/sonobuoy &&
        sonobuoy version
      env:
        SONOBUOY_VERSION: ${{ steps.deps.outputs.sonobuoy }}
    - uses: actions/setup-python@v4
      with:
        python-version: '3.9'
        check-latest: true
    - name: Set up Helm
      uses: azure/setup-helm@v3
      with:
        version: ${{ steps.deps.outputs.helm }}
    - name: Install cert-manager
      shell: bash
      run: |-
        helm upgrade cert-manager cert-manager \
          --repo https://charts.jetstack.io \
          --version ${{ steps.deps.outputs.cert-manager }} \
          --namespace cert-manager \
          --create-namespace \
          --install \
          --set installCRDs=true \
          --wait \
          --timeout 10m
    - name: Install clusterctl
      shell: bash
      run: >
        curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_VERSION}/clusterctl-linux-amd64 -o clusterctl &&
        sudo install -o root -g root -m 0755 clusterctl /usr/local/bin/clusterctl &&
        clusterctl version
      env:
        CAPI_VERSION: ${{ steps.deps.outputs.cluster-api }}
    # Detect an existing installation so we choose "upgrade apply" vs "init" below
    - name: Check if Cluster API is already installed
      id: capi-check
      shell: bash
      run: kubectl get provider -n capi-system cluster-api
      continue-on-error: true
    - name: Install or upgrade Cluster API controllers
      shell: bash
      run: >
        clusterctl ${{ steps.capi-check.outcome == 'success' && 'upgrade apply' || 'init' }} \
          --core cluster-api:${CAPI_VERSION} \
          --control-plane kubeadm:${CAPI_VERSION} \
          --bootstrap kubeadm:${CAPI_VERSION} \
          --infrastructure openstack:${CAPO_VERSION} \
          --wait-providers
      env:
        CAPI_VERSION: ${{ steps.deps.outputs.cluster-api }}
        CAPO_VERSION: ${{ steps.deps.outputs.cluster-api-provider-openstack }}
    - name: Install Cluster API add-on provider
      shell: bash
      run: |-
        helm upgrade cluster-api-addon-provider cluster-api-addon-provider \
          --repo https://stackhpc.github.io/cluster-api-addon-provider \
          --version ${{ steps.deps.outputs.addon-provider }} \
          --namespace capi-addon-system \
          --create-namespace \
          --install \
          --wait \
          --timeout 10m
    - name: Install Cluster API janitor
      shell: bash
      run: |-
        helm upgrade cluster-api-janitor-openstack cluster-api-janitor-openstack \
          --repo https://stackhpc.github.io/cluster-api-janitor-openstack \
          --version ${{ steps.deps.outputs.cluster-api-janitor-openstack }} \
          --namespace capi-janitor-system \
          --create-namespace \
          --install \
          --wait \
          --timeout 10m

View File

@ -1,163 +0,0 @@
# Composite action: installs or upgrades a cluster via Helm, waits for it
# to converge (cluster, machine deployments, addons, workloads), then runs
# Sonobuoy against it and optionally uploads the results.
# NOTE(review): indentation reconstructed from flattened diff text.
name: Upgrade and test cluster
description: >-
  Run a Helm upgrade using the specified chart version and values, wait for
  the cluster to become ready and run Sonobuoy against it
inputs:
  name:
    description: The name of the cluster
    required: true
  os-client-config-file:
    description: The path of the OpenStack clouds file
    required: true
    default: ./clouds.yaml
  os-cloud:
    description: The name of the cloud within the OpenStack clouds file
    required: true
    default: openstack
  chart-repo:
    description: The repository to fetch the charts from
    required: true
    default: https://stackhpc.github.io/capi-helm-charts
  chart-name:
    description: The name of the chart to use
    required: true
    default: openstack-cluster
  chart-version:
    description: The version of the charts to use
    required: true
  values-path:
    description: The path to a file containing Helm values
    required: true
    default: ./values.yaml
  kubernetes-version:
    description: The Kubernetes version in the image
    required: true
  image-id:
    description: The ID of the image to use
    required: true
  sonobuoy-mode:
    description: |
      The mode for the Sonobuoy run.
      One of certified-conformance, conformance-lite, non-disruptive-conformance, quick.
    required: true
    default: quick
  sonobuoy-upload:
    description: Specify "yes" to upload the Sonobuoy run as an artifact
    required: true
    default: "no"
  skip-workload-status:
    description: Specify "yes" to skip the workload status check
    required: true
    default: "no"
runs:
  using: "composite"
  steps:
    - name: Install or upgrade cluster from directory
      shell: bash
      run: |-
        helm upgrade ${{ inputs.name }} ${{ inputs.chart-name }} \
          --repo ${{ inputs.chart-repo }} \
          --version ${{ inputs.chart-version }} \
          --install \
          --values ${{ inputs.os-client-config-file }} \
          --values ${{ inputs.values-path }} \
          --set cloudName=${{ inputs.os-cloud }} \
          --set kubernetesVersion=${{ inputs.kubernetes-version }} \
          --set machineImageId=${{ inputs.image-id }}
    # Wait for any upgrade to start before checking if it is complete
    # This is to make sure the controller has actioned the update before
    # progressing to wait for ready
    # However, in some cases the cluster will never become unready, e.g.
    # in the chart upgrade tests if there are no changes to templates
    # In this case, we time out after 2m which should be enough time for
    # the controllers to react
    - name: Wait for cluster not ready
      shell: bash
      run: |-
        kubectl wait clusters/${{ inputs.name }} \
          --for=condition=ready=false \
          --timeout 2m
      continue-on-error: true
    - name: Wait for cluster ready
      shell: bash
      run: |-
        kubectl wait clusters/${{ inputs.name }} \
          --for=condition=ready \
          --timeout 30m
    - name: Wait for machine deployments to be running
      shell: bash
      run: |-
        kubectl wait machinedeployments \
          --all \
          --for=jsonpath='{.status.phase}'=Running \
          --timeout 30m
    - name: Wait for addons to deploy
      shell: bash
      run: |-
        kubectl wait manifests \
          --all \
          --for=jsonpath='{.status.phase}'=Deployed \
          --timeout 20m \
          && \
        kubectl wait helmreleases \
          --all \
          --for=jsonpath='{.status.phase}'=Deployed \
          --timeout 20m
    # Extract the workload cluster kubeconfig so later steps can target it
    - name: Write kubeconfig
      shell: bash
      run: |-
        kubectl get secret ${{ inputs.name }}-kubeconfig \
          -o go-template='{{ .data.value | base64decode }}' \
          > kubeconfig
    - name: Wait for all workloads rollouts to complete
      shell: bash
      run: |-
        set -e
        NAMESPACES=$(kubectl get ns --no-headers --output jsonpath='{.items[*].metadata.name}')
        for ns in $NAMESPACES; do
          echo "namespace: $ns"
          kubectl rollout status \
            --namespace "$ns" \
            --watch \
            --timeout 20m \
            deployments,statefulsets,daemonsets
        done
      env:
        KUBECONFIG: ./kubeconfig
      if: "${{ inputs.skip-workload-status != 'yes' }}"
    - name: Run sonobuoy
      shell: bash
      run: sonobuoy run --mode ${{ inputs.sonobuoy-mode }} --wait
      env:
        KUBECONFIG: ./kubeconfig
    - name: Retrieve sonobuoy results
      shell: bash
      run: ./scripts/sonobuoy-retrieve.sh --filename ./sonobuoy-results-${{ inputs.name }}.tar.gz
      env:
        KUBECONFIG: ./kubeconfig
      if: "${{ inputs.sonobuoy-upload == 'yes' }}"
    - name: Upload sonobuoy results artifact
      uses: actions/upload-artifact@v3
      with:
        name: sonobuoy-results-${{ inputs.name }}
        path: ./sonobuoy-results-${{ inputs.name }}.tar.gz
      if: "${{ inputs.sonobuoy-upload == 'yes' }}"
    # Clean up Sonobuoy resources even if earlier steps failed
    - name: Remove sonobuoy artifacts from cluster
      shell: bash
      run: sonobuoy delete --wait --all
      env:
        KUBECONFIG: ./kubeconfig
      if: ${{ always() }}

View File

@ -1,27 +0,0 @@
# Composite action: dumps the Cluster API controller logs to files and
# uploads them as a single workflow artifact.
# NOTE(review): indentation reconstructed from flattened diff text.
name: Upload logs
description: >-
  Upload Cluster API controller logs as a workflow artifact.
inputs:
  name-suffix:
    description: The suffix to add to the controller logs.
    required: true
runs:
  using: "composite"
  steps:
    - name: Output controller logs
      shell: bash
      run: |
        kubectl -n capi-system logs deploy/capi-controller-manager > capi-logs.txt
        kubectl -n capi-kubeadm-control-plane-system logs deploy/capi-kubeadm-control-plane-controller-manager > capi-kubeadm-control-plane-logs.txt
        kubectl -n capi-kubeadm-bootstrap-system logs deploy/capi-kubeadm-bootstrap-controller-manager > capi-kubeadm-bootstrap-logs.txt
        kubectl -n capo-system logs deploy/capo-controller-manager > capo-logs.txt
        kubectl -n capi-addon-system logs deploy/cluster-api-addon-provider > capi-addon-provider-logs.txt
    - name: Upload controller log artifacts
      uses: actions/upload-artifact@v3
      with:
        name: cluster-api-controller-logs-${{ inputs.name-suffix }}
        path: ./*-logs.txt

23
.github/release.yml vendored
View File

@ -1,23 +0,0 @@
---
# GitHub automatically-generated release notes configuration.
# Categories are matched in order; "*" catches everything not excluded.
# NOTE(review): indentation reconstructed from flattened diff text.
changelog:
  categories:
    - title: Breaking changes
      labels:
        - breaking
    - title: New features and enhancements
      labels:
        - "*"
      exclude:
        labels:
          - bug
          - automation
    - title: Bugs fixed
      labels:
        - bug
    - title: Dependency updates
      labels:
        - automation

View File

@ -1,115 +0,0 @@
# Reusable workflow: resolves the azimuth-images manifest for the pinned
# release, ensures each Kubernetes image exists in the target cloud (via a
# matrix), and republishes the per-matrix results as workflow outputs.
# NOTE(review): indentation reconstructed from flattened diff text.
name: Ensure CAPI images
on:
  workflow_call:
    inputs:
      ref:
        type: string
        description: The Git ref under test.
        required: true
    outputs:
      kube-1-28-image:
        value: ${{ jobs.produce_outputs.outputs.kube-1-28-image }}
      kube-1-28-version:
        value: ${{ jobs.produce_outputs.outputs.kube-1-28-version }}
      kube-1-29-image:
        value: ${{ jobs.produce_outputs.outputs.kube-1-29-image }}
      kube-1-29-version:
        value: ${{ jobs.produce_outputs.outputs.kube-1-29-version }}
      kube-1-30-image:
        value: ${{ jobs.produce_outputs.outputs.kube-1-30-image }}
      kube-1-30-version:
        value: ${{ jobs.produce_outputs.outputs.kube-1-30-version }}
jobs:
  image_manifest:
    runs-on: ubuntu-latest
    outputs:
      manifest: ${{ steps.images.outputs.manifest }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
      - name: Fetch image details
        id: images
        run: |
          VN="$(jq -r '.["azimuth-images"]' ./dependencies.json)"
          MANIFEST="$(curl -fsSL "https://github.com/stackhpc/azimuth-images/releases/download/${VN}/manifest.json")"
          echo "manifest=$(jq -c . <<< "$MANIFEST")" >> $GITHUB_OUTPUT
  ensure_image:
    runs-on: ubuntu-latest
    needs: [image_manifest]
    strategy:
      fail-fast: false
      matrix:
        include:
          # Draft PRs only test the newest Kubernetes version
          - name: kube-1-28
            image: ${{ fromJSON(needs.image_manifest.outputs.manifest).kubernetes-1-28-jammy }}
            skip: ${{ github.event.pull_request.draft }}
          - name: kube-1-29
            image: ${{ fromJSON(needs.image_manifest.outputs.manifest).kubernetes-1-29-jammy }}
            skip: ${{ github.event.pull_request.draft }}
          - name: kube-1-30
            image: ${{ fromJSON(needs.image_manifest.outputs.manifest).kubernetes-1-30-jammy }}
            skip: false
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
        if: ${{ !matrix.skip }}
      - name: Write cloud credential
        run: >
          echo "$CLOUD" > clouds.yml
        shell: bash
        env:
          CLOUD: ${{ secrets.CLOUD }}
        if: ${{ !matrix.skip }}
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          check-latest: true
        if: ${{ !matrix.skip }}
      - name: Install OpenStack CLI
        run: pip install python-openstackclient
        if: ${{ !matrix.skip }}
      - name: Ensure Kubernetes image
        id: ensure-image
        uses: ./.github/actions/ensure-image
        with:
          image-name: ${{ matrix.image.name }}
          image-url: ${{ matrix.image.url }}
        if: ${{ !matrix.skip }}
      - name: Write matrix outputs
        uses: cloudposse/github-action-matrix-outputs-write@0.4.2
        with:
          matrix-step-name: ${{ github.job }}
          matrix-key: ${{ matrix.name }}
          outputs: |-
            image-id: ${{ steps.ensure-image.outputs.image-id }}
            kube-version: ${{ matrix.image.kubernetes_version }}
        if: ${{ !matrix.skip }}
  produce_outputs:
    runs-on: ubuntu-latest
    needs: [ensure_image]
    outputs:
      kube-1-28-image: ${{ fromJSON(steps.matrix-outputs.outputs.result).image-id.kube-1-28 }}
      kube-1-28-version: ${{ fromJSON(steps.matrix-outputs.outputs.result).kube-version.kube-1-28 }}
      kube-1-29-image: ${{ fromJSON(steps.matrix-outputs.outputs.result).image-id.kube-1-29 }}
      kube-1-29-version: ${{ fromJSON(steps.matrix-outputs.outputs.result).kube-version.kube-1-29 }}
      kube-1-30-image: ${{ fromJSON(steps.matrix-outputs.outputs.result).image-id.kube-1-30 }}
      kube-1-30-version: ${{ fromJSON(steps.matrix-outputs.outputs.result).kube-version.kube-1-30 }}
    steps:
      - name: Read matrix outputs
        id: matrix-outputs
        uses: cloudposse/github-action-matrix-outputs-read@0.1.1
        with:
          matrix-step-name: ensure_image

View File

@ -1,65 +0,0 @@
# Reusable workflow: lints the Helm charts with chart-testing against a
# set of dummy values.
# NOTE(review): indentation reconstructed from flattened diff text. The
# nesting of the dummy clouds.yaml (which keys sit under `auth` vs the
# cloud level) is inferred from standard clouds.yaml layout — verify
# against the original file.
name: Helm Lint
on:
  workflow_call:
    inputs:
      ref:
        type: string
        description: The Git ref under test.
        required: true
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
          # Full history so chart-testing can diff against the target branch
          fetch-depth: 0
      - name: Set up Helm
        uses: azure/setup-helm@v3
        with:
          version: v3.10.0
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          check-latest: true
      - name: Set up chart-testing
        uses: scrungus/chart-testing-action@v3.7.3
      - name: Create dummy values
        run: >
          echo "$VALUES" > values.yaml
        env:
          VALUES: |
            clouds:
              openstack:
                auth:
                  auth_url: https://my.cloud:5000
                  application_credential_id: "xxxx"
                  application_credential_secret: "xxxx"
                region_name: "RegionOne"
                verify: false
                interface: "public"
                identity_api_version: 3
                auth_type: "v3applicationcredential"
            kubernetesVersion: 1.27.2
            machineImageId: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
            controlPlane:
              machineFlavor: xxxx
              machineCount: 1
            nodeGroups:
              - machineCount: 2
                machineFlavor: xxxx
                name: test-group1
      - name: Run chart-testing (lint)
        run: |-
          ct lint \
            --target-branch ${{ github.event.repository.default_branch }} \
            --all \
            --validate-maintainers=false \
            --extra-values values.yaml

View File

@ -1,50 +0,0 @@
# Top-level workflow for pushes to main: lint, then mirror images, ensure
# CAPI images and publish charts in parallel, then run the (sanity) tests.
# NOTE(review): indentation reconstructed from flattened diff text.
name: test main
on:
  push:
    branches:
      - main
# Only one main pipeline at a time; newer pushes cancel older runs
concurrency:
  group: main
  cancel-in-progress: true
jobs:
  lint:
    uses: ./.github/workflows/lint.yaml
    with:
      ref: ${{ github.sha }}
  mirror_container_images:
    needs: [lint]
    uses: ./.github/workflows/sync-images.yaml
    secrets: inherit
    with:
      ref: ${{ github.sha }}
  ensure_capi_images:
    needs: [lint]
    uses: ./.github/workflows/ensure-capi-images.yaml
    secrets: inherit
    with:
      ref: ${{ github.sha }}
  publish_charts:
    needs: [lint]
    uses: ./.github/workflows/publish-charts.yaml
    secrets: inherit
    with:
      ref: ${{ github.sha }}
  test:
    needs: [mirror_container_images, ensure_capi_images, publish_charts]
    uses: ./.github/workflows/test.yaml
    secrets: inherit
    with:
      # Pass the images as JSON
      images: ${{ toJSON(needs.ensure_capi_images.outputs) }}
      # Pass the chart version to test
      chart-version: ${{ needs.publish_charts.outputs.chart-version }}
      # We want to test the current sha
      ref: ${{ github.sha }}
      # Only run the sanity check on main
      tests-full: false

View File

@ -1,67 +0,0 @@
# Top-level workflow for pull requests. Uses pull_request_target plus a
# protected-environment approval gate, since the workflow has access to
# repository secrets.
# NOTE(review): indentation reconstructed from flattened diff text.
name: test pr
on:
  pull_request_target:
    types:
      - opened
      - synchronize
      - ready_for_review
      - reopened
    branches:
      - main
# One pipeline per PR branch; newer pushes cancel older runs
concurrency:
  group: ${{ github.head_ref }}
  cancel-in-progress: true
jobs:
  # This job does nothing, but uses an environment that has protection in place
  # It is used as a guard to the rest of the workflow so that we can require approval
  # for all commits to a PR
  wait_for_approval:
    runs-on: ubuntu-latest
    environment: ci-approval
    steps:
      - name: Workflow approved
        run: exit 0
  lint:
    needs: [wait_for_approval]
    uses: ./.github/workflows/lint.yaml
    with:
      ref: ${{ github.event.pull_request.head.sha }}
  mirror_container_images:
    needs: [lint]
    uses: ./.github/workflows/sync-images.yaml
    secrets: inherit
    with:
      ref: ${{ github.event.pull_request.head.sha }}
  ensure_capi_images:
    needs: [lint]
    uses: ./.github/workflows/ensure-capi-images.yaml
    secrets: inherit
    with:
      ref: ${{ github.event.pull_request.head.sha }}
  publish_charts:
    needs: [lint]
    uses: ./.github/workflows/publish-charts.yaml
    secrets: inherit
    with:
      ref: ${{ github.event.pull_request.head.sha }}
  test:
    needs: [mirror_container_images, ensure_capi_images, publish_charts]
    uses: ./.github/workflows/test.yaml
    secrets: inherit
    with:
      # Pass the images as JSON
      images: ${{ toJSON(needs.ensure_capi_images.outputs) }}
      # Pass the chart version to test
      chart-version: ${{ needs.publish_charts.outputs.chart-version }}
      # We want to test the code in the PR
      ref: ${{ github.event.pull_request.head.sha }}
      # If the PR is in draft, just run a sanity check
      # If the PR is in review, run the full test suite
      tests-full: ${{ !github.event.pull_request.draft }}

View File

@ -1,37 +0,0 @@
# Reusable workflow: computes a SemVer version for the ref and publishes
# the Helm charts, exposing the chart version as an output.
# NOTE(review): indentation reconstructed from flattened diff text.
name: publish artifacts
on:
  workflow_call:
    inputs:
      ref:
        type: string
        description: The Git ref under test.
        required: true
    outputs:
      chart-version:
        value: ${{ jobs.build_push_charts.outputs.chart-version }}
jobs:
  build_push_charts:
    name: Build and push Helm charts
    runs-on: ubuntu-latest
    outputs:
      chart-version: ${{ steps.semver.outputs.version }}
    steps:
      - name: Check out the repository
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
          # This is important for the semver action to work correctly
          # when determining the number of commits since the last tag
          fetch-depth: 0
      - name: Get SemVer version for current commit
        id: semver
        uses: stackhpc/github-actions/semver@master
      - name: Publish Helm charts
        uses: stackhpc/github-actions/helm-publish@master
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          version: ${{ steps.semver.outputs.version }}
          app-version: ${{ steps.semver.outputs.short-sha }}

View File

@ -1,58 +0,0 @@
# Reusable workflow: discovers the skopeo manifest files in the repo and
# syncs the images they reference to quay.io/azimuth, one matrix job per
# manifest file.
# NOTE(review): indentation reconstructed from flattened diff text.
name: sync images
on:
  workflow_call:
    inputs:
      ref:
        type: string
        description: The Git ref to use in the checkout.
jobs:
  build_manifest_matrix:
    runs-on: ubuntu-latest
    outputs:
      manifest-files: ${{ steps.list-manifests.outputs.manifest-files }}
    steps:
      - name: Check out the repository
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
      - name: List manifests
        id: list-manifests
        # Use jq to format the manifest list as a JSON array
        run: |
          MANIFEST_FILES="$(ls ./skopeo-manifests/*.yaml | jq -c -s -R 'split("\n") | map(select(length > 0))')"
          echo "manifest-files=$MANIFEST_FILES" >> $GITHUB_OUTPUT
  sync_images:
    runs-on: ubuntu-latest
    needs: [build_manifest_matrix]
    strategy:
      # Restrict the maximum number of parallel syncs to avoid quay.io push limits
      # Anonymous pulls are not rate-limited
      max-parallel: 4
      # If one sync fails, continue with the others
      fail-fast: false
      matrix:
        manifest-file: ${{ fromJSON(needs.build_manifest_matrix.outputs.manifest-files) }}
    steps:
      - name: Check out the repository
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
      # Just sync all the images in all the manifests to GitHub packages
      - name: Sync component images
        run: |-
          podman run \
            -v ./skopeo-manifests:/opt/skopeo-manifests \
            -w /opt \
            quay.io/skopeo/stable:latest \
            sync \
            --src yaml \
            --dest docker \
            --dest-creds "${{ secrets.QUAY_IO_USER }}:${{ secrets.QUAY_IO_TOKEN }}" \
            --scoped \
            --all \
            ${{ matrix.manifest-file }} \
            quay.io/azimuth

View File

@ -1,29 +0,0 @@
# Tag workflow: on any tag push, mirror images, publish charts, and attach
# the dependencies manifest to the GitHub release.
# NOTE(review): indentation reconstructed from flattened diff text.
name: Publish dependencies on release
on:
  push:
    tags:
      - "**"
jobs:
  mirror_container_images:
    uses: ./.github/workflows/sync-images.yaml
    secrets: inherit
    with:
      ref: ${{ github.sha }}
  publish_charts:
    uses: ./.github/workflows/publish-charts.yaml
    secrets: inherit
    with:
      ref: ${{ github.sha }}
  publish_dependencies:
    runs-on: ubuntu-latest
    steps:
      - name: Check out the repository
        uses: actions/checkout@v3
      - name: Publish manifest to release
        uses: softprops/action-gh-release@v1
      with:
          files: dependencies.json

View File

@ -1,474 +0,0 @@
# Reusable workflow: tests the openstack-cluster chart. The `latest` job
# always runs; the etcd-volume, kube-upgrade and chart-upgrade jobs only
# run when tests-full is true. All jobs serialise access to the shared
# OpenStack project via an S3 lock.
# NOTE(review): indentation reconstructed from flattened diff text. The
# original file had 474 lines per the diff header; only the four jobs
# visible in the rendering are reproduced here — confirm nothing followed
# chart-upgrade in the original.
name: Test OpenStack cluster chart
on:
  workflow_call:
    inputs:
      images:
        type: string
        description: JSON-encoded dictionary of images and versions
      chart-version:
        type: string
        description: The version of the charts to test
      ref:
        type: string
        description: The Git ref under test.
        required: true
      tests-full:
        type: boolean
        description: Indicates whether to run the full test suite or just a sanity check
        required: true
        default: false
jobs:
  # This job tests a clean deployment against the latest version
  # It is the only job that runs when tests-full=false
  # For tests-full=true it creates an internal network + router and runs Sonobuoy in conformance mode
  # For tests-full=false it uses a pre-existing internal network and runs Sonobuoy in quick mode
  latest:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
      - name: Create kind cluster
        uses: helm/kind-action@v1.8.0
      - name: Set up test environment
        uses: ./.github/actions/setup
      - name: Write cloud credential
        run: echo "$CLOUD" > ./clouds.yaml
        env:
          CLOUD: ${{ secrets.CLOUD }}
      - name: Write Helm values
        run: echo "$VALUES" > ./values.yaml
        env:
          VALUES: |
            clouds:
              openstack:
                auth:
                  project_id: ${{ secrets.PROJECT_ID }}
                verify: false
            controlPlane:
              machineFlavor: ${{ vars.CONTROL_PLANE_FLAVOR }}
              machineCount: 1
            nodeGroups:
              - name: md-0
                machineFlavor: ${{ vars.NODE_GROUP_FLAVOR }}
                machineCount: 2
      - name: Apply network configuration
        run: echo "$NETWORKING" >> ./values.yaml
        env:
          NETWORKING: |
            clusterNetworking:
              externalNetworkId: ${{ vars.EXTERNAL_NETWORK_ID }}
        if: ${{ inputs.tests-full }}
      - name: Apply network configuration
        run: echo "$NETWORKING" >> ./values.yaml
        env:
          NETWORKING: |
            clusterNetworking:
              externalNetworkId: ${{ vars.EXTERNAL_NETWORK_ID }}
              internalNetwork:
                networkFilter:
                  tags: capi-helm-chart-ci
        if: ${{ !inputs.tests-full }}
      # This is the point at which we start to consume OpenStack resources
      # We use the same resource as the Azimuth CI, so acquire the same CI lock
      - name: Acquire S3 lock
        uses: stackhpc/github-actions/s3-lock@master
        with:
          host: ${{ vars.CI_S3_LOCK_HOST }}
          access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
          secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
          bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
          action: acquire
          # Include the job ID in the process ID
          process-id: ${{ github.repository }}/${{ github.run_id }}/${{ github.job }}
        # GitHub terminates jobs after 6 hours
        # We don't want jobs to acquire the lock then get timed out before they can finish
        # So wait a maximum of 3 hours to acquire the lock, leaving 3 hours for other tasks in the workflow
        timeout-minutes: 180
        if: ${{ vars.CI_S3_LOCK_HOST != '' }}
      - name: Test clean Kubernetes 1.30 deployment
        uses: ./.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-${{ github.job }}
          chart-version: ${{ inputs.chart-version }}
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-30-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-30-image }}
          sonobuoy-mode: ${{ inputs.tests-full && 'certified-conformance' || 'quick' }}
          sonobuoy-upload: ${{ inputs.tests-full && 'yes' || 'no' }}
      - name: Delete Kubernetes 1.30 deployment
        run: helm delete ci-${{ github.run_id }}-${{ github.job }} --wait
        if: ${{ always() }}
      - name: Upload logs
        uses: ./.github/actions/upload-logs
        with:
          name-suffix: ${{ github.job }}
        if: ${{ always() }}
      # Release the CI lock when we are done
      - name: Release S3 lock
        uses: stackhpc/github-actions/s3-lock@master
        with:
          host: ${{ vars.CI_S3_LOCK_HOST }}
          access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
          secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
          bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
          action: release
          process-id: ${{ github.repository }}/${{ github.run_id }}/${{ github.job }}
        if: ${{ vars.CI_S3_LOCK_HOST != '' && always() }}
  # This job tests the etcd volume support
  # It only runs for non-draft PRs
  # It uses a pre-existing internal network and the default volume type
  etcd-volume:
    runs-on: ubuntu-latest
    if: ${{ inputs.tests-full }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
      - name: Create kind cluster
        uses: helm/kind-action@v1.8.0
      - name: Set up test environment
        uses: ./.github/actions/setup
      - name: Write cloud credential
        run: echo "$CLOUD" > ./clouds.yaml
        env:
          CLOUD: ${{ secrets.CLOUD }}
      - name: Write Helm values
        run: echo "$VALUES" > ./values.yaml
        env:
          VALUES: |
            clouds:
              openstack:
                auth:
                  project_id: ${{ secrets.PROJECT_ID }}
                verify: false
            clusterNetworking:
              externalNetworkId: ${{ vars.EXTERNAL_NETWORK_ID }}
              internalNetwork:
                networkFilter:
                  tags: capi-helm-chart-ci
            etcd:
              blockDevice:
                size: 10
            controlPlane:
              machineFlavor: ${{ vars.CONTROL_PLANE_FLAVOR }}
              machineCount: 1
            nodeGroups:
              - name: md-0
                machineFlavor: ${{ vars.NODE_GROUP_FLAVOR }}
                machineCount: 2
      # This is the point at which we start to consume OpenStack resources
      # We use the same resource as the Azimuth CI, so acquire the same CI lock
      - name: Acquire S3 lock
        uses: stackhpc/github-actions/s3-lock@master
        with:
          host: ${{ vars.CI_S3_LOCK_HOST }}
          access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
          secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
          bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
          action: acquire
          # Include the job ID in the process ID
          process-id: ${{ github.repository }}/${{ github.run_id }}/${{ github.job }}
        # GitHub terminates jobs after 6 hours
        # We don't want jobs to acquire the lock then get timed out before they can finish
        # So wait a maximum of 3 hours to acquire the lock, leaving 3 hours for other tasks in the workflow
        timeout-minutes: 180
        if: ${{ vars.CI_S3_LOCK_HOST != '' }}
      - name: Deploy Kubernetes 1.30 for etcd volume test
        uses: ./.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-${{ github.job }}
          chart-version: ${{ inputs.chart-version }}
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-30-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-30-image }}
      - name: Delete etcd volume test deployment
        run: helm delete ci-${{ github.run_id }}-${{ github.job }} --wait
        if: ${{ always() }}
      - name: Upload logs
        uses: ./.github/actions/upload-logs
        with:
          name-suffix: ${{ github.job }}
        if: ${{ always() }}
      # Release the CI lock when we are done
      - name: Release S3 lock
        uses: stackhpc/github-actions/s3-lock@master
        with:
          host: ${{ vars.CI_S3_LOCK_HOST }}
          access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
          secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
          bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
          action: release
          process-id: ${{ github.repository }}/${{ github.run_id }}/${{ github.job }}
        if: ${{ vars.CI_S3_LOCK_HOST != '' && always() }}
  # This job tests Kubernetes upgrade
  # It only runs for non-draft PRs
  # It uses a pre-existing internal network
  kube-upgrade:
    runs-on: ubuntu-latest
    if: ${{ inputs.tests-full }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
      - name: Create kind cluster
        uses: helm/kind-action@v1.8.0
      - name: Set up test environment
        uses: ./.github/actions/setup
      - name: Write cloud credential
        run: echo "$CLOUD" > ./clouds.yaml
        env:
          CLOUD: ${{ secrets.CLOUD }}
      - name: Write Helm values
        run: echo "$VALUES" > ./values.yaml
        env:
          VALUES: |
            clouds:
              openstack:
                auth:
                  project_id: ${{ secrets.PROJECT_ID }}
                verify: false
            clusterNetworking:
              externalNetworkId: ${{ vars.EXTERNAL_NETWORK_ID }}
              internalNetwork:
                networkFilter:
                  tags: capi-helm-chart-ci
            controlPlane:
              machineFlavor: ${{ vars.CONTROL_PLANE_FLAVOR }}
              machineCount: 1
            nodeGroups:
              - name: md-0
                machineFlavor: ${{ vars.NODE_GROUP_FLAVOR }}
                machineCount: 2
      # This is the point at which we start to consume OpenStack resources
      # We use the same resource as the Azimuth CI, so acquire the same CI lock
      - name: Acquire S3 lock
        uses: stackhpc/github-actions/s3-lock@master
        with:
          host: ${{ vars.CI_S3_LOCK_HOST }}
          access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
          secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
          bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
          action: acquire
          # Include the job ID in the process ID
          process-id: ${{ github.repository }}/${{ github.run_id }}/${{ github.job }}
        # GitHub terminates jobs after 6 hours
        # We don't want jobs to acquire the lock then get timed out before they can finish
        # So wait a maximum of 3 hours to acquire the lock, leaving 3 hours for other tasks in the workflow
        timeout-minutes: 180
        if: ${{ vars.CI_S3_LOCK_HOST != '' }}
      - name: Deploy Kubernetes 1.28 for Kubernetes upgrade test
        uses: ./.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-${{ github.job }}
          chart-version: ${{ inputs.chart-version }}
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-28-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-28-image }}
      - name: Upgrade to Kubernetes 1.29
        uses: ./.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-${{ github.job }}
          chart-version: ${{ inputs.chart-version }}
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-29-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-29-image }}
      - name: Upgrade to Kubernetes 1.30
        uses: ./.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-${{ github.job }}
          chart-version: ${{ inputs.chart-version }}
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-30-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-30-image }}
      - name: Delete Kubernetes upgrade deployment
        run: helm delete ci-${{ github.run_id }}-${{ github.job }} --wait
        if: ${{ always() }}
      - name: Upload logs
        uses: ./.github/actions/upload-logs
        with:
          name-suffix: ${{ github.job }}
        if: ${{ always() }}
      # Release the CI lock when we are done
      - name: Release S3 lock
        uses: stackhpc/github-actions/s3-lock@master
        with:
          host: ${{ vars.CI_S3_LOCK_HOST }}
          access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
          secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
          bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
          action: release
          process-id: ${{ github.repository }}/${{ github.run_id }}/${{ github.job }}
        if: ${{ vars.CI_S3_LOCK_HOST != '' && always() }}
  # This jobs tests upgrading the chart + dependencies from the latest tag
  # It only runs for non-draft PRs
  # It uses a pre-existing internal network
  # It installs ALL of the addons so that we test upgrading them
  chart-upgrade:
    runs-on: ubuntu-latest
    if: ${{ inputs.tests-full }}
    steps:
      - name: Checkout current
        uses: actions/checkout@v3
        with:
          ref: ${{ inputs.ref }}
          path: current
      - name: Get latest tag
        id: latest-tag
        run: |
          set -eo pipefail
          TAG_NAME="$(curl -fsSL "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases/latest" | jq -r '.tag_name')"
          echo "tag-name=${TAG_NAME}" >> "$GITHUB_OUTPUT"
      - name: Checkout latest tag
        uses: actions/checkout@v3
        with:
          ref: ${{ steps.latest-tag.outputs.tag-name }}
          path: latest-tag
      - name: Write cloud credential
        run: echo "$CLOUD" > ./clouds.yaml
        env:
          CLOUD: ${{ secrets.CLOUD }}
      - name: Write Helm values
        run: echo "$VALUES" > ./values.yaml
        env:
          VALUES: |
            clouds:
              openstack:
                auth:
                  project_id: ${{ secrets.PROJECT_ID }}
                verify: false
            clusterNetworking:
              externalNetworkId: ${{ vars.EXTERNAL_NETWORK_ID }}
              internalNetwork:
                networkFilter:
                  tags: capi-helm-chart-ci
            controlPlane:
              machineFlavor: ${{ vars.CONTROL_PLANE_FLAVOR }}
              machineCount: 1
            nodeGroups:
              - name: md-0
                machineFlavor: ${{ vars.NODE_GROUP_FLAVOR }}
                machineCount: 2
            addons:
              kubernetesDashboard:
                enabled: true
              monitoring:
                enabled: true
      - name: Create kind cluster
        uses: helm/kind-action@v1.8.0
      # For the setup, we use a merged dependencies file in case new dependencies
      # are added by the code under test, ensuring that the older dependencies are
      # used where they are specified
      - name: Create merged dependencies file
        run: >
          jq -s '.[0] * .[1]' \
            current/dependencies.json \
            latest-tag/dependencies.json \
            > dependencies-merged.json
      - name: Set up test environment with dependencies from latest tag
        uses: ./current/.github/actions/setup
        with:
          dependencies-path: dependencies-merged.json
      # This is the point at which we start to consume OpenStack resources
      # We use the same resource as the Azimuth CI, so acquire the same CI lock
      - name: Acquire S3 lock
        uses: stackhpc/github-actions/s3-lock@master
        with:
          host: ${{ vars.CI_S3_LOCK_HOST }}
          access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
          secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
          bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
          action: acquire
          # Include the job ID in the process ID
          process-id: ${{ github.repository }}/${{ github.run_id }}/${{ github.job }}
        # GitHub terminates jobs after 6 hours
        # We don't want jobs to acquire the lock then get timed out before they can finish
        # So wait a maximum of 3 hours to acquire the lock, leaving 3 hours for other tasks in the workflow
        timeout-minutes: 180
        if: ${{ vars.CI_S3_LOCK_HOST != '' }}
      - name: Deploy cluster with chart from latest tag
        uses: ./current/.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-${{ github.job }}
          # Deploy using the tagged version here
          chart-version: ${{ steps.latest-tag.outputs.tag-name }}
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-29-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-29-image }}
      - name: Update test environment with current dependencies
        uses: ./current/.github/actions/setup
        with:
          dependencies-path: current/dependencies.json
      - name: Upgrade cluster to current chart
        uses: ./current/.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-${{ github.job }}
          # And upgrade to the version under test
          chart-version: ${{ inputs.chart-version }}
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-29-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-29-image }}
      - name: Delete chart upgrade deployment
        run: helm delete ci-${{ github.run_id }}-${{ github.job }} --wait
        if: ${{ always() }}
      - name: Upload logs
        uses: ./current/.github/actions/upload-logs
        with:
          name-suffix: ${{ github.job }}
        if: ${{ always() }}
      # Release the CI lock when we are done
      - name: Release S3 lock
        uses: stackhpc/github-actions/s3-lock@master
        with:
          host: ${{ vars.CI_S3_LOCK_HOST }}
          access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
          secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
          bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
          action: release
          process-id: ${{ github.repository }}/${{ github.run_id }}/${{ github.job }}
        if: ${{ vars.CI_S3_LOCK_HOST != '' && always() }}

View File

@ -1,183 +0,0 @@
# Workflow: check each cluster addon's upstream Helm chart for a newer release
# and, when one is found, open an automated PR bumping the version recorded in
# charts/cluster-addons/values.yaml (plus a Skopeo manifest of the addon's images).
name: Update addons
on:
  # Allow manual executions
  workflow_dispatch:
  # Run nightly
  schedule:
    - cron: '0 0 * * *'
jobs:
  propose_update_pr:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # One matrix entry per addon. `path` is the dotted key inside
        # charts/cluster-addons/values.yaml where the chart reference lives;
        # optional `constraints` limits acceptable versions and optional
        # `values` is passed to `helm template` when extracting images.
        include:
          - key: calico
            path: cni.calico.chart
            # Because of the way the Calico chart and CRDs are written, it is very
            # difficult to infer the images (other than the operator image)
            # Instead, we must list them here
            # This string is formatted with the chart version
            additional-images: |-
              docker.io/calico/apiserver:{0}
              docker.io/calico/cni:{0}
              docker.io/calico/csi:{0}
              docker.io/calico/kube-controllers:{0}
              docker.io/calico/node-driver-registrar:{0}
              docker.io/calico/node:{0}
              docker.io/calico/pod2daemon-flexvol:{0}
              docker.io/calico/typha:{0}
          - key: cilium
            path: cni.cilium.chart
          - key: openstack-ccm
            path: openstack.ccm.chart
          - key: csi-cinder
            path: openstack.csiCinder.chart
          - key: k8s-keystone-auth
            path: openstack.k8sKeystoneAuth.chart
            # Placeholder values so the chart templates cleanly; they are not
            # used against a real cloud.
            values: |
              openstackAuthUrl: https://keystone.my.openstack
              projectId: notarealprojectid
          - key: metrics-server
            path: metricsServer.chart
          - key: kubernetes-dashboard
            path: kubernetesDashboard.chart
            # v7 introduces substantial changes that break Azimuth integration
            constraints: '<7.0.0'
            values: |
              metricsScraper:
                enabled: true
          - key: ingress-nginx
            path: ingress.nginx.chart
          - key: kube-prometheus-stack
            path: monitoring.kubePrometheusStack.chart
          - key: loki-stack
            path: monitoring.lokiStack.chart
          - key: prometheus-blackbox-exporter
            path: monitoring.blackboxExporter.chart
          - key: node-feature-discovery
            path: nodeFeatureDiscovery.chart
          - key: nvidia-gpu-operator
            path: nvidiaGPUOperator.chart
            values: |
              nfd:
                enabled: false
          - key: mellanox-network-operator
            path: mellanoxNetworkOperator.chart
            values: |
              nfd:
                enabled: false
              deployCR: true
              ofedDriver:
                deploy: true
              rdmaSharedDevicePlugin:
                deploy: true
              ibKubernetes:
                deploy: false
              nvPeerDriver:
                deploy: false
              sriovNetworkOperator:
                enabled: false
              sriovDevicePlugin:
                deploy: false
              secondaryNetwork:
                deploy: false
    # Use the addon key as the job's display name in the Actions UI
    name: ${{ matrix.key }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      # Read the chart repo URL and chart name for this addon out of values.yaml
      - name: Extract repo and chart from values.yaml
        id: chart-info
        uses: stackhpc/github-actions/config-extract@master
        with:
          path: ./charts/cluster-addons/values.yaml
          outputs: |
            repo=${{ matrix.path }}.repo
            name=${{ matrix.path }}.name
      - name: Check for updates
        id: next
        uses: stackhpc/github-actions/helm-latest-version@master
        with:
          repository: ${{ steps.chart-info.outputs.repo }}
          chart: ${{ steps.chart-info.outputs.name }}
          # Addons without an explicit constraint accept any version
          constraints: ${{ matrix.constraints || '>=0.0.0' }}
      - name: Update chart version in values.yaml
        uses: stackhpc/github-actions/config-update@master
        with:
          path: ./charts/cluster-addons/values.yaml
          updates: |
            ${{ matrix.path }}.version=${{ steps.next.outputs.version }}
      # Render the candidate chart so its images can be discovered from the
      # resulting manifests; addons without explicit values use empty values
      - name: Template updated chart
        id: helm-template
        uses: stackhpc/github-actions/helm-template@master
        with:
          repository: ${{ steps.chart-info.outputs.repo }}
          chart: ${{ steps.chart-info.outputs.name }}
          version: ${{ steps.next.outputs.version }}
          values: ${{ matrix.values || '{}' }}
      - name: Extract images from templated manifests
        id: extract-images
        uses: stackhpc/github-actions/k8s-extract-images@master
        with:
          manifests-file: ${{ steps.helm-template.outputs.manifests-file }}
      - name: Ensure manifests directory exists
        run: mkdir -p ./skopeo-manifests
      # Write one Skopeo manifest per addon listing the extracted images plus
      # any hard-coded additional images; format() substitutes the new chart
      # version ({0}) and app-version ({1}) into the additional-images template
      - name: Write Skopeo manifest
        uses: stackhpc/github-actions/skopeo-manifest@master
        with:
          manifest-file: ./skopeo-manifests/${{ matrix.key }}.yaml
          images: |
            ${{ steps.extract-images.outputs.images }}
            ${{
              format(
                matrix.additional-images || '',
                steps.next.outputs.version,
                steps.next.outputs.app-version
              )
            }}
      # Use a GitHub App token so the PR is opened by the app, not the
      # default workflow token
      - name: Generate app token for PR
        uses: stackhpc/github-actions/generate-app-token@master
        id: generate-app-token
        with:
          repository: ${{ github.repository }}
          app-id: ${{ secrets.APP_ID }}
          app-private-key: ${{ secrets.APP_PRIVATE_KEY }}
      # create-pull-request only opens a PR when the working tree has changes,
      # i.e. when the version in values.yaml actually moved
      - name: Propose changes via PR if required
        uses: peter-evans/create-pull-request@v6
        with:
          token: ${{ steps.generate-app-token.outputs.token }}
          commit-message: >-
            Update ${{ matrix.key }} addon to ${{ steps.next.outputs.version }}
          branch: update-addon/${{ matrix.key }}
          delete-branch: true
          title: >-
            Update ${{ matrix.key }} addon to ${{ steps.next.outputs.version }}
          body: >
            This PR was created automatically to update the
            ${{ matrix.key }} addon to ${{ steps.next.outputs.version }}.
          labels: |
            automation
            addon-update

View File

@ -1,112 +0,0 @@
# Workflow: check each tracked dependency (Helm chart or GitHub release) for a
# newer version and open an automated PR updating its entry in dependencies.json.
name: Update dependencies
on:
  # Allow manual executions
  workflow_dispatch:
  # Run nightly
  schedule:
    - cron: '0 0 * * *'
jobs:
  propose_update_pr:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # One matrix entry per dependency. `type` selects which version-check
        # step runs: `helm` entries need repo_url + chart_name, `github`
        # entries need the owner/repo slug.
        include:
          - key: addon-provider
            type: helm
            repo_url: https://stackhpc.github.io/cluster-api-addon-provider
            chart_name: cluster-api-addon-provider
          - key: azimuth-images
            type: github
            repo: stackhpc/azimuth-images
          - key: cluster-api
            type: github
            repo: kubernetes-sigs/cluster-api
          - key: cluster-api-janitor-openstack
            type: helm
            repo_url: https://stackhpc.github.io/cluster-api-janitor-openstack
            chart_name: cluster-api-janitor-openstack
          - key: cluster-api-provider-openstack
            type: github
            repo: kubernetes-sigs/cluster-api-provider-openstack
          - key: cert-manager
            type: helm
            repo_url: https://charts.jetstack.io
            chart_name: cert-manager
          - key: helm
            type: github
            repo: helm/helm
          - key: sonobuoy
            type: github
            repo: vmware-tanzu/sonobuoy
    # Use the dependency key as the job's display name in the Actions UI
    name: ${{ matrix.key }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      # Exactly one of the next two steps runs, gated on matrix.type
      - name: Check for updates (Helm)
        id: helm-version
        uses: stackhpc/github-actions/helm-latest-version@master
        with:
          repository: ${{ matrix.repo_url }}
          chart: ${{ matrix.chart_name }}
        if: ${{ matrix.type == 'helm' }}
      - name: Check for updates (GitHub)
        id: github-version
        uses: stackhpc/github-actions/github-latest-release@master
        with:
          repository: ${{ matrix.repo }}
        if: ${{ matrix.type == 'github' }}
      # Normalise the two possible outputs into a single `next.version` output;
      # the && / || expression acts as a ternary on matrix.type
      - name: Get next version from relevant outputs
        id: next
        run: >-
          echo "version=$NEXT_VERSION" >> "$GITHUB_OUTPUT"
        env:
          NEXT_VERSION: >-
            ${{
              matrix.type == 'helm' &&
              steps.helm-version.outputs.version ||
              steps.github-version.outputs.version
            }}
      - name: Update dependency key
        uses: stackhpc/github-actions/config-update@master
        with:
          path: ./dependencies.json
          updates: |
            ${{ matrix.key }}=${{ steps.next.outputs.version }}
      # Use a GitHub App token so the PR is opened by the app, not the
      # default workflow token
      - name: Generate app token for PR
        uses: stackhpc/github-actions/generate-app-token@master
        id: generate-app-token
        with:
          repository: ${{ github.repository }}
          app-id: ${{ secrets.APP_ID }}
          app-private-key: ${{ secrets.APP_PRIVATE_KEY }}
      # create-pull-request only opens a PR when dependencies.json actually changed
      - name: Propose changes via PR if required
        uses: peter-evans/create-pull-request@v6
        with:
          token: ${{ steps.generate-app-token.outputs.token }}
          commit-message: >-
            Update ${{ matrix.key }} to ${{ steps.next.outputs.version }}
          branch: update-dependency/${{ matrix.key }}
          delete-branch: true
          title: >-
            Update ${{ matrix.key }} to ${{ steps.next.outputs.version }}
          body: >
            This PR was created automatically to update
            ${{ matrix.key }} to ${{ steps.next.outputs.version }}.
          labels: |
            automation
            dependency-update