Use S3 lock for CI

Matt Pryor 2024-03-11 12:31:19 +00:00
parent 88712c4029
commit 22aaa9e700
2 changed files with 50 additions and 16 deletions

View File

@@ -14,9 +14,9 @@ concurrency:
cancel-in-progress: true
jobs:
# Use a job that does nothing but has an environment as a guard to control
# access to the rest of the workflow
# This allows us to control access to test infra for concurrency and approval reasons
# This job does nothing, but uses an environment that has protection in place
# It is used as a guard to the rest of the workflow so that we can require approval
# for all commits to a PR
wait_for_approval:
runs-on: ubuntu-latest
environment: ci-approval
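Only the guard job itself appears in this hunk. A minimal sketch of the pattern the comment describes is below; the trigger, the guard's step, and the downstream job are illustrative assumptions rather than content from this diff. Every other job lists the guard in its needs, and because the ci-approval environment has protection rules, nothing downstream starts until the run is approved.

on: pull_request   # illustrative trigger; the real triggers are not shown in this hunk

jobs:
  # Does nothing, but runs in an environment with required reviewers,
  # so it only starts once a reviewer approves the run
  wait_for_approval:
    runs-on: ubuntu-latest
    environment: ci-approval
    steps:
      - name: Workflow approved   # illustrative no-op step
        run: /bin/true

  # Illustrative downstream job: blocked until the guard job has completed
  tests:
    runs-on: ubuntu-latest
    needs: [wait_for_approval]
    steps:
      - name: Run tests
        run: echo "approval granted, running tests"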

View File

@@ -19,12 +19,27 @@ on:
default: false
jobs:
# We use the same resource as the Azimuth CI, so acquire the same CI lock
acquire-lock:
runs-on: ubuntu-latest
steps:
- name: Acquire S3 lock
uses: stackhpc/github-actions/s3-lock@master
with:
host: ${{ vars.CI_S3_LOCK_HOST }}
access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
action: acquire
if: ${{ vars.CI_S3_LOCK_HOST != '' }}
# This job tests a clean deployment against the latest version
# It is the only job that runs when tests-full=false
# For tests-full=true it creates an internal network + router and runs Sonobuoy in conformance mode
# For tests-full=false it uses a pre-existing internal network and runs Sonobuoy in quick mode
latest:
runs-on: ubuntu-latest
needs: [acquire-lock]
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -52,11 +67,11 @@ jobs:
project_id: ${{ secrets.PROJECT_ID }}
verify: false
controlPlane:
machineFlavor: ${{ secrets.CONTROL_PLANE_FLAVOR }}
machineFlavor: ${{ vars.CONTROL_PLANE_FLAVOR }}
machineCount: 1
nodeGroups:
- name: md-0
machineFlavor: ${{ secrets.NODE_GROUP_FLAVOR }}
machineFlavor: ${{ vars.NODE_GROUP_FLAVOR }}
machineCount: 2
- name: Apply network configuration
@@ -64,7 +79,7 @@ jobs:
env:
NETWORKING: |
clusterNetworking:
externalNetworkId: ${{ secrets.EXTERNAL_NETWORK_ID }}
externalNetworkId: ${{ vars.EXTERNAL_NETWORK_ID }}
if: ${{ inputs.tests-full }}
- name: Apply network configuration
@@ -72,7 +87,7 @@ jobs:
env:
NETWORKING: |
clusterNetworking:
externalNetworkId: ${{ secrets.EXTERNAL_NETWORK_ID }}
externalNetworkId: ${{ vars.EXTERNAL_NETWORK_ID }}
internalNetwork:
networkFilter:
tags: capi-helm-chart-ci
@@ -103,6 +118,7 @@ jobs:
# It uses a pre-existing internal network and the default volume type
etcd-volume:
runs-on: ubuntu-latest
needs: [acquire-lock]
if: ${{ inputs.tests-full }}
steps:
- name: Checkout
@@ -131,7 +147,7 @@ jobs:
project_id: ${{ secrets.PROJECT_ID }}
verify: false
clusterNetworking:
externalNetworkId: ${{ secrets.EXTERNAL_NETWORK_ID }}
externalNetworkId: ${{ vars.EXTERNAL_NETWORK_ID }}
internalNetwork:
networkFilter:
tags: capi-helm-chart-ci
@@ -139,11 +155,11 @@ jobs:
blockDevice:
size: 10
controlPlane:
machineFlavor: ${{ secrets.CONTROL_PLANE_FLAVOR }}
machineFlavor: ${{ vars.CONTROL_PLANE_FLAVOR }}
machineCount: 1
nodeGroups:
- name: md-0
machineFlavor: ${{ secrets.NODE_GROUP_FLAVOR }}
machineFlavor: ${{ vars.NODE_GROUP_FLAVOR }}
machineCount: 2
- name: Deploy Kubernetes 1.29 for etcd volume test
@@ -199,16 +215,16 @@ jobs:
project_id: ${{ secrets.PROJECT_ID }}
verify: false
clusterNetworking:
externalNetworkId: ${{ secrets.EXTERNAL_NETWORK_ID }}
externalNetworkId: ${{ vars.EXTERNAL_NETWORK_ID }}
internalNetwork:
networkFilter:
tags: capi-helm-chart-ci
controlPlane:
machineFlavor: ${{ secrets.CONTROL_PLANE_FLAVOR }}
machineFlavor: ${{ vars.CONTROL_PLANE_FLAVOR }}
machineCount: 1
nodeGroups:
- name: md-0
machineFlavor: ${{ secrets.NODE_GROUP_FLAVOR }}
machineFlavor: ${{ vars.NODE_GROUP_FLAVOR }}
machineCount: 2
- name: Deploy Kubernetes 1.27 for Kubernetes upgrade test
@@ -289,16 +305,16 @@ jobs:
project_id: ${{ secrets.PROJECT_ID }}
verify: false
clusterNetworking:
externalNetworkId: ${{ secrets.EXTERNAL_NETWORK_ID }}
externalNetworkId: ${{ vars.EXTERNAL_NETWORK_ID }}
internalNetwork:
networkFilter:
tags: capi-helm-chart-ci
controlPlane:
machineFlavor: ${{ secrets.CONTROL_PLANE_FLAVOR }}
machineFlavor: ${{ vars.CONTROL_PLANE_FLAVOR }}
machineCount: 1
nodeGroups:
- name: md-0
machineFlavor: ${{ secrets.NODE_GROUP_FLAVOR }}
machineFlavor: ${{ vars.NODE_GROUP_FLAVOR }}
machineCount: 2
addons:
kubernetesDashboard:
@@ -356,3 +372,21 @@ jobs:
with:
name-suffix: ${{ github.job }}
if: ${{ always() }}
# Release the same CI lock as is used by the Azimuth CI
# If the Azimuth tests run, the lock will already have been released, in which case
# this is a no-op, but we need to make sure it is released if the builds fail
release-lock:
runs-on: ubuntu-latest
needs: [latest, chart-upgrade]
if: ${{ always() }}
steps:
- name: Release S3 lock
uses: stackhpc/github-actions/s3-lock@master
with:
host: ${{ vars.CI_S3_LOCK_HOST }}
access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
action: release
if: ${{ vars.CI_S3_LOCK_HOST != '' }}
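Taken together, the lock-related additions wrap the test jobs in an acquire/release pair: the tests only start once acquire-lock holds the shared lock, and release-lock runs with if: ${{ always() }} so the lock is dropped even when a test job fails or is cancelled. A condensed sketch of just that structure follows; the trigger is illustrative, only two test jobs are shown with their step bodies elided, and gating chart-upgrade on acquire-lock is an assumption since that job's body is not in these hunks.

on: workflow_call   # illustrative; the real triggers and inputs are unchanged by this commit

jobs:
  acquire-lock:
    runs-on: ubuntu-latest
    steps:
      - name: Acquire S3 lock
        uses: stackhpc/github-actions/s3-lock@master
        with:
          host: ${{ vars.CI_S3_LOCK_HOST }}
          access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
          secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
          bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
          action: acquire
        if: ${{ vars.CI_S3_LOCK_HOST != '' }}   # no-op when no lock host is configured

  latest:
    runs-on: ubuntu-latest
    needs: [acquire-lock]   # tests only start once the lock is held
    steps:
      - name: Test steps elided
        run: echo "see the diff above for the real steps"

  chart-upgrade:
    runs-on: ubuntu-latest
    needs: [acquire-lock]   # assumed gating; this job's body is not in these hunks
    steps:
      - name: Test steps elided
        run: echo "see the diff above for the real steps"

  release-lock:
    runs-on: ubuntu-latest
    needs: [latest, chart-upgrade]
    if: ${{ always() }}   # run even if a test job fails or is cancelled
    steps:
      - name: Release S3 lock
        uses: stackhpc/github-actions/s3-lock@master
        with:
          host: ${{ vars.CI_S3_LOCK_HOST }}
          access-key: ${{ secrets.CI_S3_LOCK_ACCESS_KEY }}
          secret-key: ${{ secrets.CI_S3_LOCK_SECRET_KEY }}
          bucket: ${{ vars.CI_S3_LOCK_BUCKET }}
          action: release
        if: ${{ vars.CI_S3_LOCK_HOST != '' }}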