---
name: Test OpenStack cluster chart

# Reusable workflow: deploys the OpenStack cluster Helm chart onto a local
# kind management cluster (via Cluster API + CAPO) and runs sonobuoy tests.
on:
  workflow_call:
    inputs:
      images:
        type: string
        description: JSON-encoded dictionary of images and versions
        required: true
      tests-full:
        type: boolean
        description: Indicates whether to run the full test suite or just a smoke test
        # NOTE(review): was `required: true`, which made `default` dead — a
        # required workflow_call input must always be supplied by the caller.
        # Optional + default lets the declared smoke-test default apply, and
        # remains backward-compatible with callers that pass a value.
        required: false
        default: false

# Tool versions. Workflow-level env is exported to every step shell, so these
# are referenced both as ${{ env.* }} (Actions expressions) and $VAR (bash).
env:
  HELM_VERSION: v3.11.3
  CAPI_VERSION: v1.4.3
  CAPO_VERSION: v0.7.3
  ADDON_PROVIDER_VERSION: 0.1.0-dev.0.main.26
  SONOBUOY_VERSION: 0.56.16
  CERTMANAGER_VERSION: v1.12.1

jobs:
  test-chart:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Install tools
        run: sudo apt install -y zip unzip

      # mv -n (no-clobber) silently keeps any pre-installed sonobuoy binary —
      # presumably intentional on a fresh runner; confirm if runners change.
      - name: Install sonobuoy
        run: >
          wget https://github.com/vmware-tanzu/sonobuoy/releases/download/v${SONOBUOY_VERSION}/sonobuoy_${SONOBUOY_VERSION}_linux_amd64.tar.gz &&
          tar -xf sonobuoy_${SONOBUOY_VERSION}_linux_amd64.tar.gz &&
          sudo mv -n sonobuoy /usr/bin/

      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          check-latest: true

      - name: Set up Helm
        uses: azure/setup-helm@v3
        with:
          version: ${{ env.HELM_VERSION }}

      # The kind cluster acts as the Cluster API management cluster.
      - name: Create k8s Kind Cluster
        uses: helm/kind-action@v1.5.0

      # cert-manager is a prerequisite for the Cluster API controllers.
      - name: Install cert-manager
        run: |-
          helm upgrade cert-manager cert-manager \
            --repo https://charts.jetstack.io \
            --version ${CERTMANAGER_VERSION} \
            --namespace cert-manager \
            --create-namespace \
            --install \
            --set installCRDs=true \
            --wait \
            --timeout 10m

      - name: Ensure Cluster API kustomization directory exists
        run: mkdir -p clusterapi

      # From here: https://github.com/stackhpc/ansible-collection-azimuth-ops/blob/main/roles/clusterapi/defaults/main.yml
      # The patches disable the insecure metrics ports by binding metrics to
      # localhost only, for each CAPI controller deployment.
      - name: Write Cluster API kustomization file
        uses: DamianReeves/write-file-action@master
        with:
          path: clusterapi/kustomization.yaml
          write-mode: overwrite
          contents: |
            resources:
              - https://github.com/kubernetes-sigs/cluster-api/releases/download/${{ env.CAPI_VERSION }}/cluster-api-components.yaml
              - https://github.com/kubernetes-sigs/cluster-api-provider-openstack/releases/download/${{ env.CAPO_VERSION }}/infrastructure-components.yaml
            patches:
              - patch: |-
                  - op: replace
                    path: /spec/template/spec/containers/0/args
                    value:
                      - --leader-elect
                      - --metrics-bind-addr=localhost:8080
                target:
                  kind: Deployment
                  namespace: capi-system
                  name: capi-controller-manager
              - patch: |-
                  - op: replace
                    path: /spec/template/spec/containers/0/args
                    value:
                      - --leader-elect
                      - --metrics-bind-addr=localhost:8080
                target:
                  kind: Deployment
                  namespace: capi-kubeadm-bootstrap-system
                  name: capi-kubeadm-bootstrap-controller-manager
              - patch: |-
                  - op: replace
                    path: /spec/template/spec/containers/0/args
                    value:
                      - --leader-elect
                      - --metrics-bind-addr=localhost:8080
                target:
                  kind: Deployment
                  namespace: capi-kubeadm-control-plane-system
                  name: capi-kubeadm-control-plane-controller-manager

      - name: Install Cluster API resources
        run: kubectl apply -k clusterapi/

      - name: Wait for Cluster API controllers to become ready
        run: |-
          kubectl rollout status deployment/capi-controller-manager \
            --namespace capi-system \
            --timeout 5m \
          && \
          kubectl rollout status deployment/capi-kubeadm-bootstrap-controller-manager \
            --namespace capi-kubeadm-bootstrap-system \
            --timeout 5m \
          && \
          kubectl rollout status deployment/capi-kubeadm-control-plane-controller-manager \
            --namespace capi-kubeadm-control-plane-system \
            --timeout 5m \
          && \
          kubectl rollout status deployment/capo-controller-manager \
            --namespace capo-system \
            --timeout 10m

      - name: Install Cluster API add-on provider
        run: |-
          helm upgrade cluster-api-addon-provider cluster-api-addon-provider \
            --repo https://stackhpc.github.io/cluster-api-addon-provider \
            --version ${ADDON_PROVIDER_VERSION} \
            --namespace capi-addon-system \
            --create-namespace \
            --install \
            --wait \
            --timeout 10m

      - name: Write cloud credential
        run: >
          echo "$CLOUD" > clouds.yml
        shell: bash
        env:
          CLOUD: ${{ secrets.CLOUD }}

      # NOTE(review): `verify` is placed at the cloud level (sibling of `auth`)
      # per the standard clouds.yaml schema — confirm against the chart's
      # expected values layout.
      - name: Write common Helm values
        uses: DamianReeves/write-file-action@master
        with:
          path: values-common.yaml
          write-mode: overwrite
          contents: |
            clouds:
              openstack:
                auth:
                  project_id: ${{ secrets.PROJECT_ID }}
                verify: false
            controlPlane:
              machineFlavor: vm.ska.cpu.general.small
              machineCount: 1
            nodeGroups:
              - machineCount: 2
                machineFlavor: vm.ska.cpu.general.small
                name: test-group1

      #####
      # For the smoke test, we do a clean deployment of the latest supported version
      #####
      - name: Test clean Kubernetes 1.27 deployment
        uses: ./.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-kube-latest
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-27-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-27-image }}
          sonobuoy-upload: "yes"
        if: ${{ !inputs.tests-full }}

      # always() ensures cleanup runs even if the test step above failed.
      - name: Delete Kubernetes 1.27 deployment
        run: helm delete ci-${{ github.run_id }}-kube-latest --wait
        if: ${{ !inputs.tests-full && always() }}

      #####
      # On a full test, first test that we can upgrade from a cluster deployed using the latest tag
      #####
      - name: Get latest tag
        id: latest-tag
        run: |
          set -eo pipefail
          TAG_NAME="$(curl -fsSL "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases/latest" | jq -r '.tag_name')"
          echo "tag-name=${TAG_NAME}" >> "$GITHUB_OUTPUT"
        if: ${{ inputs.tests-full }}

      - name: Deploy Kubernetes 1.27 with latest tag for chart upgrade test
        uses: ./.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-chart-upgrade
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-27-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-27-image }}
          chart-version: ${{ steps.latest-tag.outputs.tag-name }}
        if: ${{ inputs.tests-full }}

      # Same release name => Helm upgrade of the existing deployment to the
      # chart version under test (no chart-version input).
      - name: Upgrade to current chart
        uses: ./.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-chart-upgrade
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-27-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-27-image }}
        if: ${{ inputs.tests-full }}

      - name: Delete chart upgrade deployment
        run: helm delete ci-${{ github.run_id }}-chart-upgrade --wait
        if: ${{ inputs.tests-full && always() }}

      #####
      # On a full test, next run a full upgrade test
      # This will run a sonobuoy smoke test after every step with a full test at the end
      #####
      - name: Deploy Kubernetes 1.25 for Kubernetes upgrade test
        uses: ./.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-kube-upgrade
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-25-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-25-image }}
        if: ${{ inputs.tests-full }}

      - name: Upgrade to Kubernetes 1.26
        uses: ./.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-kube-upgrade
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-26-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-26-image }}
        if: ${{ inputs.tests-full }}

      - name: Upgrade to Kubernetes 1.27
        uses: ./.github/actions/upgrade-and-test
        with:
          name: ci-${{ github.run_id }}-kube-upgrade
          kubernetes-version: ${{ fromJson(inputs.images).kube-1-27-version }}
          image-id: ${{ fromJson(inputs.images).kube-1-27-image }}
          sonobuoy-mode: full
          sonobuoy-upload: "yes"
        if: ${{ inputs.tests-full }}

      - name: Delete Kubernetes upgrade deployment
        run: helm delete ci-${{ github.run_id }}-kube-upgrade --wait
        if: ${{ inputs.tests-full && always() }}

      # Always collect controller logs for debugging, pass or fail.
      - name: Output controller logs
        if: ${{ always() }}
        run: |
          kubectl -n capi-system logs deploy/capi-controller-manager > capi-logs.txt
          kubectl -n capi-kubeadm-control-plane-system logs deploy/capi-kubeadm-control-plane-controller-manager > capi-kubeadm-control-plane-logs.txt
          kubectl -n capi-kubeadm-bootstrap-system logs deploy/capi-kubeadm-bootstrap-controller-manager > capi-kubeadm-bootstrap-logs.txt
          kubectl -n capo-system logs deploy/capo-controller-manager > capo-logs.txt
          kubectl -n capi-addon-system logs deploy/cluster-api-addon-provider > capi-addon-provider-logs.txt

      - name: Upload controller log artifacts
        uses: actions/upload-artifact@v3
        if: ${{ always() }}
        with:
          name: cluster-api-controller-logs
          path: ./*-logs.txt