Add upload-image-s3 role

This adds a role similar to upload-image-swift to upload dib images
to s3 for use by zuul-launcher.

Change-Id: Ie802a0221717e6d5d5dcaa91771d07f8d0321889
James E. Blair 2025-03-17 14:40:15 -07:00
parent 4b025bbf2f
commit 0c004096c6
8 changed files with 364 additions and 0 deletions

@@ -5,3 +5,4 @@ Diskimage-Builder Roles
.. zuul:autorole:: build-diskimage
.. zuul:autorole:: convert-diskimage
.. zuul:autorole:: upload-image-swift
.. zuul:autorole:: upload-image-s3

@@ -0,0 +1,66 @@
Upload a filesystem image to an S3 bucket

This uploads a filesystem image (for example, one built by
diskimage-builder) to an S3 bucket.  The role returns an artifact to
Zuul suitable for use by the zuul-launcher.

**Role Variables**

.. zuul:rolevar:: upload_image_s3_endpoint

   The endpoint to use when uploading an image to an S3-compatible
   service.  By default this is constructed automatically by boto,
   but it should be set explicitly when working with a non-AWS-hosted
   S3 service.

.. zuul:rolevar:: upload_image_s3_aws_access_key

   AWS access key to use.

.. zuul:rolevar:: upload_image_s3_aws_secret_key

   AWS secret key for the AWS access key.

.. zuul:rolevar:: upload_image_s3_bucket

   The name of the bucket to use.  This role *will not* create
   buckets which do not already exist.

   Note that you will want to set this to a value that uniquely
   identifies your Zuul installation.  The bucket should be dedicated
   to image uploads so that the "delete_after" option may be safely
   used.

.. zuul:rolevar:: upload_image_s3_delete_after
   :default: 0

   Number of seconds after upload at which objects should be deleted.
   Default is 0 (disabled).  Each run of the role will attempt to
   delete any objects in the bucket older than this.

   It is also recommended to configure automatic expiration of
   objects in this bucket, either via the AWS console or the API (a
   sketch follows this file).

.. zuul:rolevar:: upload_image_s3_image_name
   :default: `{{ build_diskimage_image_name }}`

   The Zuul image name for use by zuul-launcher (e.g.,
   `debian-bookworm`).

.. zuul:rolevar:: upload_image_s3_format

   The image format (e.g., `qcow2`).

.. zuul:rolevar:: upload_image_s3_extension
   :default: `{{ upload_image_s3_format }}`

   The extension to use when uploading (only used in the default
   values for the following variables).

.. zuul:rolevar:: upload_image_s3_filename
   :default: `{{ build_diskimage_image_root }}/{{ build_diskimage_image_name }}.{{ upload_image_s3_extension }}`

   The path of the local file to upload.

.. zuul:rolevar:: upload_image_s3_name
   :default: `{{ zuul.build }}-{{ build_diskimage_image_name }}.{{ upload_image_s3_extension }}`

   The object name to use when uploading.
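
As a follow-up to the expiration recommendation above, a minimal boto3
sketch of setting a bucket-level lifecycle rule.  The bucket name and
the three-day window are assumptions for illustration, not part of
this change:

# Sketch: bucket-level expiration as a backstop for
# upload_image_s3_delete_after.  Bucket name and expiry window are
# assumptions; credentials come from the default boto3 chain.
import boto3

s3 = boto3.client('s3')
s3.put_bucket_lifecycle_configuration(
    Bucket='example-zuul-images',
    LifecycleConfiguration={
        'Rules': [{
            'ID': 'expire-image-uploads',
            'Filter': {'Prefix': ''},
            'Status': 'Enabled',
            'Expiration': {'Days': 3},
        }]
    },
)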

@@ -0,0 +1,5 @@
upload_image_s3_image_name: '{{ build_diskimage_image_name }}'
upload_image_s3_delete_after: 0
upload_image_s3_filename: '{{ build_diskimage_image_root }}/{{ build_diskimage_image_name }}.{{ upload_image_s3_extension }}'
upload_image_s3_name: '{{ zuul.build }}-{{ build_diskimage_image_name }}.{{ upload_image_s3_extension }}'
upload_image_s3_extension: '{{ upload_image_s3_format }}'

@@ -0,0 +1,136 @@
# Copyright 2014 Rackspace Australia
# Copyright 2018 Red Hat, Inc
# Copyright 2024-2025 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import datetime
import logging
import os
import sys
import traceback

import boto3

from ansible.module_utils.basic import AnsibleModule


def prune(bucket, delete_after):
    # In case the automatic expiration doesn't work, manually prune
    # old uploads.
    if not delete_after:
        return
    target = (datetime.datetime.now(datetime.UTC) -
              datetime.timedelta(seconds=delete_after))
    for obj in bucket.objects.all():
        if obj.last_modified < target:
            obj.delete()


def run(endpoint, bucket_name, aws_access_key, aws_secret_key,
        filename, name, delete_after=None):
    endpoint = endpoint or 'https://s3.amazonaws.com/'
    s3 = boto3.resource('s3',
                        endpoint_url=endpoint,
                        aws_access_key_id=aws_access_key,
                        aws_secret_access_key=aws_secret_key)
    bucket = s3.Bucket(bucket_name)
    prune(bucket, delete_after)
    bucket.upload_file(filename, name)
    url = os.path.join(endpoint, bucket_name, name)
    return url


def ansible_main():
    module = AnsibleModule(
        argument_spec=dict(
            endpoint=dict(type='str'),
            bucket=dict(required=True, type='str'),
            filename=dict(required=True, type='path'),
            name=dict(required=True, type='str'),
            delete_after=dict(type='int'),
            aws_access_key=dict(type='str'),
            aws_secret_key=dict(type='str', no_log=True),
        )
    )

    p = module.params
    try:
        url = run(
            p.get('endpoint'),
            p.get('bucket'),
            p.get('aws_access_key'),
            p.get('aws_secret_key'),
            p.get('filename'),
            p.get('name'),
            delete_after=p.get('delete_after'),
        )
    except Exception:
        s = "Error uploading to S3"
        s += "\n" + traceback.format_exc()
        module.fail_json(
            changed=False,
            msg=s)
    module.exit_json(
        changed=True,
        url=url,
    )


def cli_main():
    parser = argparse.ArgumentParser(
        description="Upload image to S3"
    )
    parser.add_argument('--verbose', action='store_true',
                        help='show debug information')
    parser.add_argument('--endpoint',
                        help='http endpoint of s3 service')
    parser.add_argument('bucket',
                        help='Name of the bucket to use when uploading')
    parser.add_argument('filename',
                        help='the file to upload')
    parser.add_argument('name',
                        help='the object name')
    parser.add_argument('--delete-after',
                        help='Number of seconds after upload at which '
                        'objects should be deleted.  If unset or 0, '
                        'pruning is disabled; otherwise objects in '
                        'the bucket older than this are deleted on '
                        'each run',
                        type=int)
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
        logging.captureWarnings(True)

    url = run(
        args.endpoint,
        args.bucket,
        None,
        None,
        args.filename,
        args.name,
        delete_after=args.delete_after,
    )
    print(url)


if __name__ == '__main__':
    if not sys.stdin.isatty():
        ansible_main()
    else:
        cli_main()
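
Because of the dual CLI/Ansible entrypoint, run() can also be
exercised directly for local testing.  A minimal sketch, assuming the
module file is importable as upload_image_s3 and reusing the minio
values from the test playbook below:

# Sketch only: endpoint, credentials, and paths are assumptions taken
# from the test playbook, not part of the module itself.
from upload_image_s3 import run

url = run(
    'http://localhost:9000',  # endpoint
    'zuul',                   # bucket (must already exist)
    'minioadmin',             # aws_access_key
    'minioadmin',             # aws_secret_key
    '/tmp/testfile',          # local file to upload
    'image.raw',              # object name
    delete_after=86400,       # first prune objects older than a day
)
print(url)  # e.g. http://localhost:9000/zuul/image.raw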

@@ -0,0 +1,58 @@
# Run the checksums in the background while we're uploading
- name: Get sha256 hash
  stat:
    path: '{{ upload_image_s3_filename }}'
    checksum_algorithm: sha256
  async: 600
  poll: 0
  register: sha256_task

- name: Get md5 hash
  stat:
    path: '{{ upload_image_s3_filename }}'
    checksum_algorithm: md5
  async: 600
  poll: 0
  register: md5_task

- name: Upload image to S3
  no_log: true
  upload_image_s3:
    endpoint: "{{ upload_image_s3_endpoint | default(omit) }}"
    bucket: "{{ upload_image_s3_bucket }}"
    aws_access_key: "{{ upload_image_s3_aws_access_key }}"
    aws_secret_key: "{{ upload_image_s3_aws_secret_key }}"
    filename: '{{ upload_image_s3_filename }}'
    name: '{{ upload_image_s3_name }}'
    delete_after: '{{ upload_image_s3_delete_after }}'
  register: upload_results

- name: Wait for sha256
  async_status:
    jid: "{{ sha256_task.ansible_job_id }}"
  register: sha256
  until: sha256.finished
  retries: 1
  delay: 10

- name: Wait for md5
  async_status:
    jid: "{{ md5_task.ansible_job_id }}"
  register: md5
  until: md5.finished
  retries: 1
  delay: 10

- name: Return artifact to Zuul
  zuul_return:
    data:
      zuul:
        artifacts:
          - name: '{{ upload_image_s3_format }} image'
            url: '{{ upload_results.url }}'
            metadata:
              type: 'zuul_image'
              image_name: '{{ upload_image_s3_image_name }}'
              format: '{{ upload_image_s3_format }}'
              sha256: '{{ sha256.stat.checksum }}'
              md5sum: '{{ md5.stat.checksum }}'
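
The two async stat tasks above compute digests while the upload runs.
A rough Python equivalent of what checksum_algorithm calculates for
the file (the path is an assumption):

import hashlib

def file_digest(path, algorithm='sha256', chunk_size=65536):
    # Stream the file in chunks so large images don't need to fit
    # in memory.
    h = hashlib.new(algorithm)
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

print(file_digest('/tmp/testfile'))         # sha256 by default
print(file_digest('/tmp/testfile', 'md5'))  # md5 for the artifact metadata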

@@ -0,0 +1,89 @@
- hosts: all
  vars:
    test_aws_access_key: minioadmin
    test_aws_secret_key: minioadmin
    test_bucket: zuul
    test_image: "image"
    test_filename: "/tmp/testfile"
    test_objectname_old: "image-old.raw"
    test_objectname: "image.raw"
    test_ext: "raw"
    test_content: "123abc"
  roles:
    - ensure-docker
    - ensure-pip
  tasks:
    - name: Install boto
      pip:
        extra_args: "--break-system-packages"
        name:
          - boto3
          - botocore

    - name: Start minio server
      command: >-
        docker run -d -p 9000:9000
        -e MINIO_ACCESS_KEY={{ test_aws_access_key }}
        -e MINIO_SECRET_KEY={{ test_aws_secret_key }}
        quay.io/minio/minio server /data

    - name: Make sure bucket exists
      s3_bucket:
        name: "{{ test_bucket }}"
        state: present
        s3_url: 'http://localhost:9000'
        aws_access_key: "{{ test_aws_access_key }}"
        aws_secret_key: "{{ test_aws_secret_key }}"

    - name: Add content to tempfile
      copy:
        content: "{{ test_content }}"
        dest: "{{ test_filename }}"

    - name: Upload file to s3
      include_role:
        name: upload-image-s3
      vars:
        upload_image_s3_endpoint: "http://localhost:9000"
        upload_image_s3_aws_access_key: "{{ test_aws_access_key }}"
        upload_image_s3_aws_secret_key: "{{ test_aws_secret_key }}"
        upload_image_s3_bucket: "{{ test_bucket }}"
        upload_image_s3_filename: "{{ test_filename }}"
        upload_image_s3_name: "{{ test_objectname_old }}"
        upload_image_s3_image_name: "{{ test_image }}"
        upload_image_s3_format: "{{ test_ext }}"
        upload_image_s3_delete_after: 1

    - name: Delay for 5 seconds
      wait_for:
        timeout: 5

    # This should delete the first object then replace it.
    - name: Upload file to s3
      include_role:
        name: upload-image-s3
      vars:
        upload_image_s3_endpoint: "http://localhost:9000"
        upload_image_s3_aws_access_key: "{{ test_aws_access_key }}"
        upload_image_s3_aws_secret_key: "{{ test_aws_secret_key }}"
        upload_image_s3_bucket: "{{ test_bucket }}"
        upload_image_s3_filename: "{{ test_filename }}"
        upload_image_s3_name: "{{ test_objectname }}"
        upload_image_s3_image_name: "{{ test_image }}"
        upload_image_s3_format: "{{ test_ext }}"
        upload_image_s3_delete_after: 1

    - name: Download mc
      get_url:
        url: https://dl.min.io/client/mc/release/linux-amd64/mc
        dest: "{{ ansible_user_dir }}/mc"
        mode: 0755

    - name: Add localhost minio host
      command: "{{ ansible_user_dir }}/mc config host add local http://localhost:9000 {{ test_aws_access_key }} {{ test_aws_secret_key }}"

    - name: List files in minio bucket
      command: "{{ ansible_user_dir }}/mc find local/zuul"

    - name: Check for testfile in minio bucket
      command: "{{ ansible_user_dir }}/mc find local/zuul/{{ test_objectname }}"
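
The same verification could be done with boto3 instead of mc; a
sketch under the playbook's own assumptions (local minio, test
credentials, pruning already triggered by the second upload):

import boto3

s3 = boto3.resource('s3',
                    endpoint_url='http://localhost:9000',
                    aws_access_key_id='minioadmin',
                    aws_secret_access_key='minioadmin')
names = [obj.key for obj in s3.Bucket('zuul').objects.all()]
assert 'image.raw' in names          # the fresh upload is present
assert 'image-old.raw' not in names  # the old object was pruned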

@@ -177,6 +177,14 @@
      - name: ubuntu-noble
        label: ubuntu-noble

- job:
    name: zuul-jobs-test-upload-image-s3
    description: Test the upload-image-s3 role
    files:
      - roles/upload-image-s3/.*
      - test-playbooks/upload-image-s3.yaml
    run: test-playbooks/upload-image-s3.yaml

- project:
    check:
      jobs: &id001
@@ -195,6 +203,7 @@
        - zuul-jobs-test-convert-diskimage-ubuntu-focal
        - zuul-jobs-test-convert-diskimage-ubuntu-jammy
        - zuul-jobs-test-convert-diskimage-ubuntu-noble
        - zuul-jobs-test-upload-image-s3
    gate:
      jobs: *id001
    periodic-weekly: