From 17f95d3c7de90c752e5d2eb62aee6194dd970f7a Mon Sep 17 00:00:00 2001 From: Steve Loranz Date: Thu, 3 Oct 2013 09:45:20 -0500 Subject: [PATCH] This commit represents 66 commits squashed together. Original commit messages can be found on GitHub: https://github.com/imcleod/novaimagebuilder/commits/new This work represents the switch from a REST service to a CLI and module library for building Glance images using Nova, Glance, Cinder, and native operating system installers. Change-Id: I1edd1ca5a66f18403c75acd8376ef859c6710907 --- README.md | 155 ----- etc/imagebuilder/imagebuilder.conf | 5 - imagebuilder-api | 24 - imagebuilder/MongoPersistentBuildManager.py | 124 ---- .../SQLAlchemyPersistentBuildManager.py | 119 ---- imagebuilder/__init__.py | 15 - imagebuilder/api/README | 28 - imagebuilder/api/__init__.py | 15 - imagebuilder/api/app.py | 75 --- imagebuilder/api/config.py | 60 -- .../api/controllers/osib/v1/Builds.py | 72 --- .../api/controllers/osib/v1/__init__.py | 21 - imagebuilder/api/templates/error.html | 12 - imagebuilder/api/templates/index.html | 34 - imagebuilder/api/templates/layout.html | 22 - imagebuilder/api/tests/__init__.py | 22 - imagebuilder/api/tests/config.py | 25 - imagebuilder/api/tests/test_functional.py | 22 - imagebuilder/api/tests/test_units.py | 7 - imagebuilder/create_image.py | 274 -------- imagebuilder/image_utils.py | 591 ------------------ imagebuilder/openstack/common/__init__.py | 0 imagebuilder/openstack/common/gettextutils.py | 226 ------- imagebuilder/openstack/common/importutils.py | 67 -- imagebuilder/openstack/common/jsonutils.py | 169 ----- imagebuilder/openstack/common/local.py | 48 -- imagebuilder/openstack/common/log.py | 558 ----------------- imagebuilder/openstack/common/timeutils.py | 187 ------ imagebuilder/ping.py | 217 ------- imagebuilder/public/css/style.css | 43 -- imagebuilder/public/images/logo.png | Bin 35094 -> 0 bytes imagebuilder/service.py | 44 -- install_scripts/fedora-17-jeos.ks | 35 -- 
install_scripts/fedora-18-jeos-DVD.ks | 37 -- install_scripts/fedora-18-jeos.ks | 36 -- install_scripts/rhel-5-jeos.ks | 35 -- install_scripts/rhel-6-jeos.ks | 32 - install_scripts/ubuntu-10.04-jeos.preseed | 54 -- install_scripts/ubuntu-12.04-jeos.preseed | 52 -- install_scripts/ubuntu-12.10-jeos.preseed | 52 -- nova-install | 147 +++++ novaimagebuilder/BaseOS.py | 118 ++++ novaimagebuilder/Builder.py | 176 ++++++ novaimagebuilder/CacheManager.py | 248 ++++++++ novaimagebuilder/ISOHelper.py | 430 +++++++++++++ novaimagebuilder/NovaInstance.py | 90 +++ novaimagebuilder/OSInfo.py | 233 +++++++ novaimagebuilder/RedHatOS.py | 165 +++++ .../Singleton.py | 20 +- novaimagebuilder/StackEnvironment.py | 502 +++++++++++++++ novaimagebuilder/SyslinuxHelper.py | 181 ++++++ novaimagebuilder/WindowsOS.py | 141 +++++ novaimagebuilder/__init__.py | 1 + openstack-common.conf | 4 - tests/MockCacheManager.py | 155 +++++ tests/MockNovaInstance.py | 47 ++ tests/MockOS.py | 62 ++ tests/MockStackEnvironment.py | 113 ++++ {imagebuilder/openstack => tests}/__init__.py | 0 tests/test_OSInfo.py | 85 +++ .../__init__.py => tests/test_cacheManager.py | 21 +- tests/testcache.py | 47 ++ 62 files changed, 2970 insertions(+), 3630 deletions(-) delete mode 100644 README.md delete mode 100644 etc/imagebuilder/imagebuilder.conf delete mode 100755 imagebuilder-api delete mode 100644 imagebuilder/MongoPersistentBuildManager.py delete mode 100644 imagebuilder/SQLAlchemyPersistentBuildManager.py delete mode 100644 imagebuilder/__init__.py delete mode 100644 imagebuilder/api/README delete mode 100644 imagebuilder/api/__init__.py delete mode 100644 imagebuilder/api/app.py delete mode 100644 imagebuilder/api/config.py delete mode 100644 imagebuilder/api/controllers/osib/v1/Builds.py delete mode 100644 imagebuilder/api/controllers/osib/v1/__init__.py delete mode 100644 imagebuilder/api/templates/error.html delete mode 100644 imagebuilder/api/templates/index.html delete mode 100644 
imagebuilder/api/templates/layout.html delete mode 100644 imagebuilder/api/tests/__init__.py delete mode 100644 imagebuilder/api/tests/config.py delete mode 100644 imagebuilder/api/tests/test_functional.py delete mode 100644 imagebuilder/api/tests/test_units.py delete mode 100755 imagebuilder/create_image.py delete mode 100755 imagebuilder/image_utils.py delete mode 100644 imagebuilder/openstack/common/__init__.py delete mode 100644 imagebuilder/openstack/common/gettextutils.py delete mode 100644 imagebuilder/openstack/common/importutils.py delete mode 100644 imagebuilder/openstack/common/jsonutils.py delete mode 100644 imagebuilder/openstack/common/local.py delete mode 100644 imagebuilder/openstack/common/log.py delete mode 100644 imagebuilder/openstack/common/timeutils.py delete mode 100644 imagebuilder/ping.py delete mode 100644 imagebuilder/public/css/style.css delete mode 100644 imagebuilder/public/images/logo.png delete mode 100644 imagebuilder/service.py delete mode 100644 install_scripts/fedora-17-jeos.ks delete mode 100644 install_scripts/fedora-18-jeos-DVD.ks delete mode 100644 install_scripts/fedora-18-jeos.ks delete mode 100644 install_scripts/rhel-5-jeos.ks delete mode 100644 install_scripts/rhel-6-jeos.ks delete mode 100644 install_scripts/ubuntu-10.04-jeos.preseed delete mode 100644 install_scripts/ubuntu-12.04-jeos.preseed delete mode 100644 install_scripts/ubuntu-12.10-jeos.preseed create mode 100755 nova-install create mode 100644 novaimagebuilder/BaseOS.py create mode 100644 novaimagebuilder/Builder.py create mode 100644 novaimagebuilder/CacheManager.py create mode 100644 novaimagebuilder/ISOHelper.py create mode 100644 novaimagebuilder/NovaInstance.py create mode 100644 novaimagebuilder/OSInfo.py create mode 100644 novaimagebuilder/RedHatOS.py rename imagebuilder/api/controllers/__init__.py => novaimagebuilder/Singleton.py (52%) create mode 100644 novaimagebuilder/StackEnvironment.py create mode 100644 novaimagebuilder/SyslinuxHelper.py create 
mode 100644 novaimagebuilder/WindowsOS.py create mode 100644 novaimagebuilder/__init__.py delete mode 100644 openstack-common.conf create mode 100644 tests/MockCacheManager.py create mode 100644 tests/MockNovaInstance.py create mode 100644 tests/MockOS.py create mode 100644 tests/MockStackEnvironment.py rename {imagebuilder/openstack => tests}/__init__.py (100%) create mode 100644 tests/test_OSInfo.py rename imagebuilder/api/controllers/osib/__init__.py => tests/test_cacheManager.py (65%) create mode 100755 tests/testcache.py diff --git a/README.md b/README.md deleted file mode 100644 index a7aa9ed..0000000 --- a/README.md +++ /dev/null @@ -1,155 +0,0 @@ -Building OS images in NOVA -========================== - -This is an early demonstration of a new image building approach for OpenStack. - -It is a command line tool that builds working OpenStack images by -running Anaconda or other native installers within Nova. In its simplest form -it requires only a kickstart or preseed file as input. All of the heavy lifting -is done inside of OpenStack instances. - -Early discussion of this approach can be found here: - -https://wiki.openstack.org/wiki/NovaImageBuilding - -It has been developed and tested on RHEL6 and the Folsom OpenStack release installed -using packstack. However, it should work with newer host OSes and newer OpenStack releases. 
- -To try it out install the requirements listed below then run commands like this: - -(substituting the details of your own OpenStack environment where indicated) - - -#### Create a Fedora 18 JEOS image in glance using a network install - - ./create_image.py --username admin --tenant admin --password password --auth-url http://10.10.10.10:5000/v2.0 \ - --glance-url http://10.10.10.10:9292/ --root-password myrootpw install_scripts/fedora-18-jeos.ks - -#### Create an Ubuntu 12.04 image in glance using a network install - - ./create_image.py --username admin --tenant admin --password password --auth-url http://10.10.10.10:5000/v2.0 \ - --glance-url http://10.10.10.10:9292/ --root-password myrootpw \ - install_scripts/ubuntu-12.04-jeos.preseed - -#### Create a Fedora 18 JEOS image as a volume snapshot using a network install - - ./create_image.py --username admin --tenant admin --password password --auth-url http://10.10.10.10:5000/v2.0 \ - --glance-url http://10.10.10.10:9292/ --root-password myrootpw --create-volume \ - install_scripts/fedora-18-jeos.ks - -#### Create a Fedora 18 JEOS image as a volume snapshot using an install DVD pulled from a Fedora mirror - - ./create_image.py --username admin --tenant admin --password password --auth-url http://10.10.10.10:5000/v2.0 \ - --create-volume --install-media-url \ - http://mirror.pnl.gov/fedora/linux/releases/18/Fedora/x86_64/iso/Fedora-18-x86_64-DVD.iso \ - --install-tree-url \ - http://mirror.pnl.gov/fedora/linux/releases/18/Fedora/x86_64/os/ \ - --glance-url http://10.10.10.10:9292/ --root-password myrootpw install_scripts/fedora-18-jeos-DVD.ks - -#### Create a Fedora 18 JEOS image as a volume snapshot by re-using the DVD volume snapshot created above - - ./create_image.py --username admin --tenant admin --password password --auth-url http://10.10.10.10:5000/v2.0 \ - --create-volume --install-media-snapshot \ - --install-tree-url \ - http://mirror.pnl.gov/fedora/linux/releases/18/Fedora/x86_64/os/ \ - --glance-url 
http://10.10.10.10:9292/ --root-password myrootpw install_scripts/fedora-18-jeos-DVD.ks - - -### What does this do? - -The script generates a small syslinux-based bootable image that is used -to start unattended Anaconda or Ubuntu installations. It contains only -the initrd and vmlinuz from the install source and a syslinux.cfg file. -The installer then writes over this minimal image. - -The kickstart/preseed files are passed to the installers via OpenStack -user-data and the appropriate kernel command line parameters in the -syslinux configuration file. - -The script uploads this bootstrapping image to glance, launches it, and -waits for it to shut down. If shutdown occurs within the timeout period -we assume that the installer has finished and take a snapshot of the current -instance state, which is the completed install. - -You can monitor progress via Anaconda's VNC support, which is enabled -in the example kickstarts under the "install_scripts" directory. The -script reports the instance IP and gives the exact invocation of -vncviewer that is needed to connect to the install. - -You can do something similar with an Ubuntu install using an SSH console. -However, this feature stops the installation and waits for user input so -it is commented out in the example preseed files. See instructions in -the comments for how to enable this. - - -### What operating systems can it support? - -The install_scripts contains known-working kickstart and preseed files for: - -Fedora 18, Fedora 17, RHEL 6.4, RHEL 5.9 - -Ubuntu 12.10, 12.04 and 10.04 - -This approach should work as far back as Fedora 10 and RHEL 4 U8 and on -other Linux variants including SLES. - - -### Volume Based Images - -By default the script will build a Glance backed image. If passed the ---create-volume option it will instead build a volume backed "snapshot" -image. - - -### ISO Install Media - -It also contains initial support for presenting installer ISO images as -a source for installation packages. 
This support has only been tested for -Fedora 18 for the moment. It is somewhat limited because OpenStack currently -only allows these images to be mapped into the instance as "normal" -block devices, rather than CDROMs. Not all installers can deal with this. - -(Note: When using the install media volume feature you must still pass -a "--install-tree-url" option as demonstrated in the examples above. This -is necessary to allow the script to retrieve the install kernel and ramdisk -without having to pull down a copy of the entire ISO.) - -### Requirements - -This script has been tested with the following OpenStack client packages: - -* python-glanceclient-0.5.1-1.el6.noarch -* python-novaclient-2.10.0-2.el6.noarch -* python-keystoneclient-0.1.3.27-1.el6.noarch -* python-cinderclient-0.2.26-1.el6.noarch - -Newer and older versions may work. - -It also requires: - -* python-libguestfs -* syslinux -* qemu-img - -If you want to view ongoing installs over VNC you will need: - -* tigervnc - - -### TODO - -Better documentation - -Better error detection and reporting - -Support for more operating systems. 
- -Support for sourcing install scripts through libosinfo - -Support for enhanced block device mapping when it becomes available - -Support for direct booting of kernel/ramdisk/cmdline combinations when/if it is added to Nova - -Improved detection of install success or failure - -Support for caching of self-install images diff --git a/etc/imagebuilder/imagebuilder.conf b/etc/imagebuilder/imagebuilder.conf deleted file mode 100644 index 9528529..0000000 --- a/etc/imagebuilder/imagebuilder.conf +++ /dev/null @@ -1,5 +0,0 @@ -[DEFAULT] - -host=0.0.0.0 -port=1235 -persistence_backend=SQLAlquemy \ No newline at end of file diff --git a/imagebuilder-api b/imagebuilder-api deleted file mode 100755 index 6dd9833..0000000 --- a/imagebuilder-api +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Author: Doug Hellmann -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from imagebuilder.api import app - - -if __name__ == '__main__': - app.start() diff --git a/imagebuilder/MongoPersistentBuildManager.py b/imagebuilder/MongoPersistentBuildManager.py deleted file mode 100644 index a08443f..0000000 --- a/imagebuilder/MongoPersistentBuildManager.py +++ /dev/null @@ -1,124 +0,0 @@ -# encoding: utf-8 - -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import pymongo - - -DB_NAME = "imagebuilder_db" -COLLECTION_NAME = "imagebuilder_collection" - - -class MongoPersistentBuildManager(object): - """ TODO: Docstring for PersistentBuildManager """ - - def __init__(self): - self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) - self.con = pymongo.Connection() - self.db = self.con[DB_NAME] - self.collection = self.db[COLLECTION_NAME] - - def all_builds(self): - try: - builds = self.builds_from_query(None) - except Exception as e: - self.log.exception('Failure listing builds: %s' % e) - - return builds - - def build_with_id(self, build_id): - """ - TODO: Docstring for build_with_id - - @param build_id TODO - - @return TODO - """ - try: - # build = self._builds_from_query({"_id": ObjectId(build_id)}) - build = self.builds_from_query({"identifier": build_id})[0] - except Exception as e: - self.log.debug('Exception caught: %s' % e) - return None - - return build - - def add_build(self, build): - """ - Add a PersistentBuild-type object to this PersistentBuildManager - This should only be called with an build that has not yet been added to the store. 
- To retrieve a previously persisted build use build_with_id() or build_query() - - @param build TODO - - @return TODO - """ - if 'identifier' in build: - metadata = self.collection.find_one({"_id": build['identifier']}) - if metadata: - raise Exception("Image %s already managed, use build_with_id() and save_build()" % - (build['identifier'])) - return self._save_build(build) - - def save_build(self, build): - """ - TODO: Docstring for save_build - - @param build TODO - - @return TODO - """ - build_id = str(build['identifier']) - metadata = self._builds_from_mongo_cursor(self.collection.find_one({"_id": build_id})) - if not metadata: - raise Exception('Image %s not managed, use "add_build()" first.' % build_id) - self._save_build(build) - - def _save_build(self, build): - try: - self.collection.insert(build) - self.log.debug("Saved metadata for build (%s)" % (build['identifier'])) - return build['identifier'] - except Exception as e: - self.log.debug('Exception caught: %s' % e) - raise Exception('Unable to save build metadata: %s' % e) - - def delete_build_with_id(self, build_id): - """ - TODO: Docstring for delete_build_with_id - - @param build_id TODO - - @return TODO - """ - try: - self.collection.remove(build_id) - except Exception as e: - self.log.warn('Unable to remove record: %s' % e) - - def builds_from_query(self, query): - mongo_cursor = self.collection.find(query) - builds = self._builds_from_mongo_cursor(mongo_cursor) - return builds - - def _builds_from_mongo_cursor(self, mongo_cursor): - builds = [] - for build in mongo_cursor: - build_dict = {} - for k, v in build.items(): - build_dict[k.__str__()] = v.__str__() - builds.append(build_dict) - return builds diff --git a/imagebuilder/SQLAlchemyPersistentBuildManager.py b/imagebuilder/SQLAlchemyPersistentBuildManager.py deleted file mode 100644 index 197c05a..0000000 --- a/imagebuilder/SQLAlchemyPersistentBuildManager.py +++ /dev/null @@ -1,119 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import Column, Integer, String -from sqlalchemy.orm import sessionmaker -from sqlalchemy import create_engine -import logging - - -Base = declarative_base() -engine = create_engine('sqlite:///imagebuilder.db', echo=True) -Session = sessionmaker(bind=engine) - -class Build(Base): - __tablename__ = 'imagebuilder_builds' - - id = Column(String, primary_key=True) - status = Column(String) - name = Column(String) - glance_id = Column(String) - cinder_id = Column(String) - nova_id = Column(String) - - def __init__(self, id, name): - self.id = id - self.name = name - - def __repr__(self): - return "" % (self.name, self.id) - -class SQLAlchemyPersistentBuildManager(object): - """ TODO: Docstring for PersistentBuildManager """ - - def __init__(self): - self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) - self.session = Session() - - def build_with_id(self, build_id): - """ - TODO: Docstring for build_with_id - - @param build_id TODO - - @return TODO - """ - build = self.session.query(Build).filter_by(id=build_id) - - return self._builds_from_iterative(build) - - def add_build(self, build): - """ - Add a PersistentBuild-type object to this PersistenBuildManager - This should only be called with an build that has not yet been added to the store. 
- To retrieve a previously persisted build use build_with_id() or build_query() - - @param build TODO - - @return TODO - """ - - return self._save_build(build) - - def save_build(self, build): - """ - TODO: Docstring for save_build - - @param build TODO - - @return TODO - """ - self._save_build(build) - - def _save_build(self, build): - try: - b = Build(build['id'], build['name']) - b.status = build['state'] - self.session.add(b) - self.session.commit() - self.log.debug("Saved metadata for build (%s)" % (b)) - return b.id - except Exception as e: - self.log.debug('Exception caught: %s' % e) - raise Exception('Unable to save build metadata: %s' % e) - - def all_builds(self): - builds = self.session.query(Build).all() - return self._builds_from_iterative(builds) - - def builds_from_query(self, query): - if not query: - return self.all_builds() - - def _builds_from_iterative(self, iterative): - builds = [] - for build in iterative: - build_dict = {} - build_dict['id'] = build.id - build_dict['name'] = build.name - build_dict['status'] = build.status - build_dict['glance_id'] = build.glance_id - build_dict['cinder_id'] = build.cinder_id - build_dict['nova_id'] = build.cinder_id - builds.append(build_dict) - return builds - -Base.metadata.create_all(engine) diff --git a/imagebuilder/__init__.py b/imagebuilder/__init__.py deleted file mode 100644 index 01e3433..0000000 --- a/imagebuilder/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/imagebuilder/api/README b/imagebuilder/api/README deleted file mode 100644 index d58de84..0000000 --- a/imagebuilder/api/README +++ /dev/null @@ -1,28 +0,0 @@ -WARNING: -------- -THIS IS STILL VERY BUGGY! -1) The response on the POST doesn't work with every client. -2) The responses are all str instead of real objects. - - -REQUIREMENTS: ------------- -Requires pecan and wsme - -On Fedora, install the packages python-pecan and python-wsme - - -RUNNING: -------- -For now, run the following: - - image-building-poc% pecan serve openstack-imagebuilder/api/config.py - - -- Once the server starts, use HTTPie or curl to POST to /osib/v1/builds - ex - http --json POST localhost:8080/osib/v1/builds - -- Now you can GET that object using the '_id' - ex - http --json localhost:8080/osib/v1/builds/51a9293ff731080a5ac2a24b \ No newline at end of file diff --git a/imagebuilder/api/__init__.py b/imagebuilder/api/__init__.py deleted file mode 100644 index 01e3433..0000000 --- a/imagebuilder/api/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/imagebuilder/api/app.py b/imagebuilder/api/app.py deleted file mode 100644 index 68c38c9..0000000 --- a/imagebuilder/api/app.py +++ /dev/null @@ -1,75 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import sys -import os -import logging -import pecan -from imagebuilder import service -from imagebuilder.api import config as pecan_config -from imagebuilder.openstack.common import log -from oslo.config import cfg -from wsgiref import simple_server - - -def get_pecan_config(): - # Set up the pecan configuration - filename = pecan_config.__file__.replace('.pyc', '.py') - return pecan.configuration.conf_from_file(filename) - - -def setup_app(config): - if not config: - config = get_pecan_config() - pecan.configuration.set_config(dict(config), overwrite=True) - return pecan.make_app( - config.app['root'], - static_root=config.app['static_root'], - template_path=config.app['template_path'], - debug=cfg.CONF.debug, - force_canonical=getattr(config.app, 'force_canonical', True), - ) - -def start(): - # Parse OpenStack config file and command line options, then - # configure logging. 
- service.prepare_service(sys.argv) - - # Build the WSGI app - host, port = cfg.CONF['host'], cfg.CONF['port'] - srvr_config = get_pecan_config() - srvr_config['server']['host'] = host - srvr_config['server']['port'] = port - root = setup_app(srvr_config) - # Create the WSGI server and start it - srvr = simple_server.make_server(host, port, root) - - LOG = log.getLogger(__name__) - LOG.info('Starting server in PID %s' % os.getpid()) - LOG.info("Configuration:") - cfg.CONF.log_opt_values(LOG, logging.INFO) - - if host == '0.0.0.0': - LOG.info('serving on 0.0.0.0:%s, view at http://127.0.0.1:%s' % - (port, port)) - else: - LOG.info("serving on http://%s:%s" % (host, port)) - - try: - srvr.serve_forever() - except KeyboardInterrupt: - # allow CTRL+C to shutdown without an error - LOG.info("Shutting down...") \ No newline at end of file diff --git a/imagebuilder/api/config.py b/imagebuilder/api/config.py deleted file mode 100644 index ebc023c..0000000 --- a/imagebuilder/api/config.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Server Specific Configurations -server = { - 'port': 8080, - 'host': '0.0.0.0' -} - -# Pecan Application Configurations -app = { - 'root': 'imagebuilder.api.controllers.RootController', - 'modules': ['imagebuilder.api'], - 'static_root': '%(confdir)s/public', - 'template_path': '%(confdir)s/api/templates', - 'debug': True, - 'errors': { - 404: '/error/404', - '__force_dict__': True - } -} - -logging = { - 'loggers': { - 'root': {'level': 'INFO', 'handlers': ['console']}, - 'osib': {'level': 'DEBUG', 'handlers': ['console']} - }, - 'handlers': { - 'console': { - 'level': 'DEBUG', - 'class': 'logging.StreamHandler', - 'formatter': 'simple' - } - }, - 'formatters': { - 'simple': { - 'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]' - '[%(threadName)s] %(message)s') - } - } -} - -# Custom Configurations must be in Python dictionary format:: -# -# foo = {'bar':'baz'} -# -# All configurations are accessible at:: -# pecan.conf diff --git a/imagebuilder/api/controllers/osib/v1/Builds.py b/imagebuilder/api/controllers/osib/v1/Builds.py deleted file mode 100644 index 24f6ffe..0000000 --- a/imagebuilder/api/controllers/osib/v1/Builds.py +++ /dev/null @@ -1,72 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from pecan.rest import RestController -from wsmeext.pecan import wsexpose as expose -from wsme import types as wtypes -from imagebuilder.MongoPersistentBuildManager import MongoPersistentBuildManager -from uuid import uuid4 as uuid - - -class Build(object): - identifier = wtypes.text - status = wtypes.text - name = wtypes.text - glance_id = wtypes.text - cinder_id = wtypes.text - nova_id = wtypes.text - - def __init__(self, props={}): - for k in props.keys(): - setattr(self, k, props[k]) - -class BuildController(RestController): - def __init__(self): - self.pim = MongoPersistentBuildManager() - - # RESOURCE PATH: [GET] /osib/v1/builds - @expose([Build]) - def get_all(self): - builds = [] - for item in self.pim.all_builds(): - builds.append(Build(item)) - return builds - - # RESOURCE PATH: [GET] /osib/v1/builds/:uuid - @expose(Build, wtypes.text) - def get_one(self, build_id): - data = self.pim.build_with_id(build_id) - return Build(data) - - # RESOURCE PATH: [POST] /osib/v1/builds - @expose(Build) - def post(self): - build = {'identifier': str(uuid())} - self.pim.add_build(build) - return Build(build) - - # RESOURCE PATH: [PUT] /osib/v1/builds/:uuid - @expose(Build, wtypes.text, wtypes.text) - def put(self, build_id, build_updates): - build = self.pim.build_with_id(build_id) - build.update(build_updates) - self.pim.save_build(build) - return Build(build) - - # RESOURCE PATH: [DELETE] /osib/v1/builds/:uuid - @expose(wtypes.text) - def delete(self, build_id): - self.pim.delete_build_with_id(build_id) diff --git a/imagebuilder/api/controllers/osib/v1/__init__.py b/imagebuilder/api/controllers/osib/v1/__init__.py deleted file mode 100644 index 474bb9c..0000000 --- a/imagebuilder/api/controllers/osib/v1/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from pecan.rest import RestController -from Builds import BuildController - -class V1Controller(RestController): - builds = BuildController() \ No newline at end of file diff --git a/imagebuilder/api/templates/error.html b/imagebuilder/api/templates/error.html deleted file mode 100644 index f2d9796..0000000 --- a/imagebuilder/api/templates/error.html +++ /dev/null @@ -1,12 +0,0 @@ -<%inherit file="layout.html" /> - -## provide definitions for blocks we want to redefine -<%def name="title()"> - Server Error ${status} - - -## now define the body of the template -
-

Server Error ${status}

-
-

${message}

diff --git a/imagebuilder/api/templates/index.html b/imagebuilder/api/templates/index.html deleted file mode 100644 index ce01e11..0000000 --- a/imagebuilder/api/templates/index.html +++ /dev/null @@ -1,34 +0,0 @@ -<%inherit file="layout.html" /> - -## provide definitions for blocks we want to redefine -<%def name="title()"> - Welcome to Pecan! - - -## now define the body of the template -
-

-
- -
- -

This is a sample Pecan project.

- -

- Instructions for getting started can be found online at pecanpy.org -

- -

- ...or you can search the documentation here: -

- -
-
- - -
- Enter search terms or a module, class or function name. - - -
diff --git a/imagebuilder/api/templates/layout.html b/imagebuilder/api/templates/layout.html deleted file mode 100644 index 4090859..0000000 --- a/imagebuilder/api/templates/layout.html +++ /dev/null @@ -1,22 +0,0 @@ - - - ${self.title()} - ${self.style()} - ${self.javascript()} - - - ${self.body()} - - - -<%def name="title()"> - Default Title - - -<%def name="style()"> - - - -<%def name="javascript()"> - - diff --git a/imagebuilder/api/tests/__init__.py b/imagebuilder/api/tests/__init__.py deleted file mode 100644 index 78ea527..0000000 --- a/imagebuilder/api/tests/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -from unittest import TestCase -from pecan import set_config -from pecan.testing import load_test_app - -__all__ = ['FunctionalTest'] - - -class FunctionalTest(TestCase): - """ - Used for functional tests where you need to test your - literal application and its integration with the framework. - """ - - def setUp(self): - self.app = load_test_app(os.path.join( - os.path.dirname(__file__), - 'config.py' - )) - - def tearDown(self): - set_config({}, overwrite=True) diff --git a/imagebuilder/api/tests/config.py b/imagebuilder/api/tests/config.py deleted file mode 100644 index 0ca57e3..0000000 --- a/imagebuilder/api/tests/config.py +++ /dev/null @@ -1,25 +0,0 @@ -# Server Specific Configurations -server = { - 'port': '8080', - 'host': '0.0.0.0' -} - -# Pecan Application Configurations -app = { - 'root': 'imagebuilder.api.controllers.RootController', - 'modules': ['imagebuilder.api'], - 'static_root': '%(confdir)s/../../public', - 'template_path': '%(confdir)s/../templates', - 'debug': True, - 'errors': { - '404': '/error/404', - '__force_dict__': True - } -} - -# Custom Configurations must be in Python dictionary format:: -# -# foo = {'bar':'baz'} -# -# All configurations are accessible at:: -# pecan.conf diff --git a/imagebuilder/api/tests/test_functional.py b/imagebuilder/api/tests/test_functional.py deleted file mode 100644 index 3f90a36..0000000 --- 
a/imagebuilder/api/tests/test_functional.py +++ /dev/null @@ -1,22 +0,0 @@ -from unittest import TestCase -from webtest import TestApp -from pdiddy.tests import FunctionalTest - - -class TestRootController(FunctionalTest): - - def test_get(self): - response = self.app.get('/') - assert response.status_int == 200 - - def test_search(self): - response = self.app.post('/', params={'q': 'RestController'}) - assert response.status_int == 302 - assert response.headers['Location'] == ( - 'http://pecan.readthedocs.org/en/latest/search.html' - '?q=RestController' - ) - - def test_get_not_found(self): - response = self.app.get('/a/bogus/url', expect_errors=True) - assert response.status_int == 404 diff --git a/imagebuilder/api/tests/test_units.py b/imagebuilder/api/tests/test_units.py deleted file mode 100644 index 573fb68..0000000 --- a/imagebuilder/api/tests/test_units.py +++ /dev/null @@ -1,7 +0,0 @@ -from unittest import TestCase - - -class TestUnits(TestCase): - - def test_units(self): - assert 5 * 5 == 25 diff --git a/imagebuilder/create_image.py b/imagebuilder/create_image.py deleted file mode 100755 index a2b2f74..0000000 --- a/imagebuilder/create_image.py +++ /dev/null @@ -1,274 +0,0 @@ -#!/usr/bin/python -# -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import sys -import shutil -import argparse -from tempfile import mkdtemp -from time import strftime, gmtime -from image_utils import * - - -def get_cli_arguments(): - parser = argparse.ArgumentParser(description='Launch and snapshot a kickstart install using syslinux and Glance') - ospar = parser.add_argument_group('OpenStack Enviornment') - ospar.add_argument('--auth-url', dest='auth_url', required=True, help='URL for keystone authorization') - ospar.add_argument('--username', dest='username', required=True, help='username for keystone authorization') - ospar.add_argument('--tenant', dest='tenant', required=True, help='tenant for keystone authorization') - ospar.add_argument('--password', dest='password', required=True, help='password for keystone authorization') - ospar.add_argument('--glance-url', dest='glance_url', required=True, help='URL for glance service') - install_media_desc="""When one of these arguments is given the install environment will contain a second - block device. The image presented on this device can come from a URL, a file or - a pre-existing volume snapshot. 
You may only use one of these options at a time - and you can only use them in conjunction with the 'create-volume' option.""" - install_media = parser.add_argument_group('Install Media', install_media_desc) - install_media.add_argument('--install-media-url', dest='install_media_url', - help='Add an install media device using content at this URL') - install_media.add_argument('--install-media-file', dest='install_media_file', - help='Add an install media device using this file as a media image') - install_media.add_argument('--install-media-snapshot', dest='install_media_snapshot', - help='Add an install media device by creating a volume from this snapshot id') - instpar = parser.add_argument_group('Installation Parameters') - instpar.add_argument('--root-password', dest='admin_password', required=True, - help='root password for the resulting image - also used for optional remote access during install') - instpar.add_argument('--create-volume', dest='create_volume', action='store_true', default=False, - help='Create a volume snapshot instead of the default Glance snapshot (optional)') - instpar.add_argument('--install-volume-size', dest='install_volume_size', default=10, - help='Size of the install destination volume in GB (default: 10)') - instpar.add_argument('--install-tree-url', dest='install_tree_url', - help='URL for preferred network install tree (optional)') - instpar.add_argument('--distro', dest='distro', help='distro - must be "rpm" or "ubuntu" (optional)') - instpar.add_argument('--image-name', dest='image_name', help='name to assign newly created image (optional)') - instpar.add_argument('--leave-mess', dest='leave_mess', action='store_true', default=False, - help='Do not clean up local or remote artifacts when finished or when an error is encountered') - parser.add_argument('ks_file', help='kickstart/install-script file to use for install') - return parser.parse_args() - -def create_image(args): - # This is a string - working_kickstart = 
do_pw_sub(args.ks_file, args.admin_password) - - distro = detect_distro(working_kickstart) - if args.distro: - # Allow the command line distro to override our guess above - distro = args.distro - - (install_tree_url, console_password, console_command, poweroff) = install_extract_bits(working_kickstart, distro) - if args.install_tree_url: - # Allow the specified tree to override anything extracted above - install_tree_url = args.install_tree_url - - if args.image_name: - image_name = args.image_name - else: - image_name = "Image from ks file: %s - Date: %s" % (os.path.basename(args.ks_file), strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())) - - # Let's be nice and report as many error conditions as we can before exiting - error = False - - if (args.install_media_url or args.install_media_file or args.install_media_snapshot) and not args.create_volume: - print "ERROR: You can only use install media when creating a volume snapshot image using the --create-volume option." - error = True - - if (args.install_media_url and args.install_media_file) or (args.install_media_file and args.install_media_snapshot) or \ - (args.install_media_url and args.install_media_snapshot): - print "ERROR: You may only specify a single install media source" - error = True - - if not install_tree_url: - print "ERROR: no install tree URL specified and could not extract one from the kickstart/install-script" - error = True - - if not distro: - print "ERROR: no distro specified and could not guess based on the kickstart/install-script" - error = True - - if not poweroff: - if distro == "rpm": - print "ERROR: supplied kickstart file must contain a 'poweroff' line" - elif distro == "ubuntu": - print "ERROR: supplied preseed must contain a 'd-i debian-installer/exit/poweroff boolean true' line" - error = True - - if error: - sys.exit(1) - - # We start creating artifacts here - cleanup in finally - modified_image = None # filename - tmp_content_dir = None # directory - install_image = None # Nova 
image object - install_media_volume=None # cinder volume object - install_media_snapshot_id=None # UUID string - installed_instance = None # Nova instance object - finished = False # silly marker - retcode = 0 - - try: - # Artifact of borrowing factory code - pass this as a dict - creds = { 'username': args.username, 'tenant': args.tenant, 'password': args.password, 'auth_url': args.auth_url } - - # Generate "blank" syslinux bootable mini-image - # This is the only step that strictly requires root access due to the need - # for a loopback mount to install the bootloader - generate_blank_syslinux() - - # Take a copy of it - if args.create_volume: - disk_format = 'raw' - modified_image = "./syslinux_modified_%s.raw" % os.getpid() - try: - subprocess_check_output(["qemu-img","convert","-O","raw","./syslinux.qcow2",modified_image]) - except: - print "Exception while converting image to raw" - raise - else: - disk_format = 'qcow2' - modified_image = "./syslinux_modified_%s.qcow2" % os.getpid() - shutil.copy("./syslinux.qcow2",modified_image) - - # Generate the content to put into the image - tmp_content_dir = mkdtemp() - print "Collecting boot content for auto-install image" - generate_boot_content(install_tree_url, tmp_content_dir, distro, args.create_volume) - - # Copy in the kernel, initrd and conf files into the blank boot stub using libguestfs - print "Copying boot content into a bootable syslinux image" - copy_content_to_image(tmp_content_dir, modified_image) - - # Upload the resulting image to glance - print "Uploading image to glance" - install_image = glance_upload(image_filename = modified_image, image_url = None, creds = creds, glance_url = args.glance_url, - name = "INSTALL for: %s" % (image_name), disk_format=disk_format) - - print "Uploaded successfully as glance image (%s)" % (install_image.id) - - install_volume=None - # TODO: Make volume size configurable - if args.create_volume: - print "Converting Glance install image to a Cinder volume" - 
install_volume = volume_from_image(install_image.id, creds, args.glance_url, volume_size = args.install_volume_size) - - - if args.install_media_url or args.install_media_file: - if args.install_media_url: - print "Generating Glance image from URL: %s" % (args.install_media_url) - install_media_image = glance_upload(image_filename = None, image_url = args.install_media_url, - creds = creds, glance_url = args.glance_url, name = "FromURL: %s" % (args.install_media_url), - disk_format='raw') - else: - print "Generating Glance image from file: %s" % (args.install_media_file) - install_media_image = glance_upload(image_filename = args.install_media_file, image_url = None, - creds = creds, glance_url = args.glance_url, name = os.path.basename(args.install_media_file), - disk_format='raw') - - print "Generating volume from image (%s)" % (install_media_image.id) - install_media_volume = volume_from_image(install_media_image.id, creds, args.glance_url) - print "Generating snapshot of volume (%s) to allow install media reuse" % (install_media_volume.id) - install_media_snapshot = snapshot_from_volume(install_media_volume.id, creds) - install_media_snapshot_id = install_media_snapshot.id - print "#### Future installs can reference this snapshot with the following argument:" - print " --install-media-snapshot %s" % install_media_snapshot_id - elif args.install_media_snapshot: - print "Generating working volume from snapshot (%s)" % (args.install_media_snapshot) - install_media_snapshot_id = args.install_media_snapshot - install_media_volume = volume_from_snapshot(args.install_media_snapshot, creds) - - # Launch the image with the provided ks.cfg as the user data - # Optionally - spawn a vncviewer to watch the install graphically - # Poll on image status until it is SHUTDOWN or timeout - print "Launching install image" - installed_instance = launch_and_wait(install_image, install_volume, install_media_volume, working_kickstart, - os.path.basename(args.ks_file), creds, 
console_password, console_command) - - # Take a snapshot of the now safely shutdown image - # For volume snapshots we must terminate the instance first then snapshot - # For glance/image snapshots we must _not_ terminate the instance until the snapshot is complete - print "Taking snapshot of completed install" - if args.create_volume: - print "Terminating instance (%s) in preparation for taking a snapshot of the root volume" % (installed_instance.id) - terminate_instance(installed_instance.id, creds) - installed_instance = None - finished_image_snapshot = snapshot_from_volume(install_volume.id, creds) - print "Volume-based image available from snapshot ID: %s" % (finished_image_snapshot.id) - print "Finished snapshot name is: %s" % (finished_image_snapshot.display_name) - finished = True - else: - finished_image_id = installed_instance.create_image(image_name) - print "Waiting for glance image snapshot to complete" - wait_for_glance_snapshot(finished_image_id, creds, args.glance_url) - print "Terminating instance (%s) now that snapshot is complete" % (installed_instance.id) - terminate_instance(installed_instance.id, creds) - installed_instance = None - print "Finished image snapshot ID is: %s" % (finished_image_id) - print "Finished image name is: %s" % (image_name) - finished = True - - except Exception as e: - print "Uncaught exception encountered during install" - print str(e) - retcode = 1 - - finally: - if args.leave_mess: - print "Leaving a mess - this includes local files, local dirs, remote images, remote volumes and remote snapshots" - sys.exit(retcode) - - print "Cleaning up" - - try: - if tmp_content_dir: - print "Removing boot content dir" - shutil.rmtree(tmp_content_dir) - - if modified_image: - print "Removing install image %s" % (modified_image) - #TODO:Note that thie is actually cacheable on a per-os-version basis - os.remove(modified_image) - - if installed_instance: - # Note that under normal operation this is terminated when completing the 
snapshot process - print "Terminating install instance (%s)" % (installed_instance.id) - terminate_instance(installed_instance.id, creds) - - if install_image: - print "Deleting Glance image (%s) used to launch install" % (install_image.id) - install_image.delete() - - if install_media_volume: - print "Removing working volume containing install media" - print "Snapshot (%s) remains available for future use" % (install_media_snapshot_id) - install_media_volume.delete() - except: - print "WARNING: Exception while attempting to clean up - we may have left a mess" - retcode = 1 - - # For usability - reprint the most important bits from above as the last output - if finished: - print "FINISHED!" - print - print "Image Details:" - if args.create_volume: - print "Volume snapshot name: %s" % (finished_image_snapshot.display_name) - print "ID: %s" % (finished_image_snapshot.id) - else: - print "Glance image name: %s" % (image_name) - print "ID: %s" % (finished_image_id) - - sys.exit(retcode) - -if __name__ == '__main__': - create_image(get_cli_arguments()) \ No newline at end of file diff --git a/imagebuilder/image_utils.py b/imagebuilder/image_utils.py deleted file mode 100755 index 188aef5..0000000 --- a/imagebuilder/image_utils.py +++ /dev/null @@ -1,591 +0,0 @@ -#!/usr/bin/python -# -# Copyright 2013 Red Hat, Inc. -# Portions Copyright (C) 2010,2011,2012 Chris Lalancette -# Portions Copyright (C) 2012,2013 Chris Lalancette -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import sys -import subprocess -import re -from string import Template -from tempfile import NamedTemporaryFile, TemporaryFile -from time import sleep - -import guestfs -import pycurl -from glanceclient import client as glance_client -from cinderclient import client as cinder_client -from keystoneclient.v2_0 import client as keystone_client -from novaclient.v1_1 import client as nova_client - - - -### Utility functions borrowed from Oz and lightly modified -def executable_exists(program): - """ - Function to find out whether an executable exists in the PATH - of the user. If so, the absolute path to the executable is returned. - If not, an exception is raised. - """ - def is_exe(fpath): - """ - Helper method to check if a file exists and is executable - """ - return os.path.exists(fpath) and os.access(fpath, os.X_OK) - - if program is None: - raise Exception("Invalid program name passed") - - fpath, fname = os.path.split(program) - if fpath: - if is_exe(program): - return program - else: - for path in os.environ["PATH"].split(os.pathsep): - exe_file = os.path.join(path, program) - if is_exe(exe_file): - return exe_file - - raise Exception("Could not find %s" % (program)) - - -def subprocess_check_output(*popenargs, **kwargs): - """ - Function to call a subprocess and gather the output. - Addresses a lack of check_output() prior to Python 2.7 - """ - if 'stdout' in kwargs: - raise ValueError('stdout argument not allowed, it will be overridden.') - if 'stderr' in kwargs: - raise ValueError('stderr argument not allowed, it will be overridden.') - - executable_exists(popenargs[0][0]) - - # NOTE: it is very, very important that we use temporary files for - # collecting stdout and stderr here. There is a nasty bug in python - # subprocess; if your process produces more than 64k of data on an fd that - # is using subprocess.PIPE, the whole thing will hang. 
To avoid this, we - # use temporary fds to capture the data - stdouttmp = TemporaryFile() - stderrtmp = TemporaryFile() - - process = subprocess.Popen(stdout=stdouttmp, stderr=stderrtmp, *popenargs, - **kwargs) - process.communicate() - retcode = process.poll() - - stdouttmp.seek(0, 0) - stdout = stdouttmp.read() - stdouttmp.close() - - stderrtmp.seek(0, 0) - stderr = stderrtmp.read() - stderrtmp.close() - - if retcode: - cmd = ' '.join(*popenargs) - raise Exception("'%s' failed(%d): %s" % (cmd, retcode, stderr), retcode) - return (stdout, stderr, retcode) - - -def http_download_file(url, filename): - """ - Function to download a file from url to filename - """ - - def _data(buf): - """ - Function that is called back from the pycurl perform() method to - actually write data to disk. - """ - os.write(fd, buf) - - fd = os.open(filename,os.O_CREAT | os.O_WRONLY | os.O_TRUNC) - - try: - c = pycurl.Curl() - c.setopt(c.URL, url) - c.setopt(c.CONNECTTIMEOUT, 15) - c.setopt(c.WRITEFUNCTION, _data) - c.setopt(c.FOLLOWLOCATION, 1) - c.perform() - c.close() - finally: - os.close(fd) -### End of borrowed Oz functions - - -### Borrowed from Image Factory OpenStack plugin -def glance_upload(image_filename = None, image_url = None, creds = {'auth_url': None, 'password': None, 'strategy': 'noauth', 'tenant': None, 'username': None}, - glance_url = None, token = None, name = 'Factory Test Image', disk_format = 'raw'): - - k = keystone_client.Client(username=creds['username'], password=creds['password'], tenant_name=creds['tenant'], auth_url=creds['auth_url']) - - if (k.authenticate()): - #Connect to glance to upload the image - glance = glance_client.Client("1", endpoint=glance_url, token=k.auth_token) - image_meta = {'container_format': 'bare', - 'disk_format': disk_format, - 'is_public': True, - 'min_disk': 0, - 'min_ram': 0, - 'name': name, - 'properties': {'distro': 'rhel'}} - try: - image = glance.images.create(name=name) - if image_filename: - image_data = 
open(image_filename, "r") - image_meta['data'] = image_data - print "Uploading to Glance" - image.update(**image_meta) - elif image_url: - image_meta['copy_from'] = image_url - image.update(**image_meta) - print "Waiting for Glance to finish creating image from URL: %s" % (image_url) - while (image.status != 'active'): - if image.status == 'killed': - raise Exception("Glance error while waiting for image to generate from URL") - print '.', - sys.stdout.flush() - sleep(10) - image=glance.images.get(image.id) - return image - except Exception, e: - raise - else: - raise Exception("Unable to authenticate into glance") - -def volume_from_image(image_id, creds, glance_url, volume_size = None): - k = keystone_client.Client(username=creds['username'], password=creds['password'], tenant_name=creds['tenant'], auth_url=creds['auth_url']) - if not k.authenticate(): - raise Exception("Could not authenticate into keystone") - - glance = glance_client.Client("1", endpoint=glance_url, token=k.auth_token) - cinder = cinder_client.Client('1', creds['username'], creds['password'], creds['tenant'], creds['auth_url']) - try: - image = glance.images.get(image_id) - except: - raise Exception("Could not find Glance image with id" % (image_id)) - - # Unclear if this is strictly needed - # If size is not explicitly set then set it based on the image size - # TODO: Check if we even have to set a size when pulling from an image - if not volume_size: - # Gigabytes rounded up - volume_size = int(image.size/(1024*1024*1024)+1) - - print "Starting asyncronous copying to Cinder" - volume = cinder.volumes.create(volume_size, display_name=image.name, imageRef=image.id) - while (volume.status != 'available'): - print "Waiting for volume to be ready ... 
current status (%s)" % (volume.status) - sleep(5) - volume = cinder.volumes.get(volume.id) - if (volume.status == 'error'): - raise Exception('Error converting image to volume') - return volume - -def snapshot_from_volume(volume_id, creds): - cinder = cinder_client.Client('1', creds['username'], creds['password'], creds['tenant'], creds['auth_url']) - volume = volume=cinder.volumes.get(volume_id) - snapshot = cinder.volume_snapshots.create(volume.id,False,volume.display_name,volume.display_description) - while (snapshot.status != 'available'): - print "Waiting for snapshot to be ready ... current status (%s)" % (snapshot.status) - sleep(5) - snapshot = cinder.volume_snapshots.get(snapshot.id) - if snapshot.status == 'error': - raise Exception('Error while taking volume snapshot') - return snapshot - -def volume_from_snapshot(snapshot_id, creds): - cinder = cinder_client.Client('1', creds['username'], creds['password'], creds['tenant'], creds['auth_url']) - snapshot = cinder.volume_snapshots.get(snapshot_id) - volume = cinder.volumes.create(size=None, snapshot_id=snapshot_id, display_name=snapshot.display_name, - display_description=snapshot.display_description) - while (volume.status != 'available'): - print "Waiting for volume to be ready ... 
current status (%s)" % (volume.status) - sleep(5) - volume = cinder.volumes.get(volume.id) - if volume.status == 'error': - raise Exception('Error while taking volume snapshot') - return volume - -def ks_extract_bits(ksfile): - # I briefly looked at pykickstart but it more or less requires you know the version of the - # format you wish to use - # The approach below actually works as far back as RHEL5 and as recently as F18 - - install_url = None - console_password = None - console_command = None - poweroff = False - distro = None - - for line in ksfile.splitlines(): - # Install URL lines look like this - # url --url=http://download.devel.redhat.com/released/RHEL-5-Server/U9/x86_64/os/ - m = re.match("url.*--url=(\S+)", line) - if m and len(m.groups()) == 1: - install_url = m.group(1) - continue - - # VNC console lines look like this - # Inisist on a password being set - # vnc --password=vncpasswd - m = re.match("vnc.*--password=(\S+)", line) - if m and len(m.groups()) == 1: - console_password = m.group(1) - console_command = "vncviewer %s:1" - continue - - # SSH console lines look like this - # Inisist on a password being set - # ssh --password=sshpasswd - m = re.match("ssh.*--password=(\S+)", line) - if m and len(m.groups()) == 1: - console_password = m.group(1) - console_command = "ssh root@%s" - continue - - # We require a poweroff after install to detect completion - look for the line - if re.match("poweroff", line): - poweroff=True - continue - - return (install_url, console_password, console_command, poweroff) - -def install_extract_bits(install_file, distro): - if distro == "rpm": - return ks_extract_bits(install_file) - elif distro == "ubuntu": - return preseed_extract_bits(install_file) - else: - return (None, None, None, None) - -def preseed_extract_bits(preseedfile): - - install_url = None - console_password = None - console_command = None - poweroff = False - - for line in preseedfile.splitlines(): - - # Network console lines look like this: - # d-i 
network-console/password password r00tme - m = re.match("d-i\s+network-console/password\s+password\s+(\S+)", line) - if m and len(m.groups()) == 1: - console_password = m.group(1) - console_command = "ssh installer@%s\nNote that you MUST connect to this session for the install to continue\nPlease do so now\n" - continue - - # Preseeds do not need to contain any explicit pointers to network install sources - # Users can specify the install-url on the cmd line or provide a hint in a - # comment line that looks like this: - # "#ubuntu_baseurl=http://us.archive.ubuntu.com/ubuntu/dists/precise/" - m = re.match("#ubuntu_baseurl=(\S+)", line) - if m and len(m.groups()) == 1: - install_url = m.group(1) - - # A preseed poweroff directive looks like this: - # d-i debian-installer/exit/poweroff boolean true - if re.match("d-i\s+debian-installer/exit/poweroff\s+boolean\s+true", line): - poweroff=True - continue - - return (install_url, console_password, console_command, poweroff) - - -def detect_distro(install_script): - - for line in install_script.splitlines(): - if re.match("d-i\s+debian-installer", line): - return "ubuntu" - elif re.match("%packages", line): - return "rpm" - - return None - - -def generate_blank_syslinux(): - # Generate syslinux.qcow2 in working directory if it isn't already there - if os.path.isfile("./syslinux.qcow2"): - print "Found a syslinux.qcow2 image in the working directory - using it" - return - - print "Generating an empty bootable syslinux image as ./syslinux.qcow2" - raw_fs_image = NamedTemporaryFile(delete=False) - raw_image_name = raw_fs_image.name - try: - output_image_name = "./syslinux.qcow2" - - # 200 MB sparse file - outsize = 1024 * 1024 * 200 - raw_fs_image.truncate(outsize) - raw_fs_image.close() - - # Partition, format and add DOS MBR - g = guestfs.GuestFS() - g.add_drive(raw_image_name) - g.launch() - g.part_disk("/dev/sda","msdos") - g.part_set_mbr_id("/dev/sda",1,0xb) - g.mkfs("vfat", "/dev/sda1") - 
g.part_set_bootable("/dev/sda", 1, 1) - dosmbr = open("/usr/share/syslinux/mbr.bin").read() - ws = g.pwrite_device("/dev/sda", dosmbr, 0) - if ws != len(dosmbr): - raise Exception("Failed to write entire MBR") - g.sync() - g.close() - - # Install syslinux - this is the ugly root-requiring part - gotloop = False - for n in range(4): - # If this has a nonzero return code we will take the exception - (stdout, stderr, retcode) = subprocess_check_output(["losetup","-f"]) - loopdev = stdout.rstrip() - # Race - Try it a few times and then give up - try: - subprocess_check_output(["losetup",loopdev,raw_image_name]) - except: - sleep(1) - continue - gotloop = True - break - - if not gotloop: - raise Exception("Failed to setup loopback") - - loopbase = os.path.basename(loopdev) - - try: - subprocess_check_output(["kpartx","-a",loopdev]) - # On RHEL6 there seems to be a short delay before the mappings actually show up - sleep(5) - subprocess_check_output(["syslinux", "/dev/mapper/%sp1" % (loopbase)]) - subprocess_check_output(["kpartx", "-d", loopdev]) - subprocess_check_output(["losetup", "-d", loopdev]) - except: - print "Exception while executing syslinux install commands." 
- raise - - try: - subprocess_check_output(["qemu-img","convert","-c","-O","qcow2",raw_image_name,output_image_name]) - except: - print "Exception while converting image to qcow2" - - finally: - pass - # Leave a mess for debugging for now - #os.remove(raw_image_name) - - -def generate_boot_content(url, dest_dir, distro, create_volume): - """ - Insert kernel, ramdisk and syslinux.cfg file in dest_dir - source from url - """ - # TODO: Add support for something other than rhel5 - - if distro == "rpm": - kernel_url = url + "images/pxeboot/vmlinuz" - initrd_url = url + "images/pxeboot/initrd.img" - if create_volume: - # NOTE: RHEL5 and other older Anaconda versions do not support specifying the CDROM device - use with caution - cmdline = "ks=http://169.254.169.254/latest/user-data repo=cdrom:/dev/vdb" - else: - cmdline = "ks=http://169.254.169.254/latest/user-data" - elif distro == "ubuntu": - kernel_url = url + "main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux" - initrd_url = url + "main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz" - cmdline = "append preseed/url=http://169.254.169.254/latest/user-data debian-installer/locale=en_US console-setup/layoutcode=us netcfg/choose_interface=auto keyboard-configuration/layoutcode=us priority=critical --" - - kernel_dest = os.path.join(dest_dir,"vmlinuz") - http_download_file(kernel_url, kernel_dest) - - initrd_dest = os.path.join(dest_dir,"initrd.img") - http_download_file(initrd_url, initrd_dest) - - syslinux_conf="""default customhd -timeout 30 -prompt 1 -label customhd - kernel vmlinuz - append initrd=initrd.img %s -""" % (cmdline) - - f = open(os.path.join(dest_dir, "syslinux.cfg"),"w") - f.write(syslinux_conf) - f.close() - - -def copy_content_to_image(contentdir, target_image): - g = guestfs.GuestFS() - g.add_drive(target_image) - g.launch() - g.mount_options ("", "/dev/sda1", "/") - for filename in os.listdir(contentdir): - g.upload(os.path.join(contentdir,filename),"/" 
+ filename) - g.sync() - g.close() - -def wait_for_shutoff(instance, nova): - for i in range(1200): - status = nova.servers.get(instance.id).status - if status == "SHUTOFF": - print "Instance has entered SHUTOFF state" - return instance - if i % 10 == 0: - print "Waiting for instance status SHUTOFF - current status (%s): %d/1200" % (status, i) - sleep(1) - -def wait_for_noping(instance, nova, console_password, console_command): - # pre-grizzly releases are slow to notice an instance is shut off - see thread: - # http://lists.openstack.org/pipermail/openstack-dev/2013-January/004501.html - # - # This is an imperfect workaround using pings - - from ping import do_one - print "Warning - using ping to monitor progress - this is a crude shutdown detection scheme" - - # It is unclear where in the instance lifecycle this first becomes available - # Just try for a few minutes then give up - instance_ip = None - for i in range(18): - try: - instance = nova.servers.get(instance.id) - print "Instance status: %s" % (instance.status) - # First IP for the first key returned in the networks dict - instance_ip = instance.networks[instance.networks.keys()[0]][0] - break - except: - sleep(10) - pass - - if not instance_ip: - raise Exception("Unable to determine instance IP after 3 minutes") - - print "Using instance ip: %s" % (instance_ip) - print "Waiting 3 minutes for instance to respond to pings" - # First wait up to 3 minutes for ping to _start_ replying - started = False - for i in range(18): - print '.', - sys.stdout.flush() - if do_one(instance_ip, 10): - started = True - break - print '' - - if not started: - raise Exception("Instance at IP (%s) failed to start after 3 minutes." 
% (instance_ip) ) - - print "Instance responding to pings - waiting up to 40 minutes for it to stop" - # TODO: Automate this using subprocess - if console_password: - print "Install script contains a remove console directive with a password" - print "You should be able to view progress with the following command:" - print "$", - print console_command % (instance_ip) - print "password: %s" % (console_password) - print - print "Note that it may take a few mintues for the server to become available" - misses=0 - for i in range(240): - print '.', - sys.stdout.flush() - if do_one(instance_ip, 10): - misses=0 - sleep(10) - else: - print '-', - sys.stdout.flush() - misses += 1 - if misses == 4: - break - print '' - - if misses != 4: - print "Instance still pinging after 40 minutes - Assuming install failure" - return - - print "Instance has stopped responding to ping for at least 30 seconds - assuming install is complete" - return instance - - -def launch_and_wait(image, image_volume, install_media_volume, working_ks, instance_name, creds, console_password, console_command): - if install_media_volume and image_volume: - block_device_mapping = {'vda': image_volume.id + ":::0", 'vdb': install_media_volume.id + ":::0"} - elif image_volume: - block_device_mapping = {'vda': image_volume.id + ":::0" } - else: - block_device_mapping = None - - nova = nova_client.Client(creds['username'], creds['password'], creds['tenant'], - auth_url=creds['auth_url'], insecure=True) - instance = nova.servers.create(instance_name, image.id, 2, userdata=working_ks, meta={}, - block_device_mapping = block_device_mapping) - print "Started instance id (%s)" % (instance.id) - - #noping for Folsom - shutoff for newer - result = wait_for_shutoff(instance, nova) - #result = wait_for_noping(instance, nova, console_password, console_command) - - if not result: - raise Exception("Timeout while waiting for install to finish") - - return result - - -def terminate_instance(instance_id, creds): - nova = 
nova_client.Client(creds['username'], creds['password'], creds['tenant'], - auth_url=creds['auth_url'], insecure=True) - instance = nova.servers.get(instance_id) - instance.delete() - print "Waiting for instance id (%s) to be terminated/delete" % (instance_id) - while True: - print "Current instance status: %s" % (instance.status) - sleep(2) - try: - instance = nova.servers.get(instance_id) - except Exception as e: - print "Got exception (%s) assuming deletion complete" % (e) - break - -def wait_for_glance_snapshot(image_id, creds, glance_url): - k = keystone_client.Client(username=creds['username'], password=creds['password'], tenant_name=creds['tenant'], auth_url=creds['auth_url']) - if not k.authenticate(): - raise Exception("Unable to authenticate into Keystone") - - glance = glance_client.Client("1", endpoint=glance_url, token=k.auth_token) - image = glance.images.get(image_id) - print "Waiting for glance image id (%s) to become active" % (image_id) - while True: - print "Current image status: %s" % (image.status) - sleep(2) - image = glance.images.get(image.id) - if image.status == "error": - raise Exception("Image entered error status while waiting for completion") - elif image.status == 'active': - break - -def do_pw_sub(ks_file, admin_password): - f = open(ks_file, "r") - working_ks = "" - for line in f: - working_ks += Template(line).safe_substitute({ 'adminpw': admin_password }) - f.close() - return working_ks diff --git a/imagebuilder/openstack/common/__init__.py b/imagebuilder/openstack/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/imagebuilder/openstack/common/gettextutils.py b/imagebuilder/openstack/common/gettextutils.py deleted file mode 100644 index 1df9166..0000000 --- a/imagebuilder/openstack/common/gettextutils.py +++ /dev/null @@ -1,226 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. -# All Rights Reserved. -# Copyright 2013 IBM Corp. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -gettext for openstack-common modules. - -Usual usage in an openstack.common module: - - from imagebuilder.openstack.common.gettextutils import _ -""" - -import copy -import gettext -import logging.handlers -import os -import UserString - -_localedir = os.environ.get('imagebuilder'.upper() + '_LOCALEDIR') -_t = gettext.translation('imagebuilder', localedir=_localedir, fallback=True) - - -def _(msg): - return _t.ugettext(msg) - - -def install(domain): - """Install a _() function using the given translation domain. - - Given a translation domain, install a _() function using gettext's - install() function. - - The main difference from gettext.install() is that we allow - overriding the default localedir (e.g. /usr/share/locale) using - a translation-domain-specific environment variable (e.g. - NOVA_LOCALEDIR). - """ - gettext.install(domain, - localedir=os.environ.get(domain.upper() + '_LOCALEDIR'), - unicode=True) - - -""" -Lazy gettext functionality. - -The following is an attempt to introduce a deferred way -to do translations on messages in OpenStack. We attempt to -override the standard _() function and % (format string) operation -to build Message objects that can later be translated when we have -more information. Also included is an example LogHandler that -translates Messages to an associated locale, effectively allowing -many logs, each with their own locale. 
-""" - - -def get_lazy_gettext(domain): - """Assemble and return a lazy gettext function for a given domain. - - Factory method for a project/module to get a lazy gettext function - for its own translation domain (i.e. nova, glance, cinder, etc.) - """ - - def _lazy_gettext(msg): - """ - Create and return a Message object encapsulating a string - so that we can translate it later when needed. - """ - return Message(msg, domain) - - return _lazy_gettext - - -class Message(UserString.UserString, object): - """Class used to encapsulate translatable messages.""" - def __init__(self, msg, domain): - # _msg is the gettext msgid and should never change - self._msg = msg - self._left_extra_msg = '' - self._right_extra_msg = '' - self.params = None - self.locale = None - self.domain = domain - - @property - def data(self): - # NOTE(mrodden): this should always resolve to a unicode string - # that best represents the state of the message currently - - localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR') - if self.locale: - lang = gettext.translation(self.domain, - localedir=localedir, - languages=[self.locale], - fallback=True) - else: - # use system locale for translations - lang = gettext.translation(self.domain, - localedir=localedir, - fallback=True) - - full_msg = (self._left_extra_msg + - lang.ugettext(self._msg) + - self._right_extra_msg) - - if self.params is not None: - full_msg = full_msg % self.params - - return unicode(full_msg) - - def _save_parameters(self, other): - # we check for None later to see if - # we actually have parameters to inject, - # so encapsulate if our parameter is actually None - if other is None: - self.params = (other, ) - else: - self.params = copy.deepcopy(other) - - return self - - # overrides to be more string-like - def __unicode__(self): - return self.data - - def __str__(self): - return self.data.encode('utf-8') - - def __getstate__(self): - to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg', - 'domain', 'params', 
'locale'] - new_dict = self.__dict__.fromkeys(to_copy) - for attr in to_copy: - new_dict[attr] = copy.deepcopy(self.__dict__[attr]) - - return new_dict - - def __setstate__(self, state): - for (k, v) in state.items(): - setattr(self, k, v) - - # operator overloads - def __add__(self, other): - copied = copy.deepcopy(self) - copied._right_extra_msg += other.__str__() - return copied - - def __radd__(self, other): - copied = copy.deepcopy(self) - copied._left_extra_msg += other.__str__() - return copied - - def __mod__(self, other): - # do a format string to catch and raise - # any possible KeyErrors from missing parameters - self.data % other - copied = copy.deepcopy(self) - return copied._save_parameters(other) - - def __mul__(self, other): - return self.data * other - - def __rmul__(self, other): - return other * self.data - - def __getitem__(self, key): - return self.data[key] - - def __getslice__(self, start, end): - return self.data.__getslice__(start, end) - - def __getattribute__(self, name): - # NOTE(mrodden): handle lossy operations that we can't deal with yet - # These override the UserString implementation, since UserString - # uses our __class__ attribute to try and build a new message - # after running the inner data string through the operation. - # At that point, we have lost the gettext message id and can just - # safely resolve to a string instead. - ops = ['capitalize', 'center', 'decode', 'encode', - 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip', - 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill'] - if name in ops: - return getattr(self.data, name) - else: - return UserString.UserString.__getattribute__(self, name) - - -class LocaleHandler(logging.Handler): - """Handler that can have a locale associated to translate Messages. - - A quick example of how to utilize the Message class above. 
- LocaleHandler takes a locale and a target logging.Handler object - to forward LogRecord objects to after translating the internal Message. - """ - - def __init__(self, locale, target): - """ - Initialize a LocaleHandler - - :param locale: locale to use for translating messages - :param target: logging.Handler object to forward - LogRecord objects to after translation - """ - logging.Handler.__init__(self) - self.locale = locale - self.target = target - - def emit(self, record): - if isinstance(record.msg, Message): - # set the locale and resolve to a string - record.msg.locale = self.locale - - self.target.emit(record) diff --git a/imagebuilder/openstack/common/importutils.py b/imagebuilder/openstack/common/importutils.py deleted file mode 100644 index dbee325..0000000 --- a/imagebuilder/openstack/common/importutils.py +++ /dev/null @@ -1,67 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Import related utilities and helper functions. 
-""" - -import sys -import traceback - - -def import_class(import_str): - """Returns a class from a string including module and class.""" - mod_str, _sep, class_str = import_str.rpartition('.') - try: - __import__(mod_str) - return getattr(sys.modules[mod_str], class_str) - except (ValueError, AttributeError): - raise ImportError('Class %s cannot be found (%s)' % - (class_str, - traceback.format_exception(*sys.exc_info()))) - - -def import_object(import_str, *args, **kwargs): - """Import a class and return an instance of it.""" - return import_class(import_str)(*args, **kwargs) - - -def import_object_ns(name_space, import_str, *args, **kwargs): - """ - Import a class and return an instance of it, first by trying - to find the class in a default namespace, then failing back to - a full path if not found in the default namespace. - """ - import_value = "%s.%s" % (name_space, import_str) - try: - return import_class(import_value)(*args, **kwargs) - except ImportError: - return import_class(import_str)(*args, **kwargs) - - -def import_module(import_str): - """Import a module.""" - __import__(import_str) - return sys.modules[import_str] - - -def try_import(import_str, default=None): - """Try to import a module and if it fails return default.""" - try: - return import_module(import_str) - except ImportError: - return default diff --git a/imagebuilder/openstack/common/jsonutils.py b/imagebuilder/openstack/common/jsonutils.py deleted file mode 100644 index ea822ef..0000000 --- a/imagebuilder/openstack/common/jsonutils.py +++ /dev/null @@ -1,169 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -''' -JSON related utilities. - -This module provides a few things: - - 1) A handy function for getting an object down to something that can be - JSON serialized. See to_primitive(). - - 2) Wrappers around loads() and dumps(). The dumps() wrapper will - automatically use to_primitive() for you if needed. - - 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson - is available. -''' - - -import datetime -import functools -import inspect -import itertools -import json -import types -import xmlrpclib - -import six - -from imagebuilder.openstack.common import timeutils - - -_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, - inspect.isfunction, inspect.isgeneratorfunction, - inspect.isgenerator, inspect.istraceback, inspect.isframe, - inspect.iscode, inspect.isbuiltin, inspect.isroutine, - inspect.isabstract] - -_simple_types = (types.NoneType, int, basestring, bool, float, long) - - -def to_primitive(value, convert_instances=False, convert_datetime=True, - level=0, max_depth=3): - """Convert a complex object into primitives. - - Handy for JSON serialization. We can optionally handle instances, - but since this is a recursive function, we could have cyclical - data structures. - - To handle cyclical data structures we could track the actual objects - visited in a set, but not all objects are hashable. Instead we just - track the depth of the object inspections and don't go too deep. - - Therefore, convert_instances=True is lossy ... be aware. 
- - """ - # handle obvious types first - order of basic types determined by running - # full tests on nova project, resulting in the following counts: - # 572754 - # 460353 - # 379632 - # 274610 - # 199918 - # 114200 - # 51817 - # 26164 - # 6491 - # 283 - # 19 - if isinstance(value, _simple_types): - return value - - if isinstance(value, datetime.datetime): - if convert_datetime: - return timeutils.strtime(value) - else: - return value - - # value of itertools.count doesn't get caught by nasty_type_tests - # and results in infinite loop when list(value) is called. - if type(value) == itertools.count: - return six.text_type(value) - - # FIXME(vish): Workaround for LP bug 852095. Without this workaround, - # tests that raise an exception in a mocked method that - # has a @wrap_exception with a notifier will fail. If - # we up the dependency to 0.5.4 (when it is released) we - # can remove this workaround. - if getattr(value, '__module__', None) == 'mox': - return 'mock' - - if level > max_depth: - return '?' - - # The try block may not be necessary after the class check above, - # but just in case ... 
- try: - recursive = functools.partial(to_primitive, - convert_instances=convert_instances, - convert_datetime=convert_datetime, - level=level, - max_depth=max_depth) - if isinstance(value, dict): - return dict((k, recursive(v)) for k, v in value.iteritems()) - elif isinstance(value, (list, tuple)): - return [recursive(lv) for lv in value] - - # It's not clear why xmlrpclib created their own DateTime type, but - # for our purposes, make it a datetime type which is explicitly - # handled - if isinstance(value, xmlrpclib.DateTime): - value = datetime.datetime(*tuple(value.timetuple())[:6]) - - if convert_datetime and isinstance(value, datetime.datetime): - return timeutils.strtime(value) - elif hasattr(value, 'iteritems'): - return recursive(dict(value.iteritems()), level=level + 1) - elif hasattr(value, '__iter__'): - return recursive(list(value)) - elif convert_instances and hasattr(value, '__dict__'): - # Likely an instance of something. Watch for cycles. - # Ignore class member vars. - return recursive(value.__dict__, level=level + 1) - else: - if any(test(value) for test in _nasty_type_tests): - return six.text_type(value) - return value - except TypeError: - # Class objects are tricky since they may define something like - # __iter__ defined but it isn't callable as list(). 
- return six.text_type(value) - - -def dumps(value, default=to_primitive, **kwargs): - return json.dumps(value, default=default, **kwargs) - - -def loads(s): - return json.loads(s) - - -def load(s): - return json.load(s) - - -try: - import anyjson -except ImportError: - pass -else: - anyjson._modules.append((__name__, 'dumps', TypeError, - 'loads', ValueError, 'load')) - anyjson.force_implementation(__name__) diff --git a/imagebuilder/openstack/common/local.py b/imagebuilder/openstack/common/local.py deleted file mode 100644 index f1bfc82..0000000 --- a/imagebuilder/openstack/common/local.py +++ /dev/null @@ -1,48 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Greenthread local storage of variables using weak references""" - -import weakref - -from eventlet import corolocal - - -class WeakLocal(corolocal.local): - def __getattribute__(self, attr): - rval = corolocal.local.__getattribute__(self, attr) - if rval: - # NOTE(mikal): this bit is confusing. What is stored is a weak - # reference, not the value itself. We therefore need to lookup - # the weak reference and return the inner value here. 
- rval = rval() - return rval - - def __setattr__(self, attr, value): - value = weakref.ref(value) - return corolocal.local.__setattr__(self, attr, value) - - -# NOTE(mikal): the name "store" should be deprecated in the future -store = WeakLocal() - -# A "weak" store uses weak references and allows an object to fall out of scope -# when it falls out of scope in the code that uses the thread local storage. A -# "strong" store will hold a reference to the object so that it never falls out -# of scope. -weak_store = WeakLocal() -strong_store = corolocal.local diff --git a/imagebuilder/openstack/common/log.py b/imagebuilder/openstack/common/log.py deleted file mode 100644 index a6e2fb1..0000000 --- a/imagebuilder/openstack/common/log.py +++ /dev/null @@ -1,558 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Openstack logging handler. - -This module adds to logging functionality by adding the option to specify -a context object when calling the various log methods. If the context object -is not specified, default formatting is used. Additionally, an instance uuid -may be passed as part of the log message, which is intended to make it easier -for admins to find messages related to a specific instance. 
- -It also allows setting of formatting information through conf. - -""" - -import ConfigParser -import cStringIO -import inspect -import itertools -import logging -import logging.config -import logging.handlers -import os -import sys -import traceback - -from oslo.config import cfg - -from imagebuilder.openstack.common.gettextutils import _ -from imagebuilder.openstack.common import importutils -from imagebuilder.openstack.common import jsonutils -from imagebuilder.openstack.common import local - - -_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" - -common_cli_opts = [ - cfg.BoolOpt('debug', - short='d', - default=False, - help='Print debugging output (set logging level to ' - 'DEBUG instead of default WARNING level).'), - cfg.BoolOpt('verbose', - short='v', - default=False, - help='Print more verbose output (set logging level to ' - 'INFO instead of default WARNING level).'), -] - -logging_cli_opts = [ - cfg.StrOpt('log-config', - metavar='PATH', - help='If this option is specified, the logging configuration ' - 'file specified is used and overrides any other logging ' - 'options specified. Please see the Python logging module ' - 'documentation for details on logging configuration ' - 'files.'), - cfg.StrOpt('log-format', - default=None, - metavar='FORMAT', - help='A logging.Formatter log message format string which may ' - 'use any of the available logging.LogRecord attributes. ' - 'This option is deprecated. Please use ' - 'logging_context_format_string and ' - 'logging_default_format_string instead.'), - cfg.StrOpt('log-date-format', - default=_DEFAULT_LOG_DATE_FORMAT, - metavar='DATE_FORMAT', - help='Format string for %%(asctime)s in log records. ' - 'Default: %(default)s'), - cfg.StrOpt('log-file', - metavar='PATH', - deprecated_name='logfile', - help='(Optional) Name of log file to output to. 
' - 'If no default is set, logging will go to stdout.'), - cfg.StrOpt('log-dir', - deprecated_name='logdir', - help='(Optional) The base directory used for relative ' - '--log-file paths'), - cfg.BoolOpt('use-syslog', - default=False, - help='Use syslog for logging.'), - cfg.StrOpt('syslog-log-facility', - default='LOG_USER', - help='syslog facility to receive log lines') -] - -generic_log_opts = [ - cfg.BoolOpt('use_stderr', - default=True, - help='Log output to standard error') -] - -log_opts = [ - cfg.StrOpt('logging_context_format_string', - default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [%(request_id)s %(user)s %(tenant)s] ' - '%(instance)s%(message)s', - help='format string to use for log messages with context'), - cfg.StrOpt('logging_default_format_string', - default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [-] %(instance)s%(message)s', - help='format string to use for log messages without context'), - cfg.StrOpt('logging_debug_format_suffix', - default='%(funcName)s %(pathname)s:%(lineno)d', - help='data to append to log format when level is DEBUG'), - cfg.StrOpt('logging_exception_prefix', - default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' - '%(instance)s', - help='prefix each line of exception output with this format'), - cfg.ListOpt('default_log_levels', - default=[ - 'amqplib=WARN', - 'sqlalchemy=WARN', - 'boto=WARN', - 'suds=INFO', - 'keystone=INFO', - 'eventlet.wsgi.server=WARN' - ], - help='list of logger=LEVEL pairs'), - cfg.BoolOpt('publish_errors', - default=False, - help='publish error events'), - cfg.BoolOpt('fatal_deprecations', - default=False, - help='make deprecations fatal'), - - # NOTE(mikal): there are two options here because sometimes we are handed - # a full instance (and could include more information), and other times we - # are just handed a UUID for the instance. 
- cfg.StrOpt('instance_format', - default='[instance: %(uuid)s] ', - help='If an instance is passed with the log message, format ' - 'it like this'), - cfg.StrOpt('instance_uuid_format', - default='[instance: %(uuid)s] ', - help='If an instance UUID is passed with the log message, ' - 'format it like this'), -] - -CONF = cfg.CONF -CONF.register_cli_opts(common_cli_opts) -CONF.register_cli_opts(logging_cli_opts) -CONF.register_opts(generic_log_opts) -CONF.register_opts(log_opts) - -# our new audit level -# NOTE(jkoelker) Since we synthesized an audit level, make the logging -# module aware of it so it acts like other levels. -logging.AUDIT = logging.INFO + 1 -logging.addLevelName(logging.AUDIT, 'AUDIT') - - -try: - NullHandler = logging.NullHandler -except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 - class NullHandler(logging.Handler): - def handle(self, record): - pass - - def emit(self, record): - pass - - def createLock(self): - self.lock = None - - -def _dictify_context(context): - if context is None: - return None - if not isinstance(context, dict) and getattr(context, 'to_dict', None): - context = context.to_dict() - return context - - -def _get_binary_name(): - return os.path.basename(inspect.stack()[-1][1]) - - -def _get_log_file_path(binary=None): - logfile = CONF.log_file - logdir = CONF.log_dir - - if logfile and not logdir: - return logfile - - if logfile and logdir: - return os.path.join(logdir, logfile) - - if logdir: - binary = binary or _get_binary_name() - return '%s.log' % (os.path.join(logdir, binary),) - - -class BaseLoggerAdapter(logging.LoggerAdapter): - - def audit(self, msg, *args, **kwargs): - self.log(logging.AUDIT, msg, *args, **kwargs) - - -class LazyAdapter(BaseLoggerAdapter): - def __init__(self, name='unknown', version='unknown'): - self._logger = None - self.extra = {} - self.name = name - self.version = version - - @property - def logger(self): - if not self._logger: - self._logger = getLogger(self.name, 
self.version) - return self._logger - - -class ContextAdapter(BaseLoggerAdapter): - warn = logging.LoggerAdapter.warning - - def __init__(self, logger, project_name, version_string): - self.logger = logger - self.project = project_name - self.version = version_string - - @property - def handlers(self): - return self.logger.handlers - - def deprecated(self, msg, *args, **kwargs): - stdmsg = _("Deprecated: %s") % msg - if CONF.fatal_deprecations: - self.critical(stdmsg, *args, **kwargs) - raise DeprecatedConfig(msg=stdmsg) - else: - self.warn(stdmsg, *args, **kwargs) - - def process(self, msg, kwargs): - if 'extra' not in kwargs: - kwargs['extra'] = {} - extra = kwargs['extra'] - - context = kwargs.pop('context', None) - if not context: - context = getattr(local.store, 'context', None) - if context: - extra.update(_dictify_context(context)) - - instance = kwargs.pop('instance', None) - instance_extra = '' - if instance: - instance_extra = CONF.instance_format % instance - else: - instance_uuid = kwargs.pop('instance_uuid', None) - if instance_uuid: - instance_extra = (CONF.instance_uuid_format - % {'uuid': instance_uuid}) - extra.update({'instance': instance_extra}) - - extra.update({"project": self.project}) - extra.update({"version": self.version}) - extra['extra'] = extra.copy() - return msg, kwargs - - -class JSONFormatter(logging.Formatter): - def __init__(self, fmt=None, datefmt=None): - # NOTE(jkoelker) we ignore the fmt argument, but its still there - # since logging.config.fileConfig passes it. 
- self.datefmt = datefmt - - def formatException(self, ei, strip_newlines=True): - lines = traceback.format_exception(*ei) - if strip_newlines: - lines = [itertools.ifilter( - lambda x: x, - line.rstrip().splitlines()) for line in lines] - lines = list(itertools.chain(*lines)) - return lines - - def format(self, record): - message = {'message': record.getMessage(), - 'asctime': self.formatTime(record, self.datefmt), - 'name': record.name, - 'msg': record.msg, - 'args': record.args, - 'levelname': record.levelname, - 'levelno': record.levelno, - 'pathname': record.pathname, - 'filename': record.filename, - 'module': record.module, - 'lineno': record.lineno, - 'funcname': record.funcName, - 'created': record.created, - 'msecs': record.msecs, - 'relative_created': record.relativeCreated, - 'thread': record.thread, - 'thread_name': record.threadName, - 'process_name': record.processName, - 'process': record.process, - 'traceback': None} - - if hasattr(record, 'extra'): - message['extra'] = record.extra - - if record.exc_info: - message['traceback'] = self.formatException(record.exc_info) - - return jsonutils.dumps(message) - - -def _create_logging_excepthook(product_name): - def logging_excepthook(type, value, tb): - extra = {} - if CONF.verbose: - extra['exc_info'] = (type, value, tb) - getLogger(product_name).critical(str(value), **extra) - return logging_excepthook - - -class LogConfigError(Exception): - - message = _('Error loading logging config %(log_config)s: %(err_msg)s') - - def __init__(self, log_config, err_msg): - self.log_config = log_config - self.err_msg = err_msg - - def __str__(self): - return self.message % dict(log_config=self.log_config, - err_msg=self.err_msg) - - -def _load_log_config(log_config): - try: - logging.config.fileConfig(log_config) - except ConfigParser.Error as exc: - raise LogConfigError(log_config, str(exc)) - - -def setup(product_name): - """Setup logging.""" - if CONF.log_config: - _load_log_config(CONF.log_config) - else: - 
_setup_logging_from_conf() - sys.excepthook = _create_logging_excepthook(product_name) - - -def set_defaults(logging_context_format_string): - cfg.set_defaults(log_opts, - logging_context_format_string= - logging_context_format_string) - - -def _find_facility_from_conf(): - facility_names = logging.handlers.SysLogHandler.facility_names - facility = getattr(logging.handlers.SysLogHandler, - CONF.syslog_log_facility, - None) - - if facility is None and CONF.syslog_log_facility in facility_names: - facility = facility_names.get(CONF.syslog_log_facility) - - if facility is None: - valid_facilities = facility_names.keys() - consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', - 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', - 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', - 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', - 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] - valid_facilities.extend(consts) - raise TypeError(_('syslog facility must be one of: %s') % - ', '.join("'%s'" % fac - for fac in valid_facilities)) - - return facility - - -def _setup_logging_from_conf(): - log_root = getLogger(None).logger - for handler in log_root.handlers: - log_root.removeHandler(handler) - - if CONF.use_syslog: - facility = _find_facility_from_conf() - syslog = logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) - log_root.addHandler(syslog) - - logpath = _get_log_file_path() - if logpath: - filelog = logging.handlers.WatchedFileHandler(logpath) - log_root.addHandler(filelog) - - if CONF.use_stderr: - streamlog = ColorHandler() - log_root.addHandler(streamlog) - - elif not CONF.log_file: - # pass sys.stdout as a positional argument - # python2.6 calls the argument strm, in 2.7 it's stream - streamlog = logging.StreamHandler(sys.stdout) - log_root.addHandler(streamlog) - - if CONF.publish_errors: - handler = importutils.import_object( - "imagebuilder.openstack.common.log_handler.PublishErrorsHandler", - logging.ERROR) - 
log_root.addHandler(handler) - - datefmt = CONF.log_date_format - for handler in log_root.handlers: - # NOTE(alaski): CONF.log_format overrides everything currently. This - # should be deprecated in favor of context aware formatting. - if CONF.log_format: - handler.setFormatter(logging.Formatter(fmt=CONF.log_format, - datefmt=datefmt)) - log_root.info('Deprecated: log_format is now deprecated and will ' - 'be removed in the next release') - else: - handler.setFormatter(ContextFormatter(datefmt=datefmt)) - - if CONF.debug: - log_root.setLevel(logging.DEBUG) - elif CONF.verbose: - log_root.setLevel(logging.INFO) - else: - log_root.setLevel(logging.WARNING) - - for pair in CONF.default_log_levels: - mod, _sep, level_name = pair.partition('=') - level = logging.getLevelName(level_name) - logger = logging.getLogger(mod) - logger.setLevel(level) - -_loggers = {} - - -def getLogger(name='unknown', version='unknown'): - if name not in _loggers: - _loggers[name] = ContextAdapter(logging.getLogger(name), - name, - version) - return _loggers[name] - - -def getLazyLogger(name='unknown', version='unknown'): - """ - create a pass-through logger that does not create the real logger - until it is really needed and delegates all calls to the real logger - once it is created - """ - return LazyAdapter(name, version) - - -class WritableLogger(object): - """A thin wrapper that responds to `write` and logs.""" - - def __init__(self, logger, level=logging.INFO): - self.logger = logger - self.level = level - - def write(self, msg): - self.logger.log(self.level, msg) - - -class ContextFormatter(logging.Formatter): - """A context.RequestContext aware formatter configured through flags. - - The flags used to set format strings are: logging_context_format_string - and logging_default_format_string. You can also specify - logging_debug_format_suffix to append extra formatting if the log level is - debug. 
- - For information about what variables are available for the formatter see: - http://docs.python.org/library/logging.html#formatter - - """ - - def format(self, record): - """Uses contextstring if request_id is set, otherwise default.""" - # NOTE(sdague): default the fancier formating params - # to an empty string so we don't throw an exception if - # they get used - for key in ('instance', 'color'): - if key not in record.__dict__: - record.__dict__[key] = '' - - if record.__dict__.get('request_id', None): - self._fmt = CONF.logging_context_format_string - else: - self._fmt = CONF.logging_default_format_string - - if (record.levelno == logging.DEBUG and - CONF.logging_debug_format_suffix): - self._fmt += " " + CONF.logging_debug_format_suffix - - # Cache this on the record, Logger will respect our formated copy - if record.exc_info: - record.exc_text = self.formatException(record.exc_info, record) - return logging.Formatter.format(self, record) - - def formatException(self, exc_info, record=None): - """Format exception output with CONF.logging_exception_prefix.""" - if not record: - return logging.Formatter.formatException(self, exc_info) - - stringbuffer = cStringIO.StringIO() - traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], - None, stringbuffer) - lines = stringbuffer.getvalue().split('\n') - stringbuffer.close() - - if CONF.logging_exception_prefix.find('%(asctime)') != -1: - record.asctime = self.formatTime(record, self.datefmt) - - formatted_lines = [] - for line in lines: - pl = CONF.logging_exception_prefix % record.__dict__ - fl = '%s%s' % (pl, line) - formatted_lines.append(fl) - return '\n'.join(formatted_lines) - - -class ColorHandler(logging.StreamHandler): - LEVEL_COLORS = { - logging.DEBUG: '\033[00;32m', # GREEN - logging.INFO: '\033[00;36m', # CYAN - logging.AUDIT: '\033[01;36m', # BOLD CYAN - logging.WARN: '\033[01;33m', # BOLD YELLOW - logging.ERROR: '\033[01;31m', # BOLD RED - logging.CRITICAL: '\033[01;31m', # BOLD RED - } - 
- def format(self, record): - record.color = self.LEVEL_COLORS[record.levelno] - return logging.StreamHandler.format(self, record) - - -class DeprecatedConfig(Exception): - message = _("Fatal call to deprecated config: %(msg)s") - - def __init__(self, msg): - super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/imagebuilder/openstack/common/timeutils.py b/imagebuilder/openstack/common/timeutils.py deleted file mode 100644 index 008e9c8..0000000 --- a/imagebuilder/openstack/common/timeutils.py +++ /dev/null @@ -1,187 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Time related utilities and helper functions. 
-""" - -import calendar -import datetime - -import iso8601 - - -# ISO 8601 extended time format with microseconds -_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' -_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' -PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND - - -def isotime(at=None, subsecond=False): - """Stringify time in ISO 8601 format.""" - if not at: - at = utcnow() - st = at.strftime(_ISO8601_TIME_FORMAT - if not subsecond - else _ISO8601_TIME_FORMAT_SUBSECOND) - tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' - st += ('Z' if tz == 'UTC' else tz) - return st - - -def parse_isotime(timestr): - """Parse time from ISO 8601 format.""" - try: - return iso8601.parse_date(timestr) - except iso8601.ParseError as e: - raise ValueError(e.message) - except TypeError as e: - raise ValueError(e.message) - - -def strtime(at=None, fmt=PERFECT_TIME_FORMAT): - """Returns formatted utcnow.""" - if not at: - at = utcnow() - return at.strftime(fmt) - - -def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): - """Turn a formatted time back into a datetime.""" - return datetime.datetime.strptime(timestr, fmt) - - -def normalize_time(timestamp): - """Normalize time in arbitrary timezone to UTC naive object.""" - offset = timestamp.utcoffset() - if offset is None: - return timestamp - return timestamp.replace(tzinfo=None) - offset - - -def is_older_than(before, seconds): - """Return True if before is older than seconds.""" - if isinstance(before, basestring): - before = parse_strtime(before).replace(tzinfo=None) - return utcnow() - before > datetime.timedelta(seconds=seconds) - - -def is_newer_than(after, seconds): - """Return True if after is newer than seconds.""" - if isinstance(after, basestring): - after = parse_strtime(after).replace(tzinfo=None) - return after - utcnow() > datetime.timedelta(seconds=seconds) - - -def utcnow_ts(): - """Timestamp version of our utcnow function.""" - return calendar.timegm(utcnow().timetuple()) - - -def utcnow(): - """Overridable 
version of utils.utcnow.""" - if utcnow.override_time: - try: - return utcnow.override_time.pop(0) - except AttributeError: - return utcnow.override_time - return datetime.datetime.utcnow() - - -def iso8601_from_timestamp(timestamp): - """Returns a iso8601 formated date from timestamp.""" - return isotime(datetime.datetime.utcfromtimestamp(timestamp)) - - -utcnow.override_time = None - - -def set_time_override(override_time=datetime.datetime.utcnow()): - """ - Override utils.utcnow to return a constant time or a list thereof, - one at a time. - """ - utcnow.override_time = override_time - - -def advance_time_delta(timedelta): - """Advance overridden time using a datetime.timedelta.""" - assert(not utcnow.override_time is None) - try: - for dt in utcnow.override_time: - dt += timedelta - except TypeError: - utcnow.override_time += timedelta - - -def advance_time_seconds(seconds): - """Advance overridden time by seconds.""" - advance_time_delta(datetime.timedelta(0, seconds)) - - -def clear_time_override(): - """Remove the overridden time.""" - utcnow.override_time = None - - -def marshall_now(now=None): - """Make an rpc-safe datetime with microseconds. - - Note: tzinfo is stripped, but not required for relative times. - """ - if not now: - now = utcnow() - return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, - minute=now.minute, second=now.second, - microsecond=now.microsecond) - - -def unmarshall_time(tyme): - """Unmarshall a datetime dict.""" - return datetime.datetime(day=tyme['day'], - month=tyme['month'], - year=tyme['year'], - hour=tyme['hour'], - minute=tyme['minute'], - second=tyme['second'], - microsecond=tyme['microsecond']) - - -def delta_seconds(before, after): - """ - Compute the difference in seconds between two date, time, or - datetime objects (as a float, to microsecond resolution). 
- """ - delta = after - before - try: - return delta.total_seconds() - except AttributeError: - return ((delta.days * 24 * 3600) + delta.seconds + - float(delta.microseconds) / (10 ** 6)) - - -def is_soon(dt, window): - """ - Determines if time is going to happen in the next window seconds. - - :params dt: the time - :params window: minimum seconds to remain to consider the time not soon - - :return: True if expiration is within the given duration - """ - soon = (utcnow() + datetime.timedelta(seconds=window)) - return normalize_time(dt) <= soon diff --git a/imagebuilder/ping.py b/imagebuilder/ping.py deleted file mode 100644 index af41f1e..0000000 --- a/imagebuilder/ping.py +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/env python - -""" - A pure python ping implementation using raw socket. - - - Note that ICMP messages can only be sent from processes running as root. - - - Derived from ping.c distributed in Linux's netkit. That code is - copyright (c) 1989 by The Regents of the University of California. - That code is in turn derived from code written by Mike Muuss of the - US Army Ballistic Research Laboratory in December, 1983 and - placed in the public domain. They have my thanks. - - Bugs are naturally mine. I'd be glad to hear about them. There are - certainly word - size dependenceies here. - - Copyright (c) Matthew Dixon Cowles, . - Distributable under the terms of the GNU General Public License - version 2. Provided with no warranties of any sort. - - Original Version from Matthew Dixon Cowles: - -> ftp://ftp.visi.com/users/mdc/ping.py - - Rewrite by Jens Diemer: - -> http://www.python-forum.de/post-69122.html#69122 - - - Revision history - ~~~~~~~~~~~~~~~~ - - March 11, 2010 - changes by Samuel Stauffer: - - replaced time.clock with default_timer which is set to - time.clock on windows and time.time on other systems. 
- - May 30, 2007 - little rewrite by Jens Diemer: - - change socket asterisk import to a normal import - - replace time.time() with time.clock() - - delete "return None" (or change to "return" only) - - in checksum() rename "str" to "source_string" - - November 22, 1997 - Initial hack. Doesn't do much, but rather than try to guess - what features I (or others) will want in the future, I've only - put in what I need now. - - December 16, 1997 - For some reason, the checksum bytes are in the wrong order when - this is run under Solaris 2.X for SPARC but it works right under - Linux x86. Since I don't know just what's wrong, I'll swap the - bytes always and then do an htons(). - - December 4, 2000 - Changed the struct.pack() calls to pack the checksum and ID as - unsigned. My thanks to Jerome Poincheval for the fix. - - - Last commit info: - ~~~~~~~~~~~~~~~~~ - $LastChangedDate: $ - $Rev: $ - $Author: $ -""" - - -import os, sys, socket, struct, select, time - -if sys.platform == "win32": - # On Windows, the best timer is time.clock() - default_timer = time.clock -else: - # On most other platforms the best timer is time.time() - default_timer = time.time - -# From /usr/include/linux/icmp.h; your milage may vary. -ICMP_ECHO_REQUEST = 8 # Seems to be the same on Solaris. - - -def checksum(source_string): - """ - I'm not too confident that this is right but testing seems - to suggest that it gives the same answers as in_cksum in ping.c - """ - sum = 0 - countTo = (len(source_string)/2)*2 - count = 0 - while count> 16) + (sum & 0xffff) - sum = sum + (sum >> 16) - answer = ~sum - answer = answer & 0xffff - - # Swap bytes. Bugger me if I know why. - answer = answer >> 8 | (answer << 8 & 0xff00) - - return answer - - -def receive_one_ping(my_socket, ID, timeout): - """ - receive the ping from the socket. 
- """ - timeLeft = timeout - while True: - startedSelect = default_timer() - whatReady = select.select([my_socket], [], [], timeLeft) - howLongInSelect = (default_timer() - startedSelect) - if whatReady[0] == []: # Timeout - return - - timeReceived = default_timer() - recPacket, addr = my_socket.recvfrom(1024) - icmpHeader = recPacket[20:28] - type, code, checksum, packetID, sequence = struct.unpack( - "bbHHh", icmpHeader - ) - if packetID == ID: - bytesInDouble = struct.calcsize("d") - timeSent = struct.unpack("d", recPacket[28:28 + bytesInDouble])[0] - return timeReceived - timeSent - - timeLeft = timeLeft - howLongInSelect - if timeLeft <= 0: - return - - -def send_one_ping(my_socket, dest_addr, ID): - """ - Send one ping to the given >dest_addr<. - """ - dest_addr = socket.gethostbyname(dest_addr) - - # Header is type (8), code (8), checksum (16), id (16), sequence (16) - my_checksum = 0 - - # Make a dummy heder with a 0 checksum. - header = struct.pack("bbHHh", ICMP_ECHO_REQUEST, 0, my_checksum, ID, 1) - bytesInDouble = struct.calcsize("d") - data = (192 - bytesInDouble) * "Q" - data = struct.pack("d", default_timer()) + data - - # Calculate the checksum on the data and the dummy header. - my_checksum = checksum(header + data) - - # Now that we have the right checksum, we put that in. It's just easier - # to make up a new header than to stuff it into the dummy. - header = struct.pack( - "bbHHh", ICMP_ECHO_REQUEST, 0, socket.htons(my_checksum), ID, 1 - ) - packet = header + data - my_socket.sendto(packet, (dest_addr, 1)) # Don't know about the 1 - - -def do_one(dest_addr, timeout): - """ - Returns either the delay (in seconds) or none on timeout. - """ - icmp = socket.getprotobyname("icmp") - try: - my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp) - except socket.error, (errno, msg): - if errno == 1: - # Operation not permitted - msg = msg + ( - " - Note that ICMP messages can only be sent from processes" - " running as root." 
- ) - raise socket.error(msg) - raise # raise the original error - - my_ID = os.getpid() & 0xFFFF - - send_one_ping(my_socket, dest_addr, my_ID) - delay = receive_one_ping(my_socket, my_ID, timeout) - - my_socket.close() - return delay - - -def verbose_ping(dest_addr, timeout = 2, count = 4): - """ - Send >count< ping to >dest_addr< with the given >timeout< and display - the result. - """ - for i in xrange(count): - print "ping %s..." % dest_addr, - try: - delay = do_one(dest_addr, timeout) - except socket.gaierror, e: - print "failed. (socket error: '%s')" % e[1] - break - - if delay == None: - print "failed. (timeout within %ssec.)" % timeout - else: - delay = delay * 1000 - print "get ping in %0.4fms" % delay - print - - -if __name__ == '__main__': - verbose_ping("heise.de") - verbose_ping("google.com") - verbose_ping("a-test-url-taht-is-not-available.com") - verbose_ping("192.168.1.1") diff --git a/imagebuilder/public/css/style.css b/imagebuilder/public/css/style.css deleted file mode 100644 index 55c9db5..0000000 --- a/imagebuilder/public/css/style.css +++ /dev/null @@ -1,43 +0,0 @@ -body { - background: #311F00; - color: white; - font-family: 'Helvetica Neue', 'Helvetica', 'Verdana', sans-serif; - padding: 1em 2em; -} - -a { - color: #FAFF78; - text-decoration: none; -} - -a:hover { - text-decoration: underline; -} - -div#content { - width: 800px; - margin: 0 auto; -} - -form { - margin: 0; - padding: 0; - border: 0; -} - -fieldset { - border: 0; -} - -input.error { - background: #FAFF78; -} - -header { - text-align: center; -} - -h1, h2, h3, h4, h5, h6 { - font-family: 'Futura-CondensedExtraBold', 'Futura', 'Helvetica', sans-serif; - text-transform: uppercase; -} diff --git a/imagebuilder/public/images/logo.png b/imagebuilder/public/images/logo.png deleted file mode 100644 index 5994d22d9b1edf086925d4e786bfc385125ce457..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35094 
zcmd?Pg;N|s*EfpG;_k3OaCZsr0fK9Q1%d~M#oaZy26qS=NN`=;-GeOd?v~5*yzlqb zt@|(BshX*t?$f_>PEVcg>C?X`H5GXb04V?l1_tAkf{X?X3@qrqoR5O^{*>uM7=(d= zr?iolR{JC^4ODY+w6w9afPwLZ4b$4NomInsTd~NggT@G3;h>6HCpwsGf~B(?ZFO76 zzZ~&5Q7dGX+8h2D_;Bi9hrAr)DBSOHi7fVrjJ_P==#zvofN%Ox6-lFfN7Im-#rv5- z6TaoXZR7Dwh+%&Rl>g}bWfq?U3twAPwIA}*w}ZcN1q&jVW_hvx91~8cNG~seHCyj> z-f)dcIba7qbKIzWGn@aBijH$Z4(v?f>!X3{=i3<0f$=sMqRw-;_#orTu6kt%s8m*h zj%Gp7*zdGymb`otKTsja{Zhh8%@TqcHk;QNOA|r(I}G#{N$fEqm9iQfyXLa>=yw-X zt|3>(b>%6CS=Ia4-|c^ps7(7tuTu&6ImPb5%=d^J6L&~9=Q!!{Zx`|TOO1c1!J+Ti zKLgujoLc-y*Jr||BmdC;RjY=_&Snn{KI+Hb?CFhKQe7Tz7cYYML2pL5sf z7US9p!up$vmef!G1Rb{4}HYKSE8Y)zeVrY=w*U4R~#?EtD_M3D5ZxuJ>mYMjwPXI8PwT+Q);6S^7YnPc~P^#HoXyYJ6x3a#} z7o9wvV#ZC5k)~yHDdBPPAHmhWDQ$XeC1c&1XUJ*V;BoImWv@1JNK9AcDYMn2L^_XClN+h=_F!AH6S+%IWX={v_PUR|? z-GCCd4XbiKZ}R8-+Wt-enrG1qQUxtw0a9Sh{T?u=P73<2Ffi=2|G8m3OT^vZ*6s7sEgz$U$Kh+#G z!2gPXZAEGHKC1zx9bGJdyzIQ}oHSwpAP^|xVs0s{AtU#{-QV9tX{^CuCt(f_4-XG^ z4<2?$7b^}fAt50SPHqlvZnk#`HdikPu&F1TgDdU-fczgE84Fi47aJ$AjiUqbKe(n} z9o@j9G&KKF^nb7awbS0||0;5D{a>%%{p0X7b>iS+=j8am$nPCR-hC97cCj!8JGy8& zI@*az{$~(?pX^PoEHo@k&Ami8{=X9ct5SsHKX&}T?D`+4|6BX+wHV;N=l>Z&F#y@F zHzo`W5ayGNq?RY_Ne*(Lq4xY>*<>TH0`LQMLg){mD45GP5Fb{38KxivNnIVMSg-Rl z{=>l%0k&i@eQ2BF*9^$$we%|@)AhiG2 zb&~D8^D}4GIMO$*S&7f^d>C`QQvswXx5nCketaV>x5+OJ`hhehF@Cvl^55b&(gQ1u zIdVsFmUbPZ|B+X7ZRaEVU&$wZc)r2aNkFZ(?*GD(JHBK850L-DJI-AL#CCq3SM9&e zg9Z)n@c(Vj<9o+>t-AQ)_8%MGo8ML1{r^p+%u`F}*-td)pq{+6<|+;d#E|X8r+otX zRTyYzZrU|Q_*Q;IW9;7LaFv(I9@%u!T1sTQ{ZeVHlRUn)VBz)3ePgk7GH|146ngDu zbMN;}pvR8?*s7Sc=lk@xtE341CMLQfPR4XdXXwr`6G!LP6QA|fDxg?Hw41e+Q(6iu z^&s&D{<|`RJ6|TVy;&D1!8w09YS>D5heyBC2G0&9BlpPwv~z$;vLVXUdle4nS{%c4 z#$UamIC8i8x%L=)&^Bi+qx$9>61;1^A$QLe0k<9^Iu2=Ab9YttZzgIcFbYd(2}qAf z-qI=c%Jgw@;1B-f=dUP68I*;9RX~XnQFV1S3mF;NwVPy?UR&XYTa+>bILUWCF~CVa z-%QOYzXk-&2(-smePZumY}e4EH{tN-ZZlYQ*HBbhaZH$+TV7tqjvq`+zMD)ZAi;&> zZbTjW%lt5GjCIK3EcWIyg8H0OBW|byQj!)^D}QDCT*2a%n8|XtS!UYW)5ME%fZz5D zTr3-ZknlFgPOP7K5sK-fEVxcdw8a9`J9N(4Imm9U_zTQvLT0LTE(}_nhB(3QYck~L ztHAb8Wv&yg#--;2_pSp_I5zJO{a7; 
zDinVMPOlcy%yz%i!0OIi5VQ#w^jWNd`f2=wDT`@gpu;8GF&YM(A7GVd0}_h)T=y30 z=%v0HJwfe@v%4zlOn?`6zY^_QJt8q^3>?FIV+F!MvNi$kiEivZ;hI8v88pg-jo3e_ zg-J2@55dB(YJ%kc5>PsjWGa7ldi5mxg>;CiGWLX-qp7N?JnXgv$GnCVi80|DwFq;5 zi~s^-N*j3obggDSW;wLWm5WxAstt7&%>a-E5M26(nqyW5E(`#uq6FX&qN)WPgbODu zo&;n-d{Bp4a_U;@q5{oMF{JpB$anlS9vUuMVRo;RVyfTLudI8WA?XN6vBeU;Eujtv zqk^IjrG6-QSXGmZ%}BeWAsmk>7pSmgQnmBguc%pGUz&BB<*f;nYcxcd^qnD>mNDY& z&%-{7LIMejMud?vl-k6yk~)nWBs?qGH00DLK+a%^;y;+ykEchto;Ivwsr#h}XX+!G z5DEwe91mQn>O1Hu4&xKpuHU?Ao}s9Y&>Tazu9QNns|ffcQ=V9uF8)EgE) zcnZ<0d!^V{?0_{7?~b66;Go@~d-Y;;)HQi+=7*vCu8`Z~EZgj>D0D=KYFy^#nJ(nwm@aYZ9tAkV`)F+Qxtfj<~VG7 z#bvV2##wNH`s=S1w_DDIQcrgCEtRZW;{4}61gJTbGf4Y4j_}B2R_&y(=+$2dp@%ea zG+i_gK=J)z_39PsV2s*)5|%1CaKmb=i{fVQVyKWW5CLVwnFI=vjxUx+8DLhd-kLj!1nK3gnw z510EzSb1!lWuBJy2m2p?K5(Fvp)KKsiUfjxGc77cy*vur>Hfulc!c%`|EUcoz~)(} z)R24Bl#0-uFf@ulg{9sz$^v)D=uu1TeDc;k^NE>Zj&%91!+e2f8VVzB+H-2_bKiGM zM+17C6Z-s6W@0WJxZjyCwJZKJ`VK#VBv%U5%;C1lLH4>TYJX)9W$PK5c>>3{mHLFnSVR|o+_HhD{+2?lUdMQ% z)%ZY*fXvRZQi_2`a2_Lqmvb>d&J9x!t>X4;5#rVXnl97~wOlUrU5FQ7q7 zI)ofOt5v8FC?mh_`c}{8AH#>0Ar^HWvFiB$syqyTDsYFQ_kh0&{~Qpjiv6s$UKFez zu+%l58Ze&j(Si%C9kvo#^=r{>xLtQ@_s^}K{9C4wC1NVS!0yBR;$a;AHpS+*=!=F| z5e4VXO_5TLSMB`Ejs2x5!yLoo1X zTw*YjGcMzw3X$Sz4K(X3OA^0uppVqa2ylci67G&X{w`rVXLlsEICcqMj# z@l zn|MhG0i>y8jg$oeg_buKjq%LGCueCo7_70LZ%?D=?d7;*L zHlk84HAiUNjtIJB=gw3ehxS&;P^k!IPGvS;Mpnx#Y4E6!TTsA z{fKorQV`d9;K&)h%!2h|u)a3<)h_0D>Yl5CpnMIVRy!3-zq zEF55dqRl4UeWfo*%g%5Ny1*SHgS&HL#;cI*p~0&MpVkw3HDbp0AAU|@x{>p|m64Zl z?Rbud_$LB|Dzme{00L+hst2rQjxI{Q6tpJI*x^e=x6P2z*Tm~y=u`zifps$!Zeod4 ztA`SYyYNBUyiZ)$k30gxS>E)eS0X*-(;yo{uCDaZiEwt!K3as&eX1a_1=FWF*h#!l zW7KEBTvV=i10}&?rHqd$5ndRcFP-j6I5Hzgy|idzIzRi|9btr@mM!wdo}J{~2u@v0 zXd;|N?X4IKW$b4ENoqAt+QZ-8=aml+5S=XuIH)MdavMNcwysq}LT`Ys+~sVS(7%7S za8~Qadn{Ii8=K+v3^U}i$)q!rn-Kv3F z5YFix-~N!l5>)ORiDs~7@6ZJ@c(O&rGHeg#A?F$zJkzqEKf8I6DxXkLy`>mK!+X^xaVT-uQ(GY`B^7-`DdU%4y(|wXw>jZ;>g=Q( zgsTpp*~0$Oi?E8YOkJjrqHab~XOm*)W`5+jyuM zy>v>0$a~-S>Dq1{AW`&|U5+^CTFfA0qHboF!&`;oZS7Lzs|5J3rOBqGYCpEmEtgl3 
zS2FyZWea4i#0@syF=YJ9Vv_go>g;f8iAZKcR*qbx;rKPI1M#5*UG3A_+^1E0dUCqo zD5)C$egnw5Ln|B}i?6k*^2V>hoIRs6IkP2V=1|3)61&MK$CCDnC3bGHdku8^OWmHW zR@_H;6t!q3O}$~M7sMyXjwEAE*il`53#o!GV|5`#B4%#`cO^6?xEy|+9<|-@sdSZe zGS$|XsnwwH*XF(_6oG@!LKi}W1gB0&`?2BsaoB#?*SQL0B}CyJJ_r;pWXJ(76n*y! zrfVpWEt}~@_90%P{j$_V-~4IwvsfHaaQe6JD<{b7n_>D(8(}Rj(br%<0|~5CxT~$m zrM+dTCG91HB}vXs#xlb~u`7&AZmq9Gy9>iRXW{ zP6-nFYAx;(!Z)8^p8?`TjBP$2Gm3;u87)+d(ftz+SB$%c)0L)QYj-W&bTOyp<|M_s z#{Kf2EuV@1pYE&l|LOVGE?XU4FdFM|8>BRVp-OR9q1dnI=qcT+L8Io$&5l-3(u z9IwOj$9$1Y0O8j*Ei{CvjDYPz*v#>Q55lR5Oqtw2)zgHs5J%#Rx~Hds7a+{c`b_5+ z(2{-~_vo-H=?$Fv0Z0F&#r{qiaoBTzhK9mBSZEPALzb|=QzaR9wr9$oX9{` zg5KmmrqLvm`B5>U6F9kCsDhq2{Oo*sj@0UCxJB_t&95{Wc#ShyL>Ss>>VmtQHZ_+3 zr1W;oQ;y7ToA9TvN%oa#cedcY6djdUD zqN4j-lOTzRr-(PmlF}2JS>N;nFQo`KGX@bqfjjmcb|YRn^13yCXjSM?O#Q%-Suh^B zX5P|zHw7xNhnE^Oz|)_eoX5M6xcN*7C4=KZe+W)M1IzEk0}h_>gPtpb|L66TJ3WbrQwWBEo8V!=xj>uhIy9l zu@l=ExG86a7&`#lsquF+<4h@xB;`NIC0--O1_m=zYrgt_X0aVcdNIv zZJHCIodc>}T3b^-;0LRp(vfp0;{YoK-b0LgEJ-qMT<@ zVRR!xCySdVrb-x+6|bB)ap$(uqDy4sSNwR?gIim;`jZb@g?QUJn8=hktSUW8=1=)K z6HZ-^3%Y2hhSI43L;xc;lDZS#i$$7v1 zMfKOaSl)182MwYO6+tPC$oWlSa4P@kp%k#(pqH1O+^Tx}t1h7LCle<}9N^aLlT)I_ zORVM(QwUkf%-69;IlEM9bPRna{Wqf&XL%?oRI(O(L8cx6BL6(Ds)`}tXNp7w8 zTHXIp1yC@gT5>SoKCLp6vy0FGiLUmV!U|g>_tVZ0!bnV*kk@pic7isy<)G+6u*>A^ z!j&np99OTJdNfl^<)a&MtwtdyEk=t9YBa$jf)bHE_(wJ54OB{bquP(Phc-3!GvUan zL$HQFHKmbGhoBLrggS1XYGQ!*XX8I80GY z>)cQwu^?sBBJ7m+g?}!Ejtq`OqlWY|H478iqIaoP?jO9xaohOT#5Q9`b233l;B5 zs1^K%VkIZ>2|e#com-h9IPKCNC|GT3Hf4u&hpicjg=lG6Y920`69T$lkR;0uU-va? z6;<_{0Apg-2H6SA!NJ;zmylHVkQaAtR=d_ZTXrjwE~Pbg{l$&of#uo*B=mNrj>2&v zuOXiC_BzI)hVgIS;ZbLe1Cje}a}3Z|QbjG$`}ipF_@w;Rsw3G8$qPz% ztM_;~qRS14H<0SUU|Xasr54M&f7g>&4XUvFmle3wrU| zV@j?LopKgBg{w{Z)ro90vXuw!AKJOU)?u|5y0qkoXTGd}WWjZ!cLjDJ(Wc;u9aZ@7Src7W9hn zDYR4rk--8)OZ<$xo)s~|6^;Qta{VsoUxtl9p5D66nJ75-^Q18>`9qUM(E;*HTAb3_ zbswmy?ECD}s|zDKe2f4|hjILGzfGPlvFZ_do@k~S4mWTo2(0D7Gh`~d{orMCo?%1? 
zHmnswZOPD0;3=t_Ro7kiojJD}oBl_wtc{0`0 zpKeE)Pi)`aeEn5+_th^{@|p>DFa|TB^i!)-S zgMkCoaY$!u+@eKj!v9JM@RdQXp{t&cQy7;6ZA7Q+I*Yljt0O)p&Trs;i2rh}PGJ}7 z0Rp9WOqD&AZjiA;2^>_RxYDn!5*`G)!POU9+x@lw+lH>S&i9~rrgiAJb`)Y~T4%Q& zieQ23Rqg+8?E8ZhznbN*jwJNX3;m_Rd&HA0{-cUnZJan|M~(xBsKA#wn@d2N>}N#_ zcSBA`=_mdf4lm{qTDkjVlt9DB=Bd z9>Pq_cUSyWYf=#ZQK1AR&Q^y<4;%kvH9y8Ebcn1BGYa)j7zbPE=IUqY`;+0&#Eh<` z-j7DR0TYLv$}+z8>wRuZNg5onDjE6)J80`%aZ;a@9d8crY<%JYfp zqfv>he*!@cXH1utgj|CpqE9Muar$jtUlMOl+;^EG z0$Ltk@^NCQu`I3zri#7KxtGW8Q%sLSJ`MyctQhlF9^8o^a%3+>iB#|l)I)D9bgTCz zO{tyLr$B*TY0%_PRQkYHw7EXpitx;8bVsjGaNLxQF?M6m!mI=>*s?K51+WbNnlg{v z*c%<&u?U6Zsqc|0>OR3=?bH83iK>d5uW^^B^y_HxDG@{DLhI9N4Rgi4)WGa3^VK=% zR~UFcnY0?yS4UL`aG|DQ?6>P#t&w(ueNUefbI?o0gPXlC3K_PTvv_f)I4+4Wj$s&q zjHw7P6iU&E%OJ001!l9S2Pw957P(4?5&vnfzR6zZGIwriyYi>ftZ!;B?pRA4+0Or` zS-{}hh|bHugdKs`-{U~7j8D8YfT|48``(ZEWUm$B;hAU{C(UP|(-=MUWXE^Lcw29&W^urq2Hb(KdFa+z8n_z2xi(i{`6^)0&YdIP;WJ__V z9>xWJIN=~u*S7tx?BGg@zEgyNR3{p`hOo}Zc`z*=)0pRBj+>)zz}WmH;|>_maw_!} zq%ah@4#E?3o?4vSe>zec90iRT_*ECs>}F9<%)4g)vxIXl!T;u!VP9vyO{Z6I!+Pt@ z-K%-WUMB6CkHcMr2JSwo#MURxh4MBJCgQu{dhapVqJdx9@ADICUcDA>Ia^`*lfNRh zC0~d`I^h~trJk-o>U71ij?GgooXV$BCY!gT1y*(Koh|&@=yYi>55hP~Uq}+GQco*? 
z+edJ1->vIa?mhRcW*v<&J45Txe}sodj|m28xcmUUXC(h-!8^Uj3G4Z)qFH4j?(pKF z{6Xk4DJBua2qDB&!so^3Tifut-T3fRPQ|~L`d8hg!%dq{lj|(^meR+?Qi^U>xq#4F zpIH`jdq&jl{w3o(&&^KquUL&KAVGeMa5aa_-MKdQ5=n<_1OvfF3U!Qsf@`^*edVR= zZzCbSvZ_ne9A2t~arlh{yoxQgEcTVQR*DuKrU$2kz-D`>@Fhg4VNj6)kD0n9V%g zw7%R)d{W)u-sjYYIs!iWjG3k%zb<(=N$CNM; zIL4K0{ct-aX*J_qmU#$s%7=kQdM%LMDHzwknd{0Dd^kF``AERpoJXrKX_lp1}e7Km} z-!iuLqW7iI{%HoO&9rEdiWUviJ=>|Fdz$lX+E{gmSF}iA1d^$<)y9 zBUiE+NNsMG=Mw}|tHvev@uup>gh}g8zoGF8u>GEh%LHmJG&`nDR5scbPChMRd&T_I zPHKON+RL8v#%ch^G-inwSp~P1(K6yVTf)!u#?rj~wQSm% z`}OmM1TFPO8fbTye+5ud22Vf>BL1PCZabzU;Je1`Yb+tI6Bi60zL^@Q!qCW*+>b<) z=FiWfQ}=a?@k>tn=t=386p{pzrvODx2cT0#{>J%DxL4<5B_U=*o zPMW(}m;kn*P9r-Wzv?sPhwL*{DTAzRkX92>JJ(s^|KS2;(|Or#`Y1!?!>kaL&%k3~0mza#P0U1I?+-JBU=~BD-fg|L8hI2NS29uiVTm*cTd_LciIha67Q6QF@a^ zO87r8M@%6$aNbx5AF)%lHeQL>aKdBXFCYPGc+5r4nc}eSnj4#4!X(^pB9e31$!Y(d zuFX15Vd>6to%Ze^Vc&(+!#JZOofIuv2igEPT5bq^QexC&@x)FEjyxPdT6u{b$TP%1{E>q0*}0to45X)8Op zK+G{LKPv9>|M99t1!+MDzT08=WkcEk#KZ%0ROSOh6P2+(+huC6X>a1*3doFd2Qjb4 zcYlGx*}HD<(|9=*g~T%|d3lqH{NGkN`g>olx6Z-#{#4qS<@IJ|`jA;tMwyQ!j%%0Z zho0#ds2t98!rh5qrL&bE;+*0=RLLhA4dp9~5p6X4b)MpiOeFsdtT3K zs&bel)VtdL{j~Dp@u zV-Q(BQ?16;Vpjuq(W0AIV6E1h8%>8$aOYRTvS6G59l_Dc(8PA}Yz!VnOPbvTx4;0V zU*qeWAM;q(D!seWsQ+gJHZ5?~aPM9P6OS(Ew}$z%5UB3>$BnkhR$gdAoMlX+&#&Z3 zEv@?ul8zGTMDD*2rFn)9+NJW7uxQZ?B9z2?iKLBwypzotbN9+ba^AozQzDsDCCO%9 zF)p8mnUnaFQ)8(9BKJUHAv``BX?k-z)&z13Jd4D%^fp0*m4E*uo?9N43ARXa1!4D;iw{iO_2vl9%L(RSlLPRef!N_L7eu05OKuv47_5IV-qaqF}wa9%{taX zyUOb}fFnab8VK$Vxrd{B+K%P~2qbb&#dVRWMGbH(7zvnSxQZua#~bjx_Y>u;BwbAV z9Wft}17Sdw2gswUF?Hk%MDF@lfZ5d(sgBOh9KYfZx61K~s8O=~O|a2if|h-SARMQA=&5b+mz&SNbIS( zcvSvd;mN*vU8jvP5Vg4c_#kulJ&X#qH!QT9wKC(ydycly5DZ`YyFU&>0oU;Hh=H<` znHp^6<)Ga>+g2+_wX7B?2@0COnkRdi8(}OA>f}74(H7e7pQ^a#g;GLY;TjOPs_{%1 ziLohqv3Qt;0sk>R1bZMzQgAJdUCVbbW}A~jq>z(G@2L3YYah=HE8amBwuPDwoearE zdy`WRr&OTF4zK?Ed31s5QE)ldzL-(7LHA=lmS?*fTjpO^t?q3QHKDFU=7lw8@OH!^ z3Le5k3F|}CK0xR5Z#)*Xm4y1o=FMY>BVyWKUoz6i75#1%YEMS3uKsC<)Dh(t?Bv98 zqjD?pq5gbH{eLV#7d7M6!L#++FoWlT6#GOuLV 
z63U(sP~wyt_}0uJAFsF|d!bBu!31kHgpAsk<&apWk<%S>2nTXjNx615ZFDLLWKUST zDa|j!yKQTd5E~g{n2}aO7(J&Md7$Rl{d)bJlGv$*i=?^p4YvlH9_1qBM3j0YR5e@p zDvO=1s+{zZ*MvVlcZ$p#FgoI*+ooIWri}Rf-2{Ndg?_X#*?Zh7MqT&U7|2Svf*6^J zI^kYbS5X%wVWqEXjm?Q3$?c_o+2~X&p?{9DQ>2n&y3-XPkywku7|GpAD~#UmdUh`W zVLGmdCKfHfxPs{k6t$ORb44=(Qj%C*kRP3 zyF(Nz_U#u0L7ANQ7k$@?hw&qU>-u{dU3ngB?n?gaBoZXCH8}osoPrQCnU=H#S^S4z z`$MMnoNdK_x5I4U;s7K2saF-Y($GrCEL@|WGRG2`g$dq4ZfhIuc04!&&;|&=^CX&~ zPLWOtTBAdF>X{6cM%W2k%$-3XkPs5wBu%Xi`lJ}mwzKlfYdluOkSw&v@O>N{P>`aTg3{!Htg7DB?>ULZbXY^lamp1@xk9K^(q zao6$7DO189MJ(p>2p4>9W*=TNfp3XAC!Ki#aeXk`QgIUSyqlE94O3VT9?Axb~9)#7>8MfdO@-vF4TDmW`3qsfyv9CPKEX3+!?|$ zQ6+f=53B69Vm-rI1Mo2M4%Fzgf-P_?vOpYK4s$Yj~v>r69_ zCp2JxS0(_)C?k$-I1w-;y}4D_2$U@6&;~-hP`W%zoA;!$kZe_OLjQDiVRm@I9T2dS@{H z6MYd7OoIJ>AOW5WQmMhe!xdxK8&t3R%1xm`Klee6KF~PUJ9CPCDK=K$s>se#^w6XZ zB3Inx&h$P|lk+#+>QD{E69etPoHkUX^UlI3kc+{;8jw$mH^J4AQY!>G zdV72yYp;!Tc3 zEX#T7(S}&@6eADm*08@=Bu*ZE{0x;x1e;xo@K8WGyJ+-Q3F<{f*%eZdopQ4PJfX?& z{6Sp=B;peS6LZ;VY}XgDk~M4cV&mAir%B6<0~lmvbCHbaY&6xLTM zGt~auN(GU~PCPhAdy&a&eN(vyW4j8^D=2t@#bWa;=I9}BaB@+2Khs_)-sgStf6Q|m z1uxEhSw%mK9j?@{CQewP#6c=L75)iX^^`;tgf)cWC@I<)#v==|{yD$~V-BOJb{KD% z4%6+qFm%>^=XXNent1gPau__~!`1cA)p``rG@h^H7Brxqd)Wl2SFUUq=N&z)AVeXR z)qT>XCoIaWm;!YaID@rteq~JhW20)$QYRmY?IAVdc$CVZ&vxiTaFq^|Pu!)0s zt2GTK#BbHgU>Bj?1Xt9SSyZAdws87Ohz2&ozX?J_?KK}nr3CfM)2uyigq2M8l=dS8 z$YmXIcC%hmat)|ku6a(-(ea;iSglbTf2S9&c}C@bF@W9#8^ z?+y}kovj0f;V14eOn;b6EDA>zdZ$h~I+CJLBsCT)gToy1zX>yY#Woo;`o?uv#c)p# zwo33Tyl>L)E8%>G!?J%K9yo}0siP>`iYDT%smBYMo?#qdryR9e#dVL-iDAW{jO zh`ug)senkYzpg9bMA8nWFKCu>l~!9OKBwbtq7W8U;#2Ps^nVq422 z#axH-AY_xY6V+Q>U7!z+`GhPM3n6)p95QA0{;~DgeY}0sr1O^{UN9*&t^fV; zfal155N&(mBA$7S8*jS^{>~gJ|3n+7eM;S_mE)Zjs0silb#4u2pWA}`y5^dDN|lveE~NgKs))Rg5`P`c4D zEeZk~1m!mAaGZZzI9^y!#9p1@h%x+Sc5+nlvKKZnFZLG~Up#ozgq(uf|75;Ty2IY- z8h+|}Oq@d|SRDmQ1ez%?+%J+9o%F2N8k4j0Pa(Ie=jx|hvoQ8D2GagHx8AGzbS*!< z@6(&QiLZCzCI-plNxW;(iiE4|j{fMMrUI_(BQw+bnU?lttct`eU&QCQcHrG!J76-+ 
zig>1Qo}`H8wi#x>3mL9Z4w|U!Ap>UBp)E}m1#a#;r4(^QD!3s07tG0k2t*=gWOjn%^GzUCCNha zk?#Dfk2sk3tfo;@c`CZ|Lct6>i0yVCb@F*;dTf+Ew@6;LPny5dZBUXE(V~-B7g<>d=)>*mOkD9`@cHXx0ulHe^??;Gk)X2W=%$%@aH$Cq$o2V5S8TvuZ z`1e?=43}x6k;JK$7$%I-2U!e zNw-b(Toz{u<%M3uQ7!9r@hmG-wlNQXGj~~-aq0}vIf@BIZa4g0cFVNAn;vbp z=U2n2Q=XNG71XDWAZ@M$$9*Vc?4k2v=n0QBm#E;I-(SqM$Q+d z7Z?yAh<|h(7wfSd5qE(a1ZyK;8q6s*#9+>e;8Ll?M0ZcZXA^T6md=cZ0D2JlQ0MEd z%_${*A+>B9a&^2Rp-j%|tFffBBx8+x#gu2YP33b(AB1?iBTW$cqKbBpiUk;hqXwf? z*+(8@NXa!ka{=-g$v^li=i@yMbEL~0CY7i?*GE6Mb$7$+03Pp380B{aD7dCn@#xW( zzLa35xD;;LhvAEAo9dwXleo2!KHffcc|!mxl5rxt*us*k9)KNLhn(5@J1t6?z}q z6iS3>X)x)nYeZf^#KU|XyI%Cih0A}lq>_gYH>&#UuO-ar=Zw14z*z`?IGUT3M_+z} zf~UORf5`hxV+892u~ls4ZM8S1ZW*F8rOzmBXuoegVVAEMfYC=Zc3G$cm2eYhlMrc$ zA}k~^H3?P}ewGW{7ns7KPa2LZQ2flML`1c$UsnjR|LXtg)R^|3G$MTlm(@u zfJ^_OfXf9!ReH+RTJG$CARE0v-)0-dv7~7V!i!Rm`bEOcvy%GKzQ;#9l{`%^Jen$}JVJ zBZ=kTv3lLv`oq`1Z4fmckJwy)!Xd*hM!>IEKs%Xr)TA&|K*-%xzGfZxd^YW@;;2gFG5Ac6dYXM-|!+zjLP4V$JIN zd$r8jo-dM%zdTu7k4i7?@U|hY^11VDpecgETa9s zvB^3q|M*3LYUp?o^}d+L>7ROu6jEL-nz=kt*bJst2mZH@U4|kN3v`(-HE%)aCK)UBkI-A%yRVW2(FL(9>!HeLS~3Q}Vj_ zP(6om1iZ?&|EV@G)ppZW`-);{t_A#tfW9?2$!^u+9@r)BZHz`W_C(<=rY z)VE>|Xdwa{xtkk+2p^;{gWZ->6{cRpT zCoe}gbNEScZ9?V?OO~CHaPsbPt^5L7c zZk3fV&aUUD$^`!h>G-43I>bY*iw#@zpR!fSq#1(Fp{p)GV`Lk3_DQmoOCGJfdw(f} zc9cK<`||L#io(C)Q!xKe>YE%psXEmvFa^T_%csDEu}Bp{!9q&@=JIv)JPm2dGJ~sk zc;=so|8HA!n($CVp2L&f%hnFv@t1tro20?VcyJ zE(clyR4M*)$B5J5TIce$a)o#?3=&8{O!;rq?qc6loG)Mxqz`NGi1U zcFcvj7730^SoFfS=NroWx7vVIVKjciY&V_~eai@Jxa)kBG3ZAr+o}AsE~upC4gXNg zR~}Y@u-9Z3*C)wFl2%{Cu4?dn+Fff`nCfR=J{@lOUIz~RDiT;?B1N?cNZZxt{0T}; z^K@u@9-A+dGbUT8dgJGt`YVEld_vBHnJUn`qG|i3maCC%WZb;oX58KED1@Z}x;e`z zUj(#OKfN|@U}I}(2M;Z4x=pw&{Ja7%)$5UuEXJe&X{&q#J+Is!7`wtsR5*E^zc#2? 
zPm#DzMyG0j7O7u7aCqb@JzG!t*ty5&L87v3rKC;8pi;&DCK~bAL{E?ZrgxO{O0e_B z!T6i=>E3$ik-4(rMW6QF=zkX5{tog0=p%lCv&vWOD<|0oz^L^Dv zKabrC8nvM`-&f^V^Pf*qCwjhFDkMwRU4RnfX&+TGPO=k$42!a*;$u(Ex?f{|7xl!oOW@?7_E845Ulh1m&6iI$ii=cdh63hkW~8 zTYna^%w}iASY>>(zdLo@@L_d-(7UurD?1X2x@aV((P?oancDN<^KN}<%|gBW>P+hW z>ANpDW|zT@k(;EO&00H?iN<&|CWXJO7cZYV;GCQ1t(kxIm|SORS!M96KVI`rN)NQm zP`6km!%AN{U+>ZaG;Uv}Eb7vGuxCo|V5E!WoNx7`{Al#EzY z7nYn(UdT!1;>)*xX4x57$M*yRB$TBsBAlX`A7_mF=C=KJ~5J9w}L!9#(22q^C?KJp*~h>zwW%FFIye zeLOfNoeZXj4vy(XM0$%S$Gx z*YpnoJn;DZcmCXR-6L*quzN?UQ9d_ImJ@9W+;i#Fk+E!a50y7rldMftp8ohWQd! z#!I*kE%Sr=uOXZbKfT}CS@z7-VgdKhD$HUQg<#03ZNKL_t&`b=&Qx?T7uv;+B$au(tc#f}!jJwaQvTqMu5|H&)qB?^rwpnKwC}67&kjjPHxEUEyfdOWxAi2OZI48vTRwQ% zF@rN{{Yw*(U@9EQ#xt4V8~cCr#=nv<*CsQ9xm9CJ;O@((4X+P`rf7?Jvg#Twz;jX- zyeJe7|2r8-J^B8M_x|0|g8$G9JLzh`I$jx!mM61xjy$yg;JVmPJ430#Nog{XOhpTyCQ{kc_n0{9u7@u_=9~X>X7^OKqi?^wYt;MQp785tV=eVOlQBIOb(y>z!k@SZ~t%H_>eRBf)3H?EjwY;v4d0T%w%<7O3K6| zk#I*MwPIx=G3PbW)kwSR#7#;6d>j9jTNYZ#JP9frE9j=DNq9eaCffxLHNn?7f#9w`DXGI3pYi ze>NTs4b#&I{GuagN4hFbgI#FR1D8zu)4i8XyZ*qlZhitim$cVal@tt-7dyH5)cprX zhK~7eV?+4x){aD*`cgC=3uhzg?9|y8OkKQ3!{|#D_vu(vI>r1P<@%!>(wFbOWZF$j z+E=z5-E!CBLW4ohHZ-HpCJ!G}*KmpUTeMWBu;xiOKlbd)PkvHm&eR_Ms%=2- zbK8hG{@v?GUD{X|-CKTPrH(3vWCL}}J35b7wim7syb(JQ{iNDWe9aE+B@rYx*?_x)}VX698Uk8{2NRh;bvBM z%>3LKz2V-5I20}as}F{<|Iv&rna!kcYKq6crjIE$P#NhJZJh~~krdKx9mz~Om5vW- ztZNR3Bfq@k!s9mT>i87qBbIlrqblyf%Z~fhkWloV`grvF8W^_FOO!g5mDCHKgm_GF zZEf%DR2^CIGpMOP`WZQcyC1%6+M!4$FSW`;`|2n^$e!Q4WJfC9uE8as`WmE^dO7)s z1a4@~eR)FP?V=VhG-^cSrTb(_bmU`R6aQ;Dtipo$75fYvmL*@ zH4&B(H>iy??+E_*uNAHe?!avrQ(N1dH2CY6)(o;g= zr0Pwyb|yMkwkI>{91V^2(Y@p0@Ne(Eb4O(>*}IA#^SLX@4xie(~H&R z;w$~ZBYCqud6?bAA3Q`gNDY?EffXHz=iI6)*Y>XWV&e~9e!^byNZ^i!y2zUAt-;+$&R%m}rc#arE#98_Hsdx} zU%J{u-6AAl2l3^t9UrV{Z{K8ieckWn@Q#*F8(P~F3H3$#PfF+H(?Uk8)FJvv=}YU? 
z4f^5iZ%nPX!Jvy7>BUw*=}d4p$u&gzb;2V(n9~N!+Y-;6eeGXX%$_|v?8(xVb)x>Z z1viuHMsKb6r(5gjV4%Gt$$4$<+v=YN%LLr%gvLOto7Of)!rSZQLVVfE&N^*TYGtfB zvB>?%Hq|EhlY04pdf7x=dTr3;p;H~j4T#U^eG~lOeBL3$CJnB;sv#Qwfd&cmaG8zfqJZ4nGZyvey*gXfPU>SMd8QQhbv zMdCwTZ&Ns{juezYp}J@^SQm>1qq4f8O?4WfBF}%YY~=zJ`sQORolf;0ZJg$}6LaU( zgCT82aA>_r9sf)vJ6N4ApwTtZ)DVj>E0muIDTz)!1j5=!PJghxJtGD0Hzc0@BIUXc zL`O0b&vv5XzDtk&vXp$Q%9$kRl~JQ;_7@IFmZ**l$v1^#T$}3SagD5ux~b*bbe5Ce zFc1v?{CD4_wBKBrP1S*GR?8U6{L)rge|5%*m*Z7=P;AEVi{2DWrkY7Dh?q~>WSj%r8T3W)$j>JRQ)_E2__`mEE-i1|M&%D-%L*SdJPQO=sVL>iT9+v>Sx$>pzzm0)wrti<(a zd}gR-(os4Z3bn~Kg;iEeI?*;)HpyhJL%JI}sQ!>lF!-}o!W#nv z;A(HhH8wU6|=^!0|U>J2s8>ze5<*7d;`Uy8ji zQGy$6v$5-p+RK@!EE{S>%cwo&c-``%J`xFz)Dl;w6#PK(f0KiHO!oPToUn}B^@xmG zTYK)>(nozbJOqPzVq86$1 z|5gokzGf`nRAbGMf!!g#6Y|6nF%Vjz*@7Bx^pJ-5*V*{z3p$okl{Sg9PL{MHEz>*1 zmO^e%Ta&eh;DNHQg3;c!eHCX)`Pf5I_8|8~#AR~+B0P03$K@9QtaYQ@@Md0;8HU2Su> zba1gc&o|}3zpF{pZK`*v+Cl?RHX@$b3mp{B2EIG{|E7*$aPk}!q9Tv$NSRD}H;Jhw zGN%@TK@FS`Gr=YjNe8c}t~=B|*UBmXM*j8*9Ipm%u@Z8SwTV-@h-cVI(v2| zI8|LO6xfY3(K(%JGNPHHid$bg8>V^#nqjMtvOgVUIsT3&5n=;t)8HQpMr8XLoReZd z&1}(q9qQTk9M1{2h63Bm5=JvAVs^kpNDRN8%qHC#dplBUGGMnh+0lON?n*5CWFTYj zC=ifAJ%3EgJtv==+MAWb^8(O#|va{i{V=g0;awEcBn}Clh7L~!}$~0Gc%Mq zhn(^2cg)_Kb_jn)KI16aj~h5OJ7A)=RI864wk6Z2XyBeMpYrWN`TzxDOReNg@ z72%tMg_;y!P~D8mB=4gu2jEd$xIVDpgB5LG(X1=1k@PES%xRkXtDorK{H}`zJCdpE zmn?1lwfbd5FHV9|K&CB~is{YSYb~>8CFqngoYZv-U#c5BX!K5Uq@3C1n|PdAT80po zVpB4Wt5&vl-rkwbz7>wJ{~I3G5Q!cx<({cDO-dt62x;VYTcTsKb=l37|37w2%l%nD zb(H%rKI$M%pe~S&X{nt`$jBn<2Am(gTOIw%w>nooIbFw8Ol>T?8i$8ZjzmKjtDZ(J ztFn?0LpUJb9@$Ij9>~oMitHy_L3vj?m}C#}XDsCY+R}ReB9gzA2|?Lxbm5Zmxf}g* zyk<&EOX^P-9le7((n%a77s+2S;4oFrYR^DrgtLu2Ff#drZ9|(iOWmK@keVi|whC$n z^xI|cq-K$JQNV5YB<{Xo>MnJ$*pIZAK3G0Cp>7#dT5Ts~9+d;UtTmK*todB`M~Yk( zQ`O;5+VQ9(>f({h)W%~vQ|XY5I-t6C9~O`Atwgs6yua1KjLr=LmmE7r-g6tAnp;G| zkb){BsF$YiYW8$hXGh`>ote^2;fl%IKGSM;XW8`n(sNqgN#>}3u{DwU*MZ-Q*p)7e1sovoMo zvj(K&RXaIF9dHJ3mIF`8GK2SDI_-Q-zNA{6S_p+A+CBhM(qTe^dAWxc`!AF^%%)Nx{0seDp>` 
zm8+$Ok)FSf#@kY6CoIv`c`wxDd1$*frqwtRoi~=#OOF(_F~CzMeZc@9aIvyzcYB}fEM zv6@V*H!`;c6Oi%ZboG=z@-69Qv}WVHLC5KK5#eMuJBWy8ovC$lXvXLMeTuIx-mq=;sqI$N9m-G4qv zeeJ08Z+(lhDVOUA&H84|%EH%e_g#9-$ht`Q2kME;V$)2f5{dM_`a12dtFD^MNEMtv zV~XQ89LVgfrT7M#eb8h$wREF;qIB@AoZB}JJolD4lbYjcF zsA|$o%l)(SZclag?A*)XNGLKvqu%Hw2NWcoX2s*2ct5fb}jGjI=p3;bJmVE-;cBRtX4f2OdUO0j9dt%;I_#tR>{rUj=mix~0olkV zYDCR~+~fi$B)>>W;S2UX>o?Bnx{og&%r(%6NsZAdE!TB)B(rzO&7Q9OP3*0yad^Ya zT+kUiFLUMxbPiQV#)6T|bnO^yFQ>)m9?+7H+1a1k+uP5k!_HBM3%vP8DxlHbZJTlB zMT4AEC>!~`CRwM*05po~D2%>ZjD{oQub;V3-HEdv>2RGE%dLX+nMo@y1j!xmxiX?m z*`e8ic@cGh#61^HJ4*YfpVIQ)#p~T8vj?(BQJUpQC~bB(xrh20F(S7|T%Spuq9JBi z8R!w!9B?Jy$-k!kGg|z(=N+vUZTfQ>&jib)L(KhN5X-+_tc>@8`bcc&5D)GW;=}<#S${fmXDm8t4B=i zCf}ep;g0!?&T8`qkmhUVbgGR0X?WuEb8Li9hH1{LPdzm{ebm+IgJuMN|G*{3+@>si zJ*J}NblrYqVB(NS6NAojdzqtfMPyE5y|+5Hjt!1)rsXD%D673LHEedFhLmS0&}}nJ zn&KQqC>$7uEy*VIBpY-y#&l-KTW5ZH)KM*WzDs)N5Hyp4eP`B-(qC|f+L`pGkGn27V&}n)4SUEZC|1iqwSifg##cuXl!{p+a98%9#X4)~Dio>hbRmnSNCp`p}@I zZ*=&xwVG&YqOxC0*8N4i>^ZjtX{&)b<8>lx38KzHFrrzEjyBi3iJ07)n=NG0f0xM4 z@lI@+ot!8IdfAjuB!z1WK(kUg-e{C3w#FZrV8YH>Xbncun zyK5wp?$q|way4cXhw|jD^=BmMk(u{MFn-CfP;*N*dH)4RZyc?U9D!>T7YUUgSI2s- zBa`{|;ry`^5e|f_Bk|;2nemFj)Vn&HtgVSqgBp}lbgU&XxUqdiTu)c!bBW9Qu`0AM zZd1o*e~>qnZuKou;_t8DjzbMF8@=P(pWA5IdJSKdv#sY4hxkuxpLLFA>?38wleCOt z>COIU6NWtZs_Ee=Q?5$>?)yh=w_a21L<&-eNob}X*K4Wg^d7~%P+PL!YoY1W=O+1L za7cah^NrD{wt7-&4P;SXdUs}$=N;a1+Z&FLGiK#-Z0Li*;m|_O4o7zA zkfzg6HXt8e$EsI{TIcB6lDb;6z)b^z9o?9k4d@R{g`;v>+8s@O?;tr{o-dpEOASi|z|D_Uv_TG&hQRB~)|~Z!%`r|D=LF)CY8YjqN8L&~1nX zIy+N0|LvuPw;iZL=Sct4r$3SmSpU%!7SjA-IWMLqvkWAdWiS^lOzCAm0UZ0BNwgiS zsq{ASKxCZus_DA!ue52~Z9%GitCsM1am^zv&{?J!JG&~0h(60b9peR26S9n4Mx2c7;W^>M5$M^kihlL*Y? 
zboPQ7M1pLKXd7<0UhMzy{)>(t@j++n9UPg%d~vXRNhoAz&(lgyk>20=9mpS0NHQHe z65F))9`RsE!=zj5urJGA?K5tV&h6vU?@76eY!-Bv&ZG|7aLDd-Mx|jtM;**{fE%Z1 zkB?(pZlixyRyS+rRtwGbRo8HKdLT6z%g#5F+3*4v;r*n;f&J@4p$S?BGN$W&Ab;~U z`PA|iiT8eg{!tTj9xW@U>W-?oa=frs=Q>^yY>(Clw^H}w^JQmrq)|AzBHQWY_o}Jr zVjqpj>9&fe4$TUpI&I*}P&Ls+V30F-BASNP>l|%pH|-traxW&$zo^b}etoR|yK-KW zv|XYXv3!IOYPxE=KHpwA$`v%IKKyy*+k&rtv`o`r!&kZ4D?a`0TePpZPh{A{@s9td zSY#*VpX~N3RUb=c4Wf@F+LMd!)Sx9jJKmyU9x!Ki#oR_{9Y6K5aacJyYNp&xhi z!lkW$J^sQwK7b83!_wAQw=8v&z`eIn*VDP@Av&_V19}j6K^Aq#rqZcdE&8CyW|6BE z=gmYlz>MZShxrhRIQgYOsZKlG%^XaZ@^clMI z21@ctPIQ;P{rn>~AJN#b1p_3bLOpL5!=d*y$8e4pJ?3at8$@HVopcB!u&k{kqf;3g zU}ah1(35N#y1K-xYj?}4YqMT5;z(OsZh7~;A5*&uL{wv_3gFm zBsw=bsV)|d$suvC$%nj8B=XGdFV0{2DMh?CY>C^5Rl$4(RWBa#yn6edHB>y0q9Yx6*M@QE4wk#0P8~fdc)z_bvo7w4a8;7C+@&C(3ZLRZ~bl%G8 zoh;-Frkh$D$4BJ6l7GDL$OH5u?zr4`a3Cm$t$eBb-+c9@n@Kx`)ryt_gd^eL-u2OF z+-*mA$GfDn?S>3$x>SK9&pZ=g zLtJxI&2}RjC6ao98Vu@dqimawG~9(VyZT*iCy4JgYq6MI7tLrBL;pI7>cL>#|Ee*V z$|S=j@>&^1uCis#vNNnKhL33c80E?-u?5Cv>|Et@UArzU#_{voiE7V{ zX&7Y>YGmRt{XZ8kYkQaUGp%8~%-Ro~= znR-e5W-vNh-D9upi_Oe57&FW7P#wyvA4Cljku8@+xwFB+XJ}7)ruNihvFNFqc@5DrKB2)(e^oQHSjBz8`}Z9H z03ZNKL_t(vdwWS%5rN#kn&VqP6V)qtH|iV;+pj8iBRS+9q@#}&)D8MVKT4zP<|>U_Wj8D1GIfBhuDOeM#C`?*-nGpK>!Fd%@zRD{x_&rnlR{=x{Yb zgYvq2^d??c=vBUZIqD)S{9!3hmRMbU(`+UOj*EE9W&IV3Wldf-K^5HjaHD?bzia-azAiOJd(%5;<)D;7o%v_`F!lbjMB-UjOU?{duh6@rzhiHQee34u zR&*r4s2BWmbrdqHtvPr4NBuRW6_!36Nn}*+<~j>}e$$}3+wZ;bm;+p1`4-2M${Vnb z*X@mnL3K9Q@Hk$lSec^~`>$rbvDM?Qb9|m|au-R*9~zmdR*`P^(D^w{S?EbRimLot zcK_}5CzdQzNTc8k=Nv4*y#(20=#ZhBg-&+H;AYA)hR*a#eG29}GDoDy6Yj{YuTM`0 zGc%Oqbe#o0*=~^GaTbf0R0ZK?01}YS&G9%jU}$Jly}p?Ze)#|Gop+Q~^|i;(xqa?5 z=|v>iKtYTYHJC()7c7WiB+djRu9ddTO4iaB6BP8B+?m9OZ6%tRqUI$UBSm;>6cu9$ zXr5gxL{MTVf`S4v%rNb?bKmFtJNHaun3>C@@W(lD=iG9BZU6Rfx8L6Ths=<78h1In z*S}uh^9V%Ma@)e(pbOM+D~MKAugE-c)4X%(@uhN`qPv(19GMJiv>8|ioq&Ya0 zeh+SYZnT;t-$T2M@S&n`$jL045cA>MPnmKg@3&;0Q`Rx{vwaiZ`War58V5ZZ)m~u2 zZSvh@HnmC5yMlo$9Tf%1mymTvBQ-K2D}Z36`Q3Q$y7Xo}3(@M(hKRcenIvU4MOK)DirW*`KoSfAqhJNCj5;@cP5n 
z)X*A2XeM6vT~fm5xuyuOQ6psM@sfwM2a>Vxa`emCE30DATO6x3AKLK@5XHnfYfkRv z#A*d6OvS&k>kWvKU}y}BZw!W6Q#HG~+?~I&A(USvZi}d;F(x|dQZ!r9Z8Gj}XJ`Kx zkY6^ove<6BR;AnbnL60XVHG(_EfCm}t#2}tK}BMX+ky5E7w{g$XJgH7aygE?6GZ&% z)Y9r%L$Nh}7a=!$yg+DVO}(&I8r6kA!nwiO;lM zmrAA;{e^V$PBM`CwVpLLMyZYgQ^!lVH{KmgWoI*q7ZR;D5CKaJp3r$G$2_LX ziX&&%HRk41#g$s<8UxSf`Rzfv*y(GSrE!sbl)M&2q38+H_aN|240|m^ZU?66o`r&J z?(|43SjPTJTrl$EJPYWNLEcy{n`;;S_k7z+ zk(80G#!GtDrFr9h9TeCQUauX&P;}x7e4S?>J?Jz7oR|Z%ZvkDAAZEQ_G&L9%L_G&H zX)D^{UEU@p$yb}1iRFLi=LHWgnsYzG+zjCSWdsBwRglN<4dreVvDd=dDh~xSKg6uJ zt`eVQ2_vE=hu!58R00y@Ef5QyV1y4AEjBSUQ+r#J6EH+6Z%sI$|DJs}J~S{2q#5tJ zjj)w()K6<`x;e8^g*|I4quv7v4{zYnr@ z8>E3+DEcaGBtuj{O+J&k2OPbt;=6Ynj|Q*)g6~}*U7>ZbLobJnCVH=;l90rNa{c2a zU#F|nh=|fAl=3mWpH4bIQ{airC~s`pojh~MghsJu#%9U^h;V6f{A(<}DQx!0WFtuk zKR)LtGv}j$fVd!I2S(BSUQsG{-2N6(Rm$g>K;8u!_O~SOKQFP&?BuIshkEVb`8P9P zwv$Cw4U9CpH}j=_ArzMO8(2Pox*Jyz(|<60!IdyH>9RZ1{bJ#%O!OJ3|026=23Y~U zhnW8*q0DqLq2#lK^|rrmmO3j-V9qe6bE5hMM1e!VCLpAVV`mHl@o?7ED@hf*w9Ey< zM|PE*H3UIuGnwBXCs^W{F(U`wM*Q$)80y*J3>Kj-^PpwBHh25NM{b^b%dD%H?yy|V z!@#JwefCa!A-U5;nDdb`ed%KT$$q*Cp?AGLA7JKVfWTM=A@${S9#PJO3_}g54m=6p z{3>pmhB>>Yj2z6Q|Df{NIJ16Oa*>zvMXV4L)=YxX_(V9kqJ1A|rD7h*?1nT_9hexJ#lkQI0;;}?o8twyIa4j9wZXk3Xd%+3XbNh zx8muPx!AFZg?s_7Kd~{v@^6rKDY@|7AtVn1hM{Ze{Owt@Qw4gU22Fa!m z6C4%F1w;V0$v2yO2d1EM8we+zghzqY)*;0MuA@wG3ezc*>BJ+E5NrDN#-`@{=9ko7 zHQxKnBVr`w(AjU}vjGXAlFot_nHRP+IW2}rc=Kh563k0S7ilj6SOoGCs0kmJDxVLQ z5YxH>|Lb)~aJS-8RYJZTNv4G528zN?U?{)u5F88aa(jY0fjas_NR<1)aBgsYJvPpd z%?`iW1Z)Qj5qYHSRfKcn!Z}{(%L|Vl1U%c?O+5!NS}gHLK&n5LW#0*y0~rxTu+FiK zE8JTAgLPyKUz-s9u~wxRQ0kHEtHzYZW8YNkVz!vbO#iSgzH;v6KR5dqp7iR$pu8kL z3JrK%3XGy_wzLn{z{ZT7k>m8w5+PM60?Z6`1hmN5Wg zW;mJ@ux1=(cK<5-4(zc)gg!dWV^*Ok0nCX`beYpoba!IEcM%6n*2HL%l%E!}!WPwoG;R`t@OB910^`i6 zK#WT4kW!Dl*P63HVmQ8t?mc*Itg87+8GUwYC@33}UIsN(X&z&x64wWfjSp|*NY%9HQJ!eB4Gze&M@!MnptuAHIOv>;64n%p0+|MD?-pPZy+!WvYgNLX1(TBT_8mx-uq9@gG9IV`-<+`IS6 zfIA0MV?&Gk|D0}p4m+xmNwM`B`%pHWU5qdGR+Z75t6#mo7_heq6bZ#9GVVA+gz7N% 
zU5M|?ySHw0O&H9%{ETJ0R0h1cCye_kZc8&n$OKpQ*TYZsn&~jy5=|S7Uu8`_z}my>r%T?Ozvk9U7wUWUOY=~z(=3nxc^{BDX~T+|$OQ|Q zy+y+hzyJ-CH8oKieQceR!%^ZD;WRVG3K}id9Whm(Z4dzU&mJL9g382dHpdioG%Nn8 zqhrI<$(9}FxDF-=x|kI)BiDm*8)4uI5tcg+NguOj_tFsUJaAP{`zx5vDPUs=(wXKb zzw>Fo&UTy$oX|!M?kzI18^Yq)foDOUxlFq90B0o~GpX|kIo@>cf~7y<;ndjn4d{J^ zWj%7^#S@M$FTD=O?~kxI|7YIbe87f9=c2v&T*E^NUZ}|_7*J!oG{A=V0uqis3Wgz^ zjYwX%Uz&Jumls~T8>CemY<)F~U;+ap__}^2Y+NCcNV>#kchB^NOa3gBs;KBJP0Ey0 zX%-iLC{M5m$t4>ohbGwM4noxq&JGt9zxS(}sn>rev05s0>s66|_wbLa#t$1Z=qlQ> zA=2N{{8mG>v^eyL$0Wjm+;Qy70AdMApf|08^0Mb=u_;d*66p7<2Wu|=;q1g~n^ms; z^~WDqO&U>h7!lvr&xf7-R# ztoH-^l^hSA9>=I<3KR2J!uLJ=BJQc?y^Qg}dPwd_~YbOH5 ztE$V@HoLAB-Zz;(xCyD$A^ta|Gw)omVEHymxSj@;E^ff+GVB#L=@}y#fNbET-d+sj zd2!dCy-O0etZr0GHD8+=n>2dRgd?h?b`3L|ZXK%4;I?2|AqVpQk|>qQ`!Z zG>EF>Pt%p*VhoU$Q3I`rw1dr;K}>HYnsj7;XGFX__VM(~@2fZ72>iHe(VWYR zizADW-|Q!Eo_8!Z;-6LiV0qp{Lsgo~2!J(JRgo!^Llb0|fPv&N1=;K?5&n|lb$ ztzUpUUa{#@NR)^Fp>ZQ)vj~a6%DuC(cCjUj}dx&@&*^5uCZ1Al5LQ zAfMve%0qBuz|Cv5V=~QDP_)!h_{4@jI_G!glk%kADL3deotE5SK-7mVn_UTA;+4 zo7n&RUtM_r%2!)XgrKc9YK$t06qReHm8(cp7y?Xq^>_$Fsk|)Ae8Ho+%fJ?aC}r9Co_9nj zIR2@heB}Wa^+se_3=(#~;%IOpE?O56bu>a{0XfBNvtfvBe+M}?u2U3}?5h=rkYvUq zV*UHY2jF&as#)^Do%V>F%+``j*GP|Rk?@h^(hrj0=ZvvO-UhH|@7dRs<~dGd2;CYE zmCSm4(cG2H-)n?Ef0&H~_D7r`2@G?ejzuF^gSy8^r3Ew+)KO`zrz)?`CmWLXbkNNr zL9^rKnfdV69ScB?#_v|Rl8>-UZ;rK$HdF&U=Y zJzGw~N+mc{SySWT_g>(#1p!N^zKH8799cp^DO>p`@N}eYA_ij2usj=NnUyTPyMPJ=PYuZ&}3Y~$O)QZPs<6C z%_&M>=P~0|AmTP=Tv7_j$erP+>P6TOt-WNDMg=MwSQfv5qf53V5{p-(vMF-O5J9zb zD3@zvt+pRuZ>7KUnM;ZOX}f@;lAI{9{QJCaO(2CyZQ=<-$kuEaXIJnc=mfNfQEE#Uc^lQ&H|kq5AL_F>?j&BM*z50v zov|NU8T7bf(%=L^%w`bw=o5hf8&BJ9|;BT!bt0T5m*1|$=b=C*x(CddIZiEOXsC85!J zo|iNu1qtnBak+l7`9d-o<<^7e%a`wc`j)Re10lOmOoJsM(?Jl%?l5*zHUwu!f>A?Z zRaE9b`XpQ=!Ic#iTJl08(++s>d6^Vv&Pn!wdQo2<7xEdc4m_~1 z<)4S!*LjkFN!V`HHMA^Qn+?2wfr^eZhoX+v1~kIC4qnI!fBAeo__Yv*zmI-yMZ&M6 zYRqw=kthkrNLyx7QfN{U)k}!=?-kLRrx#gChuvLX*M%{%dzic4_wZ7k9VSL!^zv#| z;eqLM;FEU3wK0O^MjzNEC8wp53b&vdW@NZ3`t{z-W_CyG-!h^|)ODr~k 
zKr((^xdZf^7OkH1E>AisFAj``3mz-$2II|OhhtMOxDopOo}TUO385$>5iN27JjrPf zS3pskw0P;OkW~r1<>fBh@3AbHllc8uzK`H&on#Cd2sP^}Nahe{6OmVjKz8SI>h-rb zcRAsc;Va8nn&|@z$E2o}qq)IW2*F)5u3h%F*)h+<&8>Hhx#20$0HTu|$l$h;1d)d# zEIZs&`}bt;&_mC*-|#Y}I{uM;Om@SwuUh z*GULR$1}-sqTG_?PBVYQZ73(dT%k0bUyvx`~ZsoX&VoGv?Wn=^5_fN2Seh*j+SIq`dFM8Vc z2>7D;k_{E-e|K|?JRey5v_DdhK49FmJQGq|L05g&XCgdK1Y1Ud+S(IP;DjXK4{~wW z1B)C$J;EN)luMMF2_cz}B)K_VamYegqoAncpXL$WO8?qx6ST^K2) zl~fAv6XwxLr(RMYR$$^%{d&*uN|74WMrAf_Jt-n6vm=vY=n20o;2{O0W9qZXk82wt zyvgS|b1$W{`$Wj=n$lMiaCJQbKvOBH?l5zNC5#z-%I3;gTSVR2XR)p-Ak1hVI)uXP zWsnr!WJE;x|#bUg0=VCNzw@>K>9n zx|?I&^oUd*Utpo@95+jnC}Dxx9pA92wswn(d3|fGf(bVHgW<9>7B2l=%5lEP8h??# z=Cp7itb#{Sgj}{OO}yYp?(ZnS(3D3*MaUEa=Hu=r1q*(%ifXPB1^bHr&Mktpy82|ij_5D%3N!i}Peq=Sgol5X<$ z=4^V}^aaaS=|{8^Pau$#Gm&%N#z=}}d_|I2SWKHf-0{)AFU(*3G{VVOSqz&*>(-{0o#>jj`nunzXj1wtIUS`!4hN+pA}lV5~Zi zY;0^Kg+dTWjz3`4r!%j~PZc46wPc8d*^ESsc)M|T{X7`Sx51aUATC)L0)c`-9pow^ zrqFCfHVThymPs`OS^9`Ehe0c)(!aguU$gWF zF#j3g$Vw#h2KE5dPjHuTVVFK^SjTH2P&Ah7yHj|c48O9<`D2}fAJd*J`kiL{HUn(ZyiZh*}+80JezU3w|lf)Tk z%woPx>?ZvS_xbJlc>VHueSUpEK96^N4nhO}JX!kPF(R-(&^~{|NsYxovkdUYfO5?< zcjDVE8AsK?3JK{}Z^Ih@HDOom0HPgjRSP`TGwLitFW7O!D-LnT^%VFdH-dS+mM<(We5tmp#o~ zZRicL2e)BlVro8V`YOk$6ZJXFnA2@7tndU@2Z8tQGDabQ)sgU>YC)}}%r*+fqdD|5 zj?`LNQ$m7{CPt0M$hqpBM}D~bva+i;#L8ER3gq94JAUh7SqAb*a$=ufA$IEsS8Tlp7Eex*& z^MUkg1zQpk2r}sB>Prt;OLW(Q4S%T*DH1@M z&ptmTnDtWYp~IqatD{-_@kXL2iu6qu|Brw%n2HlQ(U+at7VvL zL-wv#Q2VLi*to_C!>2h7Wd+7P9RHrE(>l)jY|IPWe-e>P%cc_+g*J|pNCDE^Hka@- z5D+HXtbJN-Rxo-Xo?Y`ePC}noSt2<_0|dXE`Xz@vl!1SV4zdWNzEsB?!LZUwRt~cWM`EBd*{=W?qL`53l0Q#bWCJJom58_nyvQxYdtw9?*CvR5y!K!RvO>O zH#=?`^C?qnbD(~0o^u@N?-{<|@nu#9dk>3Fv!q+wKbS4oF%f9jns^U+jnzqY$=KvMclPQoX;crlZw$AC{GYe>m7I^nT_RrSroH!DO0g?{EK)` zo!?TiT3w6CqMioa`sk@PB<~)VtZC>fG*5gdDV?#bk7mF}%ngO)*~3a5knq-!m6-*dSs#@jkH;a!{vG|)?D9Rk__394Y~0U?99GHPT|f1XPxWJ@W(MV*R}`zM!FLkde%0<|ro2$TT?c1F^K5g$X6i;oXdi+n+WD z2mB{8!CXa>NE}uQOqn!$6@_ADCL%xt@T#g%uTKG7q*R8uxpG}B)tEy| zwq2U4P%$b^W>IHl+ktvxdq$b4KGS(=nyQip*fKzthZJm^iE7zLCuUIsN2RH(4nS_p 
za)9oXk!=&RQazK_ci1=6w^9UXN2b)@fr=#0wk_TvTP9pqPbVdBr3A1VSZW9uCW+a$ zNF9LqG-m|bSlGVwepRWBRnKltYRSI=kz%)HyVN!EH&eF?2kK21ek=U{KR}BF2x+(R+If1|Uq*VIk9wf%P-oNs16Z|v A%m4rY diff --git a/imagebuilder/service.py b/imagebuilder/service.py deleted file mode 100644 index 32fd184..0000000 --- a/imagebuilder/service.py +++ /dev/null @@ -1,44 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import sys -from oslo.config import cfg -from imagebuilder.openstack.common import log -from imagebuilder.openstack.common import gettextutils - - -cfg.CONF.register_opts([ - cfg.StrOpt('host', - default='0.0.0.0', - help='host address for imagebuilder REST API'), - cfg.IntOpt('port', - default=8080, - help='port to listen to for imagebuilder REST API'), - cfg.StrOpt('persistence_backend', - default='SQLAlchemy', - help='data manager to use: SQLAlchemy, Mongo') -]) - -def prepare_service(argv=None): - gettextutils.install('imagebuilder') - cfg.set_defaults(log.log_opts, - default_log_levels=['sqlalchemy=WARN', - 'eventlet.wsgi.server=WARN' - ]) - if argv is None: - argv = sys.argv - cfg.CONF(argv[1:], project='imagebuilder') - log.setup('imagebuilder') diff --git a/install_scripts/fedora-17-jeos.ks b/install_scripts/fedora-17-jeos.ks deleted file mode 100644 index 177c7ad..0000000 --- a/install_scripts/fedora-17-jeos.ks +++ /dev/null @@ -1,35 +0,0 @@ -url --url=http://mirrors.kernel.org/fedora/releases/17/Fedora/x86_64/os/ -# Without the Everything 
repo, we cannot install cloud-init -#repo --name="fedora-everything" --baseurl=http://mirrors.kernel.org/fedora/releases/17/Everything/x86_64/os/ -repo --name="fedora-everything" --mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=fedora-17&arch=x86_64 -install -graphical -vnc --password=${adminpw} -keyboard us -lang en_US.UTF-8 -skipx -network --device eth0 --bootproto dhcp -rootpw ${adminpw} -firewall --disabled -authconfig --enableshadow --enablemd5 -selinux --enforcing -timezone --utc America/New_York -bootloader --location=mbr -zerombr -clearpart --all --drives=vda - -part biosboot --fstype=biosboot --size=1 --ondisk=vda -part /boot --fstype ext4 --size=200 --ondisk=vda -part pv.2 --size=1 --grow --ondisk=vda -volgroup VolGroup00 --pesize=32768 pv.2 -logvol swap --fstype swap --name=LogVol01 --vgname=VolGroup00 --size=768 --grow --maxsize=1536 -logvol / --fstype ext4 --name=LogVol00 --vgname=VolGroup00 --size=1024 --grow -poweroff - -bootloader --location=mbr --timeout=5 --append="rhgb quiet" - -%packages -@base -cloud-init - -%end diff --git a/install_scripts/fedora-18-jeos-DVD.ks b/install_scripts/fedora-18-jeos-DVD.ks deleted file mode 100644 index 572843f..0000000 --- a/install_scripts/fedora-18-jeos-DVD.ks +++ /dev/null @@ -1,37 +0,0 @@ -cdrom -#url --url=http://mirrors.kernel.org/fedora/releases/18/Fedora/x86_64/os/ -# Without the Everything repo, we cannot install cloud-init -#repo --name="fedora-everything" --baseurl=http://mirrors.kernel.org/fedora/releases/18/Everything/x86_64/os/ -repo --name="fedora-everything" --mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=fedora-18&arch=x86_64 -install -graphical -vnc --password=${adminpw} -text -keyboard us -lang en_US.UTF-8 -skipx -network --device eth0 --bootproto dhcp -rootpw ${adminpw} -firewall --disabled -authconfig --enableshadow --enablemd5 -selinux --enforcing -timezone --utc America/New_York -bootloader --location=mbr -zerombr -clearpart --all --drives=vda - -part biosboot 
--fstype=biosboot --size=1 --ondisk=vda -part /boot --fstype ext4 --size=200 --ondisk=vda -part pv.2 --size=1 --grow --ondisk=vda -volgroup VolGroup00 --pesize=32768 pv.2 -logvol swap --fstype swap --name=LogVol01 --vgname=VolGroup00 --size=768 --grow --maxsize=1536 -logvol / --fstype ext4 --name=LogVol00 --vgname=VolGroup00 --size=1024 --grow -poweroff - -bootloader --location=mbr --timeout=5 --append="rhgb quiet" - -%packages -@core -cloud-init - -%end diff --git a/install_scripts/fedora-18-jeos.ks b/install_scripts/fedora-18-jeos.ks deleted file mode 100644 index e38b803..0000000 --- a/install_scripts/fedora-18-jeos.ks +++ /dev/null @@ -1,36 +0,0 @@ -url --url=http://mirrors.kernel.org/fedora/releases/18/Fedora/x86_64/os/ -# Without the Everything repo, we cannot install cloud-init -#repo --name="fedora-everything" --baseurl=http://mirrors.kernel.org/fedora/releases/18/Everything/x86_64/os/ -repo --name="fedora-everything" --mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=fedora-18&arch=x86_64 -install -graphical -vnc --password=${adminpw} -text -keyboard us -lang en_US.UTF-8 -skipx -network --device eth0 --bootproto dhcp -rootpw ${adminpw} -firewall --disabled -authconfig --enableshadow --enablemd5 -selinux --enforcing -timezone --utc America/New_York -bootloader --location=mbr -zerombr -clearpart --all --drives=vda - -part biosboot --fstype=biosboot --size=1 --ondisk=vda -part /boot --fstype ext4 --size=200 --ondisk=vda -part pv.2 --size=1 --grow --ondisk=vda -volgroup VolGroup00 --pesize=32768 pv.2 -logvol swap --fstype swap --name=LogVol01 --vgname=VolGroup00 --size=768 --grow --maxsize=1536 -logvol / --fstype ext4 --name=LogVol00 --vgname=VolGroup00 --size=1024 --grow -poweroff - -bootloader --location=mbr --timeout=5 --append="rhgb quiet" - -%packages -@core -cloud-init - -%end diff --git a/install_scripts/rhel-5-jeos.ks b/install_scripts/rhel-5-jeos.ks deleted file mode 100644 index 78ce130..0000000 --- a/install_scripts/rhel-5-jeos.ks +++ 
/dev/null @@ -1,35 +0,0 @@ -install -url --url= -#text -graphical -vnc --password=${adminpw} -key --skip -keyboard us -lang en_US.UTF-8 -skipx -network --device eth0 --bootproto dhcp -rootpw ${adminpw} -firewall --disabled -authconfig --enableshadow --enablemd5 -selinux --enforcing -timezone --utc America/New_York -bootloader --location=mbr --append="console=tty0 console=ttyS0,115200" -zerombr yes -clearpart --all - -part /boot --fstype ext3 --size=200 -part pv.2 --size=1 --grow -volgroup VolGroup00 --pesize=32768 pv.2 -logvol swap --fstype swap --name=LogVol01 --vgname=VolGroup00 --size=768 --grow --maxsize=1536 -logvol / --fstype ext3 --name=LogVol00 --vgname=VolGroup00 --size=1024 --grow -#reboot -poweroff -# Needed for cloud-init -repo --name="EPEL-5" --baseurl="http://mirrors.kernel.org/fedora-epel/5/x86_64/" - -%packages -@base -cloud-init - -%post - diff --git a/install_scripts/rhel-6-jeos.ks b/install_scripts/rhel-6-jeos.ks deleted file mode 100644 index 4ead60d..0000000 --- a/install_scripts/rhel-6-jeos.ks +++ /dev/null @@ -1,32 +0,0 @@ -install -url --url= -# Needed for cloud-init -repo --name="EPEL-6" --baseurl="http://mirrors.kernel.org/fedora-epel/6/x86_64/" -graphical -vnc --password=${adminpw} -key --skip -keyboard us -lang en_US.UTF-8 -skipx -network --device eth0 --bootproto dhcp -rootpw ${adminpw} -firewall --disabled -authconfig --enableshadow --enablemd5 -selinux --enforcing -timezone --utc America/New_York -bootloader --location=mbr --append="console=tty0 console=ttyS0,115200" -zerombr yes -clearpart --all - -part /boot --fstype ext4 --size=200 -part pv.2 --size=1 --grow -volgroup VolGroup00 --pesize=32768 pv.2 -#logvol swap --fstype swap --name=LogVol01 --vgname=VolGroup00 --size=768 --grow --maxsize=1536 -logvol / --fstype ext4 --name=LogVol00 --vgname=VolGroup00 --size=1024 --grow -poweroff - -%packages -@base -cloud-init - -%post diff --git a/install_scripts/ubuntu-10.04-jeos.preseed b/install_scripts/ubuntu-10.04-jeos.preseed deleted file 
mode 100644 index 2b17b5d..0000000 --- a/install_scripts/ubuntu-10.04-jeos.preseed +++ /dev/null @@ -1,54 +0,0 @@ -#ubuntu_baseurl=http://us.archive.ubuntu.com/ubuntu/dists/lucid/ -#This is 10.04 -d-i debian-installer/locale string en_US -d-i console-setup/ask_detect boolean false -d-i console-setup/layoutcode string us - -d-i netcfg/choose_interface select auto -d-i netcfg/get_hostname string unassigned-hostname -d-i netcfg/get_domain string unassigned-domain -d-i netcfg/wireless_wep string - -### Network console -# WARNING: Unlike Anaconda, the Ubuntu installer stops and waits for a connection -# to the network-console before continuing. The resulting console is not a mirror -# of the root console but a distinct interactive session. -# Uncommenting these is only really useful for debugging -#d-i anna/choose_modules string network-console -#d-i network-console/password password ${adminpw} -#d-i network-console/password-again password ${adminpw} - -d-i clock-setup/utc boolean true -d-i time/zone string US/Eastern - -d-i partman-auto/disk string /dev/vda -d-i partman-auto/method string regular -d-i partman-auto/choose_recipe select home -d-i partman/confirm_write_new_label boolean true -d-i partman/choose_partition select finish -d-i partman/confirm boolean true -d-i partman/confirm_nooverwrite boolean true - -d-i passwd/root-login boolean true -d-i passwd/make-user boolean false -d-i passwd/root-password password ${adminpw} -d-i passwd/root-password-again password ${adminpw} - -tasksel tasksel/first multiselect standard -d-i pkgsel/include/install-recommends boolean true -d-i pkgsel/include string ltsp-server-standalone openssh-server python-software-properties -d-i preseed/late_command string chroot /target /usr/sbin/ltsp-update-sshkeys - -d-i grub-installer/only_debian boolean true -d-i grub-installer/with_other_os boolean true - -d-i apt-setup/security_host string -base-config apt-setup/security-updates boolean false - -ubiquity ubiquity/summary note -#ubiquity 
ubiquity/reboot boolean true - -d-i debian-installer/exit/poweroff boolean true - -d-i finish-install/reboot_in_progress note - diff --git a/install_scripts/ubuntu-12.04-jeos.preseed b/install_scripts/ubuntu-12.04-jeos.preseed deleted file mode 100644 index 52bb0e8..0000000 --- a/install_scripts/ubuntu-12.04-jeos.preseed +++ /dev/null @@ -1,52 +0,0 @@ -#ubuntu_baseurl=http://us.archive.ubuntu.com/ubuntu/dists/precise/ -d-i debian-installer/locale string en_US -d-i console-setup/ask_detect boolean false -d-i console-setup/layoutcode string us - -d-i netcfg/choose_interface select auto -d-i netcfg/get_hostname string unassigned-hostname -d-i netcfg/get_domain string unassigned-domain -d-i netcfg/wireless_wep string - -### Network console -# WARNING: Unlike Anaconda, the Ubuntu installer stops and waits for a connection -# to the network-console before continuing. The resulting console is not a mirror -# of the root console but a distinct interactive session. -# Uncommenting these is only really useful for debugging -#d-i anna/choose_modules string network-console -#d-i network-console/password password ${adminpw} -#d-i network-console/password-again password ${adminpw} - -d-i clock-setup/utc boolean true -d-i time/zone string US/Eastern - -d-i partman-auto/disk string /dev/vda -d-i partman-auto/method string regular -d-i partman-auto/choose_recipe select home -d-i partman/confirm_write_new_label boolean true -d-i partman/choose_partition select finish -d-i partman/confirm boolean true -d-i partman/confirm_nooverwrite boolean true - -d-i passwd/root-login boolean true -d-i passwd/make-user boolean false -d-i passwd/root-password password ${adminpw} -d-i passwd/root-password-again password ${adminpw} - -tasksel tasksel/first multiselect standard -d-i pkgsel/include/install-recommends boolean true -d-i pkgsel/include string openssh-server python-software-properties - -d-i grub-installer/only_debian boolean true -d-i grub-installer/with_other_os boolean true - -d-i 
apt-setup/security_host string -base-config apt-setup/security-updates boolean false - -ubiquity ubiquity/summary note -#ubiquity ubiquity/reboot boolean true - -d-i debian-installer/exit/poweroff boolean true - -d-i finish-install/reboot_in_progress note - diff --git a/install_scripts/ubuntu-12.10-jeos.preseed b/install_scripts/ubuntu-12.10-jeos.preseed deleted file mode 100644 index 44bb49a..0000000 --- a/install_scripts/ubuntu-12.10-jeos.preseed +++ /dev/null @@ -1,52 +0,0 @@ -#ubuntu_baseurl=http://us.archive.ubuntu.com/ubuntu/dists/quantal/ -d-i debian-installer/locale string en_US -d-i console-setup/ask_detect boolean false -d-i console-setup/layoutcode string us - -d-i netcfg/choose_interface select auto -d-i netcfg/get_hostname string unassigned-hostname -d-i netcfg/get_domain string unassigned-domain -d-i netcfg/wireless_wep string - -### Network console -# WARNING: Unlike Anaconda, the Ubuntu installer stops and waits for a connection -# to the network-console before continuing. The resulting console is not a mirror -# of the root console but a distinct interactive session. 
-# Uncommenting these is only really useful for debugging -#d-i anna/choose_modules string network-console -#d-i network-console/password password ${adminpw} -#d-i network-console/password-again password ${adminpw} - -d-i clock-setup/utc boolean true -d-i time/zone string US/Eastern - -d-i partman-auto/disk string /dev/vda -d-i partman-auto/method string regular -d-i partman-auto/choose_recipe select home -d-i partman/confirm_write_new_label boolean true -d-i partman/choose_partition select finish -d-i partman/confirm boolean true -d-i partman/confirm_nooverwrite boolean true - -d-i passwd/root-login boolean true -d-i passwd/make-user boolean false -d-i passwd/root-password password ${adminpw} -d-i passwd/root-password-again password ${adminpw} - -tasksel tasksel/first multiselect standard -d-i pkgsel/include/install-recommends boolean true -d-i pkgsel/include string openssh-server python-software-properties software-properties-common - -d-i grub-installer/only_debian boolean true -d-i grub-installer/with_other_os boolean true - -d-i apt-setup/security_host string -base-config apt-setup/security-updates boolean false - -ubiquity ubiquity/summary note -#ubiquity ubiquity/reboot boolean true - -d-i debian-installer/exit/poweroff boolean true - -d-i finish-install/reboot_in_progress note - diff --git a/nova-install b/nova-install new file mode 100755 index 0000000..bd0b83f --- /dev/null +++ b/nova-install @@ -0,0 +1,147 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import sys
import signal
import argparse
from novaimagebuilder.Singleton import Singleton
from novaimagebuilder.OSInfo import OSInfo
from novaimagebuilder.Builder import Builder


class Arguments(Singleton):
    """Singleton holding the parsed command line arguments for the CLI."""

    def _singleton_init(self, *args, **kwargs):
        super(Arguments, self)._singleton_init()
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.argparser = self._argparser_setup()
        # Parse sys.argv once here so every consumer sees the same args.
        self.args = self.argparser.parse_args()

    def _argparser_setup(self):
        """Build the ArgumentParser describing the CLI.

        @return: a configured argparse.ArgumentParser instance
        """
        app_name = sys.argv[0].rpartition('/')[2]
        description_text = """Creates a new VM image in Nova using an OS's native installation tools."""

        argparser = argparse.ArgumentParser(description=description_text, prog=app_name)

        argparser.add_argument('--os', help='The shortid of an OS. Required for both installation types.')
        argparser.add_argument('--os_list', action='store_true', default=False,
                               help='Show the OS list available for image building.')

        # A build has exactly one install source: an ISO or a file tree.
        install_location_group = argparser.add_mutually_exclusive_group()
        install_location_group.add_argument('--install_iso', help='Location of the installation media ISO.')
        install_location_group.add_argument('--install_tree', help='Location of an installation file tree.')

        argparser.add_argument('--install_script', type=argparse.FileType(),
                               help='Custom install script file to use instead of generating one.')
        argparser.add_argument('--admin_pw', help='The password to set for the admin user in the image.')
        argparser.add_argument('--license_key', help='License/product key to use if needed.')
        argparser.add_argument('--arch', default='x86_64',
                               help='The architecture the image is built for. (default: %(default)s)')
        argparser.add_argument('--disk_size', type=int, default=10,
                               help='Size of the image root disk in gigabytes. (default: %(default)s)')
        argparser.add_argument('--instance_flavor', default='vanilla',
                               help='The type of instance to use for building the image. (default: %(default)s)')
        argparser.add_argument('--name', help='A name to assign to the built image.', default='new-image')

        argparser.add_argument('--image_storage', choices=('glance', 'cinder', 'both'), default='glance',
                               help='Where to store the final image: glance, cinder, both (default: %(default)s)')

        argparser.add_argument('--debug', action='store_true', default=False,
                               help='Print debugging output to the logfile. (default: %(default)s)')

        return argparser


class Application(Singleton):
    """Top-level CLI application: wires arguments, logging, signals and the Builder."""

    def _singleton_init(self, *args, **kwargs):
        super(Application, self)._singleton_init()
        self.arguments = Arguments().args
        self.log = self._logger(debug=self.arguments.debug)
        if not self.log:
            # logging.getLogger should always hand back a logger; this guard is
            # kept defensively from the original flow.
            print('No logger!!! stopping...')
            sys.exit(1)
        signal.signal(signal.SIGTERM, self.signal_handler)
        self.osinfo = OSInfo()
        self.builder = None

    def _logger(self, debug=False):
        """Configure process-wide logging and return this class' logger.

        @param debug: when True log at DEBUG level, otherwise WARNING
        @return: a logging.Logger instance
        """
        level = logging.DEBUG if debug else logging.WARNING
        logging.basicConfig(level=level,
                            format='%(asctime)s %(levelname)s %(name)s thread(%(threadName)s) Message: %(message)s')
        return logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))

    def signal_handler(self, signum, stack):
        """On SIGTERM, abort any in-progress build and exit cleanly."""
        if signum == signal.SIGTERM:
            logging.warning('caught signal SIGTERM, stopping...')
            if self.builder:
                self.builder.abort()
            sys.exit(0)

    def main(self):
        """Run one CLI action: build an image, list OSes, or show help.

        @return: 1 when a required install source is missing, otherwise None
                 (passed to sys.exit by the entry point)
        """
        if self.arguments.os:
            if self.arguments.install_iso:
                location = self.arguments.install_iso
                install_type = 'iso'
            elif self.arguments.install_tree:
                location = self.arguments.install_tree
                install_type = 'tree'
            else:
                # if iso or tree is missing, print a message and exit non-zero
                print('One of --install_iso or --install_tree must be given.')
                return 1

            install_config = {'admin_password': self.arguments.admin_pw,
                              'license_key': self.arguments.license_key,
                              'arch': self.arguments.arch,
                              'disk_size': self.arguments.disk_size,
                              'flavor': self.arguments.instance_flavor,
                              'storage': self.arguments.image_storage,
                              'name': self.arguments.name}

            self.builder = Builder(self.arguments.os,
                                   install_location=location,
                                   install_type=install_type,
                                   install_script=self.arguments.install_script,
                                   install_config=install_config)

            # TODO: create a better way to run this.
            # The inactivity timeout is 180 seconds
            self.builder.run()
            self.builder.wait_for_completion(180)

        elif self.arguments.os_list:
            # possible distro values from libosinfo (for reference):
            # 'osx', 'openbsd', 'centos', 'win', 'mandrake', 'sled', 'sles', 'netbsd', 'winnt', 'fedora', 'solaris',
            # 'rhel', 'opensuse', 'rhl', 'mes', 'ubuntu', 'debian', 'netware', 'msdos', 'gnome', 'opensolaris',
            # 'freebsd', 'mandriva'
            os_dict = self.osinfo.os_ids(distros={'fedora': 17, 'rhel': 5, 'ubuntu': 12, 'win': 6})
            # Iterating an empty dict is a no-op, so no emptiness guard is needed.
            for shortid in sorted(os_dict.keys()):
                print('%s - %s' % (shortid, os_dict[shortid]))

        else:
            # No actionable arguments were given: show usage/help.
            Arguments().argparser.parse_args(['--help'])


if __name__ == '__main__':
    sys.exit(Application().main())
class BaseOS(object):
    """Abstract base for OS-specific image install drivers.

    Concrete subclasses implement the abstract hooks below and use the
    StackEnvironment / CacheManager / SyslinuxHelper helpers captured in
    __init__ to drive a native OS installation.
    """

    def __init__(self, osinfo_dict, install_type, install_media_location, install_config, install_script=None):
        """Capture the install parameters and the shared helper objects.

        @param osinfo_dict: libosinfo-derived dictionary describing the OS
        @param install_type: kind of install source (callers pass 'iso' or 'tree')
        @param install_media_location: location of the ISO or install tree
        @param install_config: dict of build options (arch, passwords, ...)
        @param install_script: optional custom install script (default None)
        """
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.env = StackEnvironment()
        self.cache = CacheManager()
        self.syslinux = SyslinuxHelper()
        self.osinfo_dict = osinfo_dict
        self.install_type = install_type
        self.install_media_location = install_media_location
        self.install_config = install_config
        self.install_script = install_script
        self.iso_volume_delete = False
        # Subclasses can pull in the above and then do OS specific tasks to
        # fill in missing information and determine if the resulting install
        # is possible.

    def _abstract(self):
        # Report the *caller's* frame name so the message identifies the
        # abstract method that was invoked, not this helper.
        raise NotImplementedError("Function (%s) not implemented" % (inspect.stack()[1][3]))

    def os_ver_arch(self):
        """Return the '<shortid>-<arch>' identifier for this build."""
        shortid = self.osinfo_dict['shortid']
        arch = self.install_config['arch']
        return shortid + "-" + arch

    def prepare_install_instance(self):
        """Abstract hook: stage the install instance (subclass responsibility)."""
        self._abstract()

    def start_install_instance(self):
        """Abstract hook: start the install instance (subclass responsibility)."""
        self._abstract()

    def update_status(self):
        """Abstract hook: report install progress (subclass responsibility)."""
        self._abstract()

    def wants_iso_content(self):
        """Abstract hook: whether ISO content is needed (subclass responsibility)."""
        self._abstract()

    def iso_content_dict(self):
        """Abstract hook: describe needed ISO content (subclass responsibility)."""
        self._abstract()

    def url_content_dict(self):
        """Abstract hook: describe needed URL content (subclass responsibility)."""
        self._abstract()

    def abort(self):
        """Abstract hook: stop the installation (subclass responsibility)."""
        self._abstract()

    def cleanup(self):
        """Abstract hook: release build resources (subclass responsibility)."""
        self._abstract()
+ @param install_type: The type of installation (iso or tree) + @param install_script: A custom install script to be used instead of what OSInfo can generate + @param install_config: A dict of various info that may be needed for the build. + (admin_pw, license_key, arch, disk_size, flavor, storage, name) + """ + super(Builder, self).__init__() + self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + self.install_location = install_location + self.install_type = install_type + self.install_script = install_script + self.install_config = install_config + self.os = OSInfo().os_for_shortid(osid) + self.os_delegate = self._delegate_for_os(self.os) + self.env = StackEnvironment() + + def _delegate_for_os(self, os): + """ + Select and instantiate the correct OS class for build delegation. + + @param os: The dictionary of OS info for a give OS shortid + + @return: An instance of an OS class that will control a VM for the image installation + """ + # TODO: Change the way we select what class to instantiate to something that we do not have to touch + # every time we add another OS class + os_classes = {'fedora': 'RedHatOS', 'rhel': 'RedHatOS', 'win': 'WindowsOS', 'ubuntu': 'UbuntuOS'} + os_classname = os_classes.get(os['distro']) + + if os_classname: + try: + os_module = __import__("novaimagebuilder." + os_classname, fromlist=[os_classname]) + os_class = getattr(os_module, os_classname) + #import pdb; pdb.set_trace() + return os_class(osinfo_dict=self.os, + install_type=self.install_type, + install_media_location=self.install_location, + install_config=self.install_config, + install_script=self.install_script) + except ImportError as e: + self.log.exception(e) + return None + else: + raise Exception("No delegate found for distro (%s)" % os['distro']) + + def run(self): + """ + Starts the installation of an OS in an image via the appropriate OS class + + @return: Status of the installation. 
+ """ + self.os_delegate.prepare_install_instance() + self.os_delegate.start_install_instance() + return self.os_delegate.update_status() + + def wait_for_completion(self, inactivity_timeout): + """ + Waits for the install_instance to enter SHUTDOWN state then launches a snapshot + + @param inactivity_timeout amount of time to wait for activity before declaring the installation a failure in 10s of seconds (6 is 60 seconds) + + @return: Success or Failure + """ + # TODO: Timeouts, activity checking + instance = self._wait_for_shutoff(self.os_delegate.install_instance, inactivity_timeout) + # Snapshot with self.install_config['name'] + if instance: + finished_image_id = instance.instance.create_image(self.install_config['name']) + self._wait_for_glance_snapshot(finished_image_id) + self._terminate_instance(instance.id) + if self.os_delegate.iso_volume_delete: + self.env.cinder.volumes.get(self.os_delegate.iso_volume).delete() + self.log.debug("Deleted install ISO volume from cinder: %s" % self.os_delegate.iso_volume) + # Leave instance running if install did not finish + + def _wait_for_shutoff(self, instance, inactivity_timeout): + inactivity_countdown = inactivity_timeout + for i in range(1200): + status = instance.status + if status == "SHUTOFF": + self.log.debug("Instance (%s) has entered SHUTOFF state" % instance.id) + return instance + if i % 10 == 0: + self.log.debug("Waiting for instance status SHUTOFF - current status (%s): %d/1200" % (status, i)) + if not instance.is_active(): + inactivity_countdown -= 1 + else: + inactivity_countdown = inactivity_timeout + if inactivity_countdown == 0: + self.log.debug("Install instance has become inactive. 
Instance will remain running so you can investigate what happened.") + return + sleep(1) + + + def _wait_for_glance_snapshot(self, image_id): + image = self.env.glance.images.get(image_id) + self.log.debug("Waiting for glance image id (%s) to become active" % image_id) + while True: + self.log.debug("Current image status: %s" % image.status) + sleep(2) + image = self.env.glance.images.get(image.id) + if image.status == "error": + raise Exception("Image entered error status while waiting for completion") + elif image.status == 'active': + break + # Remove any direct boot properties if they exist + properties = image.properties + for key in ['kernel_id', 'ramdisk_id', 'command_line']: + if key in properties: + del properties[key] + meta = {'properties': properties} + image.update(**meta) + + def _terminate_instance(self, instance_id): + nova = self.env.nova + instance = nova.servers.get(instance_id) + instance.delete() + self.log.debug("Waiting for instance id (%s) to be terminated/delete" % instance_id) + while True: + self.log.debug("Current instance status: %s" % instance.status) + sleep(5) + try: + instance = nova.servers.get(instance_id) + except Exception as e: + self.log.debug("Got exception (%s) assuming deletion complete" % e) + break + + def abort(self): + """ + Aborts the installation of an OS in an image. + + @return: Status of the installation. + """ + self.os_delegate.abort() + self.os_delegate.cleanup() + return self.os_delegate.update_status() + + def status(self): + """ + Returns the status of the installation. + + @return: Status of the installation. + """ + # TODO: replace this with a background thread that watches the status and cleans up as needed. 
+ status = self.os_delegate.update_status() + if status in ('COMPLETE', 'FAILED'): + self.os_delegate.cleanup() + return status diff --git a/novaimagebuilder/CacheManager.py b/novaimagebuilder/CacheManager.py new file mode 100644 index 0000000..ef915ff --- /dev/null +++ b/novaimagebuilder/CacheManager.py @@ -0,0 +1,248 @@ +#!/usr/bin/python + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import json +import os.path +import pycurl +import guestfs +from Singleton import Singleton +from StackEnvironment import StackEnvironment + + +class CacheManager(Singleton): + """ + Class to manage the retrieval and storage of install source objects + Typically the source for these objects are ISO images or install trees + accessible via HTTP. Content is moved into glance and optionally cinder. + Some smaller pieces of content are also cached locally + + Currently items are keyed by os, version, arch and can have arbitrary + names. The name install_iso is special. OS plugins are allowed to + access a local copy before it is sent to glance, even if that local copy + will eventually be deleted. 
+ """ + + # TODO: Currently assumes the target environment is static - allow this to change + # TODO: Sane handling of a pending cache item + # TODO: Configurable + CACHE_ROOT = "/var/lib/novaimagebuilder/" + #INDEX_LOCK = lock() + INDEX_FILE = "_cache_index" + + def _singleton_init(self): + self.env = StackEnvironment() + self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + self.index_filename = self.CACHE_ROOT + self.INDEX_FILE + if not os.path.isfile(self.index_filename): + self.log.debug("Creating cache index file (%s)" % self.index_filename) + # TODO: somehow prevent a race here + index_file = open(self.index_filename, 'w') + json.dump({ } , index_file) + index_file.close() + # This should be None except when we are actively working on it and hold a lock + self.index = None + + def lock_and_get_index(self): + """ + Obtain an exclusive lock on the cache index and then load it into the + "index" instance variable. Tasks done while holding this lock should be + very brief and non-blocking. Calls to this should be followed by either + write_index_and_unlock() or unlock_index() depending upon whether or not the + index has been modified. 
+ """ + + #self.INDEX_LOCK.acquire() + index_file = open(self.index_filename) + self.index = json.load(index_file) + index_file.close() + + def write_index_and_unlock(self): + """ + Write contents of self.index back to the persistent file and then unlock it + """ + + index_file = open(self.index_filename, 'w') + json.dump(self.index , index_file) + index_file.close() + self.index = None + #self.INDEX_LOCK.release() + + def unlock_index(self): + """ + Release the cache index lock without updating the persistent file + """ + + self.index = None + #self.INDEX_LOCK.release() + + # INDEX looks like + # + # { "fedora-19-x86_64": { "install_iso": { "local": "/blah", "glance": "UUID", "cinder": "UUID" }, + # "install_iso_kernel": { "local" + + def _get_index_value(self, os_ver_arch, name, location): + if self.index is None: + raise Exception("Attempt made to read index values while a locked index is not present") + + if not os_ver_arch in self.index: + return None + + if not name in self.index[os_ver_arch]: + return None + + # If the specific location is not requested, return the whole location dict + if not location: + return self.index[os_ver_arch][name] + + if not location in self.index[os_ver_arch][name]: + return None + else: + return self.index[os_ver_arch][name][location] + + def _set_index_value(self, os_ver_arch, name, location, value): + if self.index is None: + raise Exception("Attempt made to read index values while a locked index is not present") + + if not os_ver_arch in self.index: + self.index[os_ver_arch] = {} + + if not name in self.index[os_ver_arch]: + self.index[os_ver_arch][name] = {} + + # If the specific location is not specified, assume value is the entire dict + if not location: + if type(value) is not dict: + raise Exception("When setting a value without a location, the value must be a dict") + self.index[os_ver_arch][name] = value + return + + self.index[os_ver_arch][name][location] = value + + def retrieve_and_cache_object(self, object_type, 
os_plugin, source_url, save_local): + """ + Download a file from a URL and store it in the cache. Uses the object_type and + data from the OS delegate/plugin to index the file correctly. Also treats the + object type "install-iso" as a special case, downloading it locally and then allowing + the OS delegate to request individual files from within the ISO for extraction and + caching. This is used to efficiently retrieve the kernel and ramdisk from Linux + install ISOs. + + @param object_type: A string indicating the type of object being retrieved + @param os_plugin: Instance of the delegate for the OS associated with the download + @param source_url: Location from which to retrieve the object/file + @param save_local: bool indicating whether a local copy of the object should be saved + @return dict containing the various cached locations of the file + local: Local path to file + glance: Glance object UUID + cinder: Cinder object UUID + """ + + self.lock_and_get_index() + existing_cache = self._get_index_value(os_plugin.os_ver_arch(), object_type, None) + if existing_cache: + self.log.debug("Found object in cache") + self.unlock_index() + return existing_cache + # TODO: special case when object is ISO and sub-artifacts are not cached + + # The object is not yet in the cache + # TODO: Some mechanism to indicate that retrieval is in progress + # additional calls to get the same object should block until this is done + self.unlock_index() + self.log.debug("Object not in cache") + + # TODO: If not save_local and the plugin doesn't need the iso, direct download in glance + object_name = os_plugin.os_ver_arch() + "-" + object_type + local_object_filename = self.CACHE_ROOT + object_name + if not os.path.isfile(local_object_filename): + self._http_download_file(source_url, local_object_filename) + else: + self.log.warning("Local file (%s) is already present - assuming it is valid" % local_object_filename) + + if object_type == "install-iso" and 
os_plugin.wants_iso_content():
+            self.log.debug("The plugin wants to do something with the ISO - extracting stuff now")
+            icd = os_plugin.iso_content_dict()
+            if icd:
+                self.log.debug("Launching guestfs")
+                g = guestfs.GuestFS()
+                g.add_drive_ro(local_object_filename)
+                g.launch()
+                g.mount_options ("", "/dev/sda", "/")
+                for nested_obj_type in icd.keys():
+                    nested_obj_name = os_plugin.os_ver_arch() + "-" + nested_obj_type
+                    nested_object_filename = self.CACHE_ROOT + nested_obj_name
+                    self.log.debug("Downloading ISO file (%s) to local file (%s)" % (icd[nested_obj_type],
+                                                                                     nested_object_filename))
+                    g.download(icd[nested_obj_type],nested_object_filename)
+                    if nested_obj_type == "install-iso-kernel":
+                        image_format = "aki"
+                    elif nested_obj_type == "install-iso-initrd":
+                        image_format = "ari"
+                    else:
+                        raise Exception("Nested object of unknown type requested")
+                    (glance_id, cinder_id) = self._do_remote_uploads(nested_obj_name, nested_object_filename,
+                                                                     format=image_format, container_format=image_format,
+                                                                     use_cinder = False)
+                    locations = {"local": nested_object_filename, "glance": str(glance_id), "cinder": str(cinder_id)}
+                    self._do_index_updates(os_plugin.os_ver_arch(), nested_obj_type, locations)
+                g.shutdown()
+                g.close()
+
+        (glance_id, cinder_id) = self._do_remote_uploads(object_name, local_object_filename)
+        locations = {"local": local_object_filename, "glance": str(glance_id), "cinder": str(cinder_id)}
+        self._do_index_updates(os_plugin.os_ver_arch(), object_type, locations)
+
+        return locations
+
+    def _do_index_updates(self, os_ver_arch, object_type, locations):
+        self.lock_and_get_index()
+        self._set_index_value(os_ver_arch, object_type, None, locations )
+        self.write_index_and_unlock()
+
+    def _do_remote_uploads(self, object_name, local_object_filename, format='raw', container_format='bare',
+                           use_cinder=True):
+        if self.env.is_cinder() and use_cinder:
+            (glance_id, cinder_id) = self.env.upload_volume_to_cinder(object_name, local_path=local_object_filename,
+
format=format, container_format=container_format) + else: + cinder_id = None + glance_id = self.env.upload_image_to_glance(object_name, local_path=local_object_filename, + format=format, container_format=container_format) + return (glance_id, cinder_id) + + def _http_download_file(self, url, filename): + # Function to download a file from url to filename + # Borrowed and modified from Oz by Chris Lalancette + # https://github.com/clalancette/oz + + def _data(buf): + # Function that is called back from the pycurl perform() method to + # actually write data to disk. + os.write(fd, buf) + + fd = os.open(filename,os.O_CREAT | os.O_WRONLY | os.O_TRUNC) + + try: + c = pycurl.Curl() + c.setopt(c.URL, url) + c.setopt(c.CONNECTTIMEOUT, 15) + c.setopt(c.WRITEFUNCTION, _data) + c.setopt(c.FOLLOWLOCATION, 1) + c.perform() + c.close() + finally: + os.close(fd) \ No newline at end of file diff --git a/novaimagebuilder/ISOHelper.py b/novaimagebuilder/ISOHelper.py new file mode 100644 index 0000000..5c08d69 --- /dev/null +++ b/novaimagebuilder/ISOHelper.py @@ -0,0 +1,430 @@ +# Copyright (C) 2010,2011 Chris Lalancette +# Copyright (C) 2012,2013 Chris Lalancette +# Copyright (C) 2013 Ian McLeod + +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License. + +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+import struct
+import shutil
+import os
+import errno
+import guestfs
+import logging
+import tempfile
+import subprocess
+import stat
+class ISOHelper():
+    """
+    Class for assisting with the respin of install ISOs.
+    At present the only purpose for this class is to allow the injection of a custom
+    autounattend.xml file to Windows install isos.
+
+    This class is largely derived from the Guest.py, Windows.py and ozutil.py files
+    from the Oz project by Chris Lalancette:
+
+    https://github.com/clalancette/oz
+    """
+
+    def __init__(self, original_iso, arch):
+        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
+        self.orig_iso = original_iso
+        self.arch = arch
+        self.winarch = arch
+        if self.winarch == "x86_64":
+            self.winarch = "amd64"
+        self.iso_contents = tempfile.mkdtemp()
+
+
+    def _validate_primary_volume_descriptor(self, cdfd):
+        """
+        Method to extract the primary volume descriptor from a CD.
+        """
+        # check out the primary volume descriptor to make sure it is sane
+        cdfd.seek(16*2048)
+        fmt = "=B5sBB32s32sQLL32sHHHH"
+        (desc_type, identifier, version, unused1, system_identifier, volume_identifier, unused2, space_size_le, space_size_be, unused3, set_size_le, set_size_be, seqnum_le, seqnum_be) = struct.unpack(fmt, cdfd.read(struct.calcsize(fmt)))
+
+        if desc_type != 0x1:
+            raise Exception("Invalid primary volume descriptor")
+        if identifier != "CD001":
+            raise Exception("invalid CD isoIdentification")
+        if unused1 != 0x0:
+            raise Exception("data in unused field")
+        if unused2 != 0x0:
+            raise Exception("data in 2nd unused field")
+
+    def _geteltorito(self, outfile):
+        """
+        Method to extract the El-Torito boot sector off of a CD and write it
+        to a file.
+ """ + if outfile is None: + raise Exception("output file is None") + + cdfd = open(self.orig_iso, "r") + + self._validate_primary_volume_descriptor(cdfd) + + # the 17th sector contains the boot specification and the offset of the + # boot sector + cdfd.seek(17*2048) + + # NOTE: With "native" alignment (the default for struct), there is + # some padding that happens that causes the unpacking to fail. + # Instead we force "standard" alignment, which has no padding + fmt = "=B5sB23s41sI" + (boot, isoIdent, version, toritoSpec, unused, bootP) = struct.unpack(fmt, + cdfd.read(struct.calcsize(fmt))) + if boot != 0x0: + raise Exception("invalid CD boot sector") + if isoIdent != "CD001": + raise Exception("invalid CD isoIdentification") + if version != 0x1: + raise Exception("invalid CD version") + if toritoSpec != "EL TORITO SPECIFICATION": + raise Exception("invalid CD torito specification") + + # OK, this looks like a bootable CD. Seek to the boot sector, and + # look for the header, 0x55, and 0xaa in the first 32 bytes + cdfd.seek(bootP*2048) + fmt = "=BBH24sHBB" + bootdata = cdfd.read(struct.calcsize(fmt)) + (header, platform, unused, manu, unused2, five, aa) = struct.unpack(fmt, + bootdata) + if header != 0x1: + raise Exception("invalid CD boot sector header") + if platform != 0x0 and platform != 0x1 and platform != 0x2: + raise Exception("invalid CD boot sector platform") + if unused != 0x0: + raise Exception("invalid CD unused boot sector field") + if five != 0x55 or aa != 0xaa: + raise Exception("invalid CD boot sector footer") + + def _checksum(data): + """ + Method to compute the checksum on the ISO. Note that this is *not* + a 1's complement checksum; when an addition overflows, the carry + bit is discarded, not added to the end. 
+ """ + s = 0 + for i in range(0, len(data), 2): + w = ord(data[i]) + (ord(data[i+1]) << 8) + s = (s + w) & 0xffff + return s + + csum = _checksum(bootdata) + if csum != 0: + raise Exception("invalid CD checksum: expected 0, saw %d" % (csum)) + + # OK, everything so far has checked out. Read the default/initial + # boot entry + cdfd.seek(bootP*2048+32) + fmt = "=BBHBBHIB" + (boot, media, loadsegment, systemtype, unused, scount, imgstart, unused2) = struct.unpack(fmt, cdfd.read(struct.calcsize(fmt))) + + if boot != 0x88: + raise Exception("invalid CD initial boot indicator") + if unused != 0x0 or unused2 != 0x0: + raise Exception("invalid CD initial boot unused field") + + if media == 0 or media == 4: + count = scount + elif media == 1: + # 1.2MB floppy in sectors + count = 1200*1024/512 + elif media == 2: + # 1.44MB floppy in sectors + count = 1440*1024/512 + elif media == 3: + # 2.88MB floppy in sectors + count = 2880*1024/512 + else: + raise Exception("invalid CD media type") + + # finally, seek to "imgstart", and read "count" sectors, which + # contains the boot image + cdfd.seek(imgstart*2048) + + # The eltorito specification section 2.5 says: + # + # Sector Count. This is the number of virtual/emulated sectors the + # system will store at Load Segment during the initial boot + # procedure. + # + # and then Section 1.5 says: + # + # Virtual Disk - A series of sectors on the CD which INT 13 presents + # to the system as a drive with 200 byte virtual sectors. There + # are 4 virtual sectors found in each sector on a CD. + # + # (note that the bytes above are in hex). So we read count*512 + eltoritodata = cdfd.read(count*512) + cdfd.close() + + out = open(outfile, "w") + out.write(eltoritodata) + out.close() + + def _generate_new_iso_win_v5(self, output_iso): + """ + Method to create a new ISO based on the modified CD/DVD. + For Windows versions based on kernel 5.x (2000, XP, and 2003). 
+ """ + self.log.debug("Recreating El Torito boot sector") + os.mkdir(os.path.join(self.iso_contents, "cdboot")) + self._geteltorito(os.path.join(self.iso_contents, "cdboot", "boot.bin")) + + self.log.debug("Generating new ISO") + self.subprocess_check_output(["genisoimage", + "-b", "cdboot/boot.bin", + "-no-emul-boot", "-boot-load-seg", + "1984", "-boot-load-size", "4", + "-iso-level", "2", "-J", "-l", "-D", + "-N", "-joliet-long", + "-relaxed-filenames", "-v", "-v", + "-V", "Custom", + "-o", output_iso, + self.iso_contents]) + + def _modify_iso_win_v5(self, install_script): + """ + Method to copy a Windows v5 install script into the appropriate location + """ + self.log.debug("Copying in Windows v5 winnt.sif file") + outname = os.path.join(self.iso_contents, self.winarch, "winnt.sif") + shutil.copy(install_script, outname) + + def _generate_new_iso_win_v6(self, output_iso): + """ + Method to create a new Windows v6 ISO based on the modified CD/DVD. + """ + self.log.debug("Recreating El Torito boot sector") + os.mkdir(os.path.join(self.iso_contents, "cdboot")) + self._geteltorito(os.path.join(self.iso_contents, "cdboot", "boot.bin")) + + self.log.debug("Generating new ISO") + # NOTE: Windows 2008 is very picky about which arguments to genisoimage + # will generate a bootable CD, so modify these at your own risk + self.subprocess_check_output(["genisoimage", + "-b", "cdboot/boot.bin", + "-no-emul-boot", "-c", "BOOT.CAT", + "-iso-level", "2", "-J", "-l", "-D", + "-N", "-joliet-long", + "-relaxed-filenames", "-v", "-v", + "-V", "Custom", "-udf", + "-o", output_iso, + self.iso_contents]) + + def _install_script_win_v6(self, install_script): + """ + Method to copy a Windows v6 install script into the appropriate location + """ + self.log.debug("Copying in Windows v6 autounattend.xml file") + outname = os.path.join(self.iso_contents, "autounattend.xml") + shutil.copy(install_script, outname) + + def _copy_iso(self): + """ + Method to copy the data out of an ISO onto the 
local filesystem. + """ + self.log.info("Copying ISO contents for modification") + try: + shutil.rmtree(self.iso_contents) + except OSError as err: + if err.errno != errno.ENOENT: + raise + os.makedirs(self.iso_contents) + + self.log.info("Setting up guestfs handle") + gfs = guestfs.GuestFS() + self.log.debug("Adding ISO image %s" % (self.orig_iso)) + gfs.add_drive_opts(self.orig_iso, readonly=1, format='raw') + self.log.debug("Launching guestfs") + gfs.launch() + try: + self.log.debug("Mounting ISO") + gfs.mount_options('ro', "/dev/sda", "/") + + self.log.debug("Checking if there is enough space on the filesystem") + isostat = gfs.statvfs("/") + outputstat = os.statvfs(self.iso_contents) + if (outputstat.f_bsize*outputstat.f_bavail) < (isostat['blocks']*isostat['bsize']): + raise Exception("Not enough room on %s to extract install media" % (self.iso_contents)) + + self.log.debug("Extracting ISO contents") + current = os.getcwd() + os.chdir(self.iso_contents) + try: + rd, wr = os.pipe() + + try: + # NOTE: it is very, very important that we use temporary + # files for collecting stdout and stderr here. There is a + # nasty bug in python subprocess; if your process produces + # more than 64k of data on an fd that is using + # subprocess.PIPE, the whole thing will hang. To avoid + # this, we use temporary fds to capture the data + stdouttmp = tempfile.TemporaryFile() + stderrtmp = tempfile.TemporaryFile() + + try: + tar = subprocess.Popen(["tar", "-x", "-v"], stdin=rd, + stdout=stdouttmp, + stderr=stderrtmp) + try: + gfs.tar_out("/", "/dev/fd/%d" % wr) + except: + # we need this here if gfs.tar_out throws an + # exception. In that case, we need to manually + # kill off the tar process and re-raise the + # exception, otherwise we hang forever + tar.kill() + raise + + # FIXME: we really should check tar.poll() here to get + # the return code, and print out stdout and stderr if + # we fail. 
This will make debugging problems easier + finally: + stdouttmp.close() + stderrtmp.close() + finally: + os.close(rd) + os.close(wr) + + # since we extracted from an ISO, there are no write bits + # on any of the directories. Fix that here + for dirpath, dirnames, filenames in os.walk(self.iso_contents): + st = os.stat(dirpath) + os.chmod(dirpath, st.st_mode|stat.S_IWUSR) + for name in filenames: + fullpath = os.path.join(dirpath, name) + try: + # if there are broken symlinks in the ISO, + # then the below might fail. This probably + # isn't fatal, so just allow it and go on + st = os.stat(fullpath) + os.chmod(fullpath, st.st_mode|stat.S_IWUSR) + except OSError as err: + if err.errno != errno.ENOENT: + raise + finally: + os.chdir(current) + finally: + gfs.sync() + gfs.umount_all() + gfs.kill_subprocess() + + def _cleanup_iso(self): + """ + Method to cleanup the local ISO contents. + """ + self.log.info("Cleaning up old ISO data") + # if we are running as non-root, then there might be some files left + # around that are not writable, which means that the rmtree below would + # fail. Recurse into the iso_contents tree, doing a chmod +w on + # every file and directory to make sure the rmtree succeeds + for dirpath, dirnames, filenames in os.walk(self.iso_contents): + os.chmod(dirpath, stat.S_IWUSR|stat.S_IXUSR|stat.S_IRUSR) + for name in filenames: + try: + # if there are broken symlinks in the ISO, + # then the below might fail. This probably + # isn't fatal, so just allow it and go on + os.chmod(os.path.join(dirpath, name), stat.S_IRUSR|stat.S_IWUSR) + except OSError as err: + if err.errno != errno.ENOENT: + raise + + self.rmtree_and_sync(self.iso_contents) + + + def rmtree_and_sync(self, directory): + """ + Function to remove a directory tree and do an fsync afterwards. Because + the removal of the directory tree can cause a lot of metadata updates, it + can cause a lot of disk activity. 
By doing the fsync, we ensure that any
+        metadata updates caused by us will not cause subsequent steps to fail. This
+        cannot help if the system is otherwise very busy, but it does ensure that
+        the problem is not self-inflicted.
+        """
+        shutil.rmtree(directory)
+        fd = os.open(os.path.dirname(directory), os.O_RDONLY)
+        try:
+            os.fsync(fd)
+        finally:
+            os.close(fd)
+
+    def subprocess_check_output(self, *popenargs, **kwargs):
+        """
+        Function to call a subprocess and gather the output.
+        """
+        if 'stdout' in kwargs:
+            raise ValueError('stdout argument not allowed, it will be overridden.')
+        if 'stderr' in kwargs:
+            raise ValueError('stderr argument not allowed, it will be overridden.')
+
+        self.executable_exists(popenargs[0][0])
+
+        # NOTE: it is very, very important that we use temporary files for
+        # collecting stdout and stderr here. There is a nasty bug in python
+        # subprocess; if your process produces more than 64k of data on an fd that
+        # is using subprocess.PIPE, the whole thing will hang. To avoid this, we
+        # use temporary fds to capture the data
+        stdouttmp = tempfile.TemporaryFile()
+        stderrtmp = tempfile.TemporaryFile()
+
+        process = subprocess.Popen(stdout=stdouttmp, stderr=stderrtmp, *popenargs,
+                                   **kwargs)
+        process.communicate()
+        retcode = process.poll()
+
+        stdouttmp.seek(0, 0)
+        stdout = stdouttmp.read()
+        stdouttmp.close()
+
+        stderrtmp.seek(0, 0)
+        stderr = stderrtmp.read()
+        stderrtmp.close()
+
+        if retcode:
+            cmd = ' '.join(*popenargs)
+            raise Exception("'%s' failed(%d): %s" % (cmd, retcode, stderr), retcode)
+        return (stdout, stderr, retcode)
+
+    def executable_exists(self, program):
+        """
+        Function to find out whether an executable exists in the PATH
+        of the user.  If so, the absolute path to the executable is returned.
+        If not, an exception is raised.
+ """ + def is_exe(fpath): + """ + Helper method to check if a file exists and is executable + """ + return os.path.exists(fpath) and os.access(fpath, os.X_OK) + + if program is None: + raise Exception("Invalid program name passed") + + fpath, fname = os.path.split(program) + if fpath: + if is_exe(program): + return program + else: + for path in os.environ["PATH"].split(os.pathsep): + exe_file = os.path.join(path, program) + if is_exe(exe_file): + return exe_file + + raise Exception("Could not find %s" % (program)) diff --git a/novaimagebuilder/NovaInstance.py b/novaimagebuilder/NovaInstance.py new file mode 100644 index 0000000..7dd4f57 --- /dev/null +++ b/novaimagebuilder/NovaInstance.py @@ -0,0 +1,90 @@ +# coding=utf-8 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class NovaInstance(object):
    """
    Thin wrapper around a nova server instance that tracks its disk and
    network activity counters so callers can detect when an install has
    gone idle.
    """

    def __init__(self, instance, stack_env):
        """
        @param instance: nova server instance to wrap
        @param stack_env: StackEnvironment used to refresh instance state
        """
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        # Activity counters observed at the previous poll; is_active()
        # compares fresh diagnostics against these.
        self.last_disk_activity = 0
        self.last_net_activity = 0
        self.instance = instance
        self.stack_env = stack_env

    @property
    def id(self):
        """
        @return: the nova instance id
        """
        return self.instance.id

    @property
    def status(self):
        """
        Refresh the wrapped instance from nova and report its status.

        @return: the current nova status string for the instance
        """
        self.instance = self.stack_env.nova.servers.get(self.instance.id)
        return self.instance.status

    def get_disk_and_net_activity(self):
        """
        Sum the disk and network counters from the instance diagnostics.

        @return: tuple (disk_activity, net_activity); (0, 0) when nova
            returns no diagnostics for the instance
        """
        disk_activity = 0
        net_activity = 0
        diagnostics = self.instance.diagnostics()[1]
        if not diagnostics:
            return 0, 0
        for key, value in diagnostics.items():
            # Disk counters contain 'read'/'write'; network counters 'rx'/'tx'.
            if ('read' in key) or ('write' in key):
                disk_activity += int(value)
            if ('rx' in key) or ('tx' in key):
                net_activity += int(value)
        return disk_activity, net_activity

    def is_active(self):
        """
        Check whether the instance has shown disk activity or meaningful
        network activity since the previous call.

        @return: True if activity was observed; False otherwise, including
            when diagnostics are not yet available (usually before the
            instance has finished spawning)
        """
        self.log.debug("checking for inactivity")
        try:
            current_disk_activity, current_net_activity = self.get_disk_and_net_activity()
        except Exception:
            # Since we can't get disk and net activity we assume the
            # instance is not active (usually before the instance has
            # finished spawning). The caught exception was previously
            # stored in an unused variable; it is simply discarded now.
            return False

        if (current_disk_activity == self.last_disk_activity) and \
                (current_net_activity < (self.last_net_activity + 4096)):
            # No read or write requests and no significant network traffic
            # since the last poll.
            return False
        # Some activity was seen - record it for the next comparison.
        self.last_disk_activity = current_disk_activity
        self.last_net_activity = current_net_activity
        return True
class OSInfo(object):
    """
    OSInfo offers convenience methods for getting information out of libosinfo.

    @param path: Path (str) to the libosinfo data to use. Defaults to /usr/share/libosinfo/db
    """

    def __init__(self, path='/usr/share/libosinfo/db'):
        super(OSInfo, self).__init__()
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        loader = osinfo.Loader()
        loader.process_path(path)
        self.db = loader.get_db()

    def os_id_for_shortid(self, shortid):
        """
        Get the full libosinfo OS id for a given shortid.

        @param shortid: The short form id for an OS record in libosinfo. (Ex. fedora18)
        @return: The id for an OS record in libosinfo (Ex. http://fedoraproject.org/fedora/18)
            or None when no record matches.
        """
        for an_os in self.db.get_os_list().get_elements():
            if an_os.get_short_id() == shortid:
                return an_os.get_id()
        return None  # made explicit: no matching OS record

    def os_for_shortid(self, shortid):
        """
        Given the shortid for an OS, get a dictionary of information about that OS.

        Items in 'media_list' are libosinfo Media objects. Useful methods on these
        objects include:
            get_url() - URL str to the media
            get_initrd_path() - Path str to the initrd image within the install tree
            get_kernel_path() - Path str to the kernel within the install tree
            get_volume_id() - A regular expression for matching the volume ID of an ISO9660 image
            get_installer() - Does the media provide an installer for the OS (True or False)
            get_installer_reboots() - The number of reboots required to complete an installation
            get_live() - Can an OS be booted directly from this media without installation (True or False)

        Items in 'tree_list' are libosinfo Tree objects. Useful methods on these
        objects include:
            get_url() - URL str to the install tree
            get_boot_iso_path() - Path str to the boot image iso in the install tree
            get_initrd_path() - Path str to the initrd image within the install tree
            get_kernel_path() - Path str to the kernel within the install tree

        Items in the 'minimum_resources' and 'recommended_resources' lists are
        libosinfo Resources objects. Useful methods on these objects include:
            get_cpu() - The CPU frequency in Hz or -1 if a value is not available
            get_n_cpus() - The number of CPUs or -1 if a value is not available
            get_ram() - The amount of RAM in bytes or -1 if a value is not available
            get_storage() - The amount of storage in bytes or -1 if a value is not available

        Further documentation on the libosinfo API should be found at http://libosinfo.org/api/

        @param shortid: A str id for an OS such as rhel5
        @return: dict with keys name, version, distro, family, shortid, id,
            media_list, tree_list, minimum_resources, recommended_resources;
            None when the OS is unknown
        """
        os = self.db.get_os(self.os_id_for_shortid(shortid))

        if os:
            return {'name': os.get_name(),
                    'version': os.get_version(),
                    'distro': os.get_distro(),
                    'family': os.get_family(),
                    'shortid': os.get_short_id(),
                    'id': os.get_id(),
                    'media_list': os.get_media_list().get_elements(),
                    'tree_list': os.get_tree_list().get_elements(),
                    'minimum_resources': os.get_minimum_resources().get_elements(),
                    'recommended_resources': os.get_recommended_resources().get_elements()}
        else:
            return None

    def os_for_iso(self, iso):
        """
        Given an install ISO, get information about the OS.

        *** THIS IS ONLY PARTIALLY IMPLEMENTED, USE AT YOUR OWN RISK ***

        @param iso: URL of an install iso
        @return: dict in the same form as os_for_shortid()
        """
        # TODO: Figure out the correct way to implement / use this method
        media = osinfo.Media().create_from_location(iso)
        # BUG FIX: the Os object exposes get_short_id(), not get_shortid()
        return self.os_for_shortid(media.get_os().get_short_id())

    def os_for_tree(self, tree):
        """
        Given an install tree, get information about the OS.

        *** THIS IS ONLY PARTIALLY IMPLEMENTED, USE AT YOUR OWN RISK ***

        @param tree: URL of an install tree
        @return: dict in the same form as os_for_shortid()
        """
        # TODO: Figure out the correct way to implement / use this method
        # BUG FIX: an install tree is represented by a Tree object, not Media
        install_tree = osinfo.Tree().create_from_location(tree)
        return self.os_for_shortid(install_tree.get_os().get_short_id())

    def install_script(self, osid, configuration, profile='jeos'):
        """
        Get an install script for a given OS.

        @param osid: Either the shortid or id for an OS (str)
        @param configuration: A dict of install script customizations with the following keys:
            admin_password (required)
            arch (required)
            license (optional, default: None)
            target_disk (optional, default: None)
            script_disk (optional, default: None)
            preinstall_disk (optional, default: None)
            postinstall_disk (optional, default: None)
            signed_drivers (optional, default: True)
            keyboard (optional, default: 'en_US')
            language (optional, default: 'en_US')
            timezone (optional, default: 'America/New_York')
        @param profile: The profile of the install. (str) 'jeos', 'desktop', etc
        @return: install script as a str, or None when the OS or a script
            for the requested profile cannot be found
        """
        if not osid.startswith('http'):
            osid = self.os_id_for_shortid(osid)

        os = self.db.get_os(osid)

        if not os:
            return None

        script = None
        # TODO: os.find_install_script(profile) seems to be broken. Need to
        # file a bug. Remove this manual search once it is fixed.
        for a_script in os.get_install_script_list().get_elements():
            if a_script.get_profile() == profile:
                script = a_script
        if script is None:
            # BUG FIX: previously fell through to script.generate() and
            # raised AttributeError on None when no profile matched.
            return None

        config = osinfo.InstallConfig()
        config.set_admin_password(configuration['admin_password'])
        config.set_hardware_arch(configuration['arch'])
        if configuration.get('license'):
            config.set_reg_product_key(configuration['license'])
        if configuration.get('target_disk'):
            config.set_target_disk(configuration['target_disk'])
        if configuration.get('script_disk'):
            config.set_script_disk(configuration['script_disk'])
        if configuration.get('preinstall_disk'):
            config.set_pre_install_drivers_disk(configuration['preinstall_disk'])
        if configuration.get('postinstall_disk'):
            config.set_post_install_drivers_disk(configuration['postinstall_disk'])
        # BUG FIX: membership test instead of truthiness so an explicit
        # signed_drivers=False can actually be applied.
        if 'signed_drivers' in configuration:
            config.set_driver_signing(configuration['signed_drivers'])
        if configuration.get('keyboard'):
            config.set_l10n_keyboard(configuration['keyboard'])
        if configuration.get('language'):
            config.set_l10n_language(configuration['language'])
        if configuration.get('timezone'):
            config.set_l10n_timezone(configuration['timezone'])

        return script.generate(os, config, Gio.Cancellable())

    def os_ids(self, distros=None):
        """
        List the operating systems available from libosinfo.

        @param distros: A dict with keys being distro names and the values being
            the lowest version to list. Ex. {'fedora': 17, 'rhel': 5, 'ubuntu': 12, 'win': 6}
        @return: A dict with keys being OS shortid and values being OS name
        """
        os_dict = {}
        for os in self.db.get_os_list().get_elements():
            if distros:
                distro = os.get_distro()
                # Just compare major versions, ie 2 instead of 2.2.8
                version = int(os.get_version().split('.')[0])
                for a_distro in distros:
                    if a_distro == distro and version >= distros[a_distro]:
                        os_dict[os.get_short_id()] = os.get_name()
            else:
                os_dict[os.get_short_id()] = os.get_name()

        return os_dict
class RedHatOS(BaseOS):
    """
    BaseOS implementation for kickstart driven installs of Red Hat style
    distributions (RHEL, Fedora) from either an install ISO or an install
    tree, with or without the Nova direct boot feature.
    """

    def __init__(self, osinfo_dict, install_type, install_media_location, install_config, install_script=None):
        """
        @param osinfo_dict: dict of OS details as returned by OSInfo
        @param install_type: 'iso' or 'tree'
        @param install_media_location: URL of the install ISO or install tree
        @param install_config: dict of install script customizations
        @param install_script: optional pre-built kickstart; when omitted one
            is generated from libosinfo
        """
        super(RedHatOS, self).__init__(osinfo_dict, install_type, install_media_location, install_config, install_script)

        #TODO: Check for direct boot - for now we are using environments
        # where we know it is present
        #if not self.env.is_direct_boot():
        #    raise Exception("Direct Boot feature required - Installs using syslinux stub not yet implemented")

        if install_type == "iso" and not self.env.is_cdrom():
            raise Exception("ISO installs require a Nova environment that can \
                support CDROM block device mapping")

        if not install_script:
            info = OSInfo()
            install_script_string = info.install_script(self.osinfo_dict['shortid'], self.install_config)
            # Power the instance off after install instead of rebooting so
            # the builder can detect completion.
            install_script_string = install_script_string.replace('reboot', 'poweroff')
            if self.install_type == 'tree':
                # Tree installs pull everything over the network: drop the
                # cdrom directive and point the installer at the tree URL.
                install_script_string = install_script_string.replace('cdrom', '')
                if self.install_media_location:
                    url = self.install_media_location
                else:
                    url = self.osinfo_dict['tree_list'][0].get_url()

                self.install_script = "url --url=%s\n%s" % (url,
                                                            install_script_string)
            else:
                self.install_script = install_script_string

    def prepare_install_instance(self):
        """ Method to prepare all necessary local and remote images for an
            install. This method may require significant local disk or CPU
            resource.
        """
        # The kickstart is delivered via the metadata service as user-data.
        self.cmdline = "ks=http://169.254.169.254/latest/user-data"

        # If direct boot option is available, prepare kernel and ramdisk
        if self.env.is_direct_boot():
            if self.install_type == "iso":
                iso_locations = self.cache.retrieve_and_cache_object(
                    "install-iso", self, self.install_media_location, True)
                self.iso_volume = iso_locations['cinder']
                self.iso_aki = self.cache.retrieve_and_cache_object(
                    "install-iso-kernel", self, None, True)['glance']
                self.iso_ari = self.cache.retrieve_and_cache_object(
                    "install-iso-initrd", self, None, True)['glance']
                self.log.debug("Prepared cinder iso (%s), aki (%s) and ari (%s) for install instance" %
                               (self.iso_volume, self.iso_aki, self.iso_ari))
            if self.install_type == "tree":
                kernel_location = "%s%s" % (self.install_media_location,
                                            self.url_content_dict()["install-url-kernel"])
                ramdisk_location = "%s%s" % (self.install_media_location,
                                             self.url_content_dict()["install-url-initrd"])
                self.tree_aki = self.cache.retrieve_and_cache_object(
                    "install-url-kernel", self, kernel_location,
                    True)['glance']
                # BUG FIX: the ramdisk was previously cached under the
                # kernel's key ("install-url-kernel"), colliding with it.
                self.tree_ari = self.cache.retrieve_and_cache_object(
                    "install-url-initrd", self, ramdisk_location,
                    True)['glance']
                # BUG FIX: the original logged self.iso_* attributes (never
                # set on the tree path) and passed three arguments to a
                # two-placeholder format string.
                self.log.debug("Prepared glance aki (%s) and ari (%s) for install instance" %
                               (self.tree_aki, self.tree_ari))

        # Else, download kernel and ramdisk and prepare syslinux image with the two
        else:
            if self.install_type == "iso":
                iso_locations = self.cache.retrieve_and_cache_object(
                    "install-iso", self, self.install_media_location, True)
                self.iso_volume = iso_locations['cinder']
                self.iso_aki = self.cache.retrieve_and_cache_object(
                    "install-iso-kernel", self, None, True)['local']
                self.iso_ari = self.cache.retrieve_and_cache_object(
                    "install-iso-initrd", self, None, True)['local']
                self.boot_disk_id = self.syslinux.create_syslinux_stub(
                    "%s syslinux" % self.os_ver_arch(), self.cmdline,
                    self.iso_aki, self.iso_ari)
                self.log.debug("Prepared syslinux image by extracting kernel and ramdisk from ISO")

            if self.install_type == "tree":
                kernel_location = "%s%s" % (self.install_media_location,
                                            self.url_content_dict()["install-url-kernel"])
                ramdisk_location = "%s%s" % (self.install_media_location,
                                             self.url_content_dict()["install-url-initrd"])
                self.url_aki = self.cache.retrieve_and_cache_object(
                    "install-url-kernel", self, kernel_location,
                    True)['local']
                self.url_ari = self.cache.retrieve_and_cache_object(
                    "install-url-initrd", self, ramdisk_location,
                    True)['local']
                self.boot_disk_id = self.syslinux.create_syslinux_stub(
                    "%s syslinux" % self.os_ver_arch(), self.cmdline,
                    self.url_aki, self.url_ari)
                # BUG FIX: message previously claimed the files came from an ISO
                self.log.debug("Prepared syslinux image by downloading kernel and ramdisk from install tree")

    def start_install_instance(self):
        """
        Launch the Nova instance that performs the actual install, using the
        artifacts prepared by prepare_install_instance().
        """
        if self.env.is_direct_boot():
            self.log.debug("Launching direct boot ISO install instance")
            if self.install_type == "iso":
                self.install_instance = self.env.launch_instance(
                    root_disk=('blank', 10),
                    install_iso=('cinder', self.iso_volume),
                    aki=self.iso_aki, ari=self.iso_ari,
                    cmdline=self.cmdline, userdata=self.install_script)

            if self.install_type == "tree":
                # BUG FIX: the tree path previously referenced self.iso_aki
                # and self.iso_ari, which are never set for tree installs.
                self.install_instance = self.env.launch_instance(
                    root_disk=('blank', 10), aki=self.tree_aki,
                    ari=self.tree_ari, cmdline=self.cmdline,
                    userdata=self.install_script)

        else:
            if self.install_type == "tree":
                self.log.debug("Launching syslinux install instance")
                self.install_instance = self.env.launch_instance(root_disk=(
                    'glance', self.boot_disk_id), userdata=self.install_script)

            if self.install_type == "iso":
                self.install_instance = self.env.launch_instance(root_disk=(
                    'glance', self.boot_disk_id), install_iso=('cinder',
                    self.iso_volume), userdata=self.install_script)

    def update_status(self):
        # Status reporting is not implemented yet; installs are assumed running.
        return "RUNNING"

    def wants_iso_content(self):
        # The builder should extract kernel/ramdisk content from install ISOs.
        return True

    def iso_content_dict(self):
        # Paths of the kernel and ramdisk inside a Red Hat style install ISO.
        return {"install-iso-kernel": "/images/pxeboot/vmlinuz",
                "install-iso-initrd": "/images/pxeboot/initrd.img"}

    def url_content_dict(self):
        # Paths of the kernel and ramdisk relative to an install tree URL.
        return {"install-url-kernel": "/images/pxeboot/vmlinuz",
                "install-url-initrd": "/images/pxeboot/initrd.img"}

    def abort(self):
        # Not yet implemented.
        pass

    def cleanup(self):
        # Not yet implemented.
        pass
class Singleton(object):
    """
    Base class implementing the singleton pattern: every instantiation of a
    given subclass yields the same shared instance. Subclasses put their
    one-time setup in _singleton_init() rather than __init__().
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        shared = cls._instance
        if shared is None:
            # First instantiation: build the instance, run its one-time
            # initializer, then register it for all future calls.
            shared = super(Singleton, cls).__new__(cls)
            shared._singleton_init(*args, **kwargs)
            cls._instance = shared
        return shared

    def __init__(self, *args, **kwargs):
        # Deliberately empty: __init__ runs on every instantiation, so any
        # real setup lives in _singleton_init() instead.
        pass

    def _singleton_init(self, *args, **kwargs):
        """Initialize a singleton instance before it is registered."""
        pass
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from keystoneclient.v2_0 import client as keystone_client +from novaclient.v1_1 import client as nova_client +from glanceclient import client as glance_client +from cinderclient import client as cinder_client +from Singleton import Singleton +from time import sleep +from novaclient.v1_1.contrib.list_extensions import ListExtManager +import os +from NovaInstance import NovaInstance +import logging + + +class StackEnvironment(Singleton): + + """ + StackEnvironment + """ + + def _singleton_init(self): + super(StackEnvironment, self)._singleton_init() + self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + # We want the following environment variables set: OS_USERNAME, OS_PASSWORD, OS_TENANT, OS_AUTH_URL + try: + username = os.environ['OS_USERNAME'] + password = os.environ['OS_PASSWORD'] + tenant = os.environ['OS_TENANT_NAME'] + auth_url = os.environ['OS_AUTH_URL'] + except Exception, e: + raise Exception("Unable to retrieve auth info from environment \ + variables. exception: %s" % e.message) + + try: + self.keystone = keystone_client.Client(username=username, + password=password, tenant_name=tenant, auth_url=auth_url) + self.keystone.authenticate() + except Exception, e: + raise Exception('Error authenticating with keystone. Original \ + exception: %s' % e.message) + try: + self.nova = nova_client.Client(username, password, tenant, + auth_url=auth_url, insecure=True) + except Exception, e: + raise Exception('Error connecting to Nova. Nova is required for \ + building images. 
Original exception: %s' % e.message) + try: + glance_url = self.keystone.service_catalog.get_endpoints()['image'][0]['adminURL'] + self.glance = glance_client.Client('1', endpoint=glance_url, + token=self.keystone.auth_token) + except Exception, e: + raise Exception('Error connecting to glance. Glance is required for\ + building images. Original exception: %s' % e.message) + + try: + self.cinder = cinder_client.Client('1', username, password, tenant, + auth_url) + except: + self.cinder = None + + @property + def keystone_server(self): + """ + + + @return: keystone client + """ + return self.keystone + + @property + def glance_server(self): + """ + + + @return: glance client + """ + return self.glance + + @property + def cinder_server(self): + """ + + + @return: cinder client or None + """ + return self.cinder + + def upload_image_to_glance(self, name, local_path=None, location=None, format='raw', min_disk=0, min_ram=0, + container_format='bare', is_public=True, properties={}): + """ + + @param name: human readable name for image in glance + @param local_path: path to an image file + @param location: URL for image file + @param format: 'raw', 'vhd', 'vmdk', 'vdi', 'iso', 'qcow2', 'aki', + 'ari', 'ami' + @param min_disk: integer of minimum disk size in GB that a nova instance + needs to launch using this image + @param min_ram: integer of minimum amount of RAM in GB that a nova + instance needs to launch using this image + @param container_format: currently not used by OpenStack components, so + 'bare' is a good default + @param is_public: boolean to mark an image as being publically + available + @param properties: dictionary where keys are property names such as + ramdisk_id and kernel_id and values are the property values + @return: glance image id @raise Exception: + """ + image_meta = {'container_format': container_format, 'disk_format': + format, 'is_public': is_public, 'min_disk': min_disk, 'min_ram': + min_ram, 'name': name, 'properties': properties} + try: + 
image_meta['data'] = open(local_path, "r") + except Exception, e: + if location: + image_meta['location'] = location + else: + raise e + + image = self.glance.images.create(name=name) + self.log.debug("Started uploading to Glance") + image.update(**image_meta) + while image.status != 'active': + image = self.glance.images.get(image.id) + if image.status == 'error': + raise Exception('Error uploading image to Glance.') + sleep(1) + self.log.debug("Finished uploading to Glance") + return image.id + + def upload_volume_to_cinder(self, name, volume_size=None, local_path=None, + location=None, format='raw', container_format='bare', + is_public=True, keep_image=True): + """ + + @param name: human readable name for volume in cinder + @param volume_size: integer size in GB of volume + @param local_path: path to an image file + @param location: URL to an image file + @param format: 'raw', 'vhd', 'vmdk', 'vdi', 'iso', 'qcow2', 'aki', + 'ari', 'ami' + @param container_format: currently not used by OpenStack components, so + 'bare' is a good default + @param is_public: boolean to mark an image as being publically + available + @param keep_image: currently not implemented + @return: tuple (glance image id, cinder volume id) + """ + image_id = self.upload_image_to_glance(name, local_path=local_path, + location=location, format=format, is_public=is_public) + volume_id = self._migrate_from_glance_to_cinder(image_id, volume_size) + if not keep_image: + #TODO: spawn a thread to delete image after volume is created + return volume_id + return (image_id, volume_id) + + def create_volume_from_image(self, image_id, volume_size=None): + """ + + @param image_id: uuid of glance image + @param volume_size: integer size in GB of volume to be created + @return: cinder volume id + """ + return self._migrate_from_glance_to_cinder(image_id, volume_size) + + def delete_image(self, image_id): + """ + + @param image_id: glance image id + """ + self.glance.images.get(image_id).delete() + + def 
delete_volume(self, volume_id): + """ + + @param volume_id: cinder volume id + """ + self.cinder.volumes.get(volume_id).delete() + + def _migrate_from_glance_to_cinder(self, image_id, volume_size): + image = self.glance.images.get(image_id) + if not volume_size: + # Gigabytes rounded up + volume_size = int(image.size/(1024*1024*1024)+1) + + self.log.debug("Started copying to Cinder") + volume = self.cinder.volumes.create(volume_size, + display_name=image.name, imageRef=image.id) + while volume.status != 'available': + volume = self.cinder.volumes.get(volume.id) + if volume.status == 'error': + volume.delete() + raise Exception('Error occured copying glance image %s to \ + volume %s' % (image_id, volume.id)) + sleep(1) + self.log.debug("Finished copying to Cinder") + return volume.id + + def get_volume_status(self, volume_id): + """ + + @param volume_id: cinder volume id + @return: 'active', 'error', 'saving', 'deleted' (possibly more states + exist, but dkliban could not find documentation where they are all + listed) + """ + volume = self.cinder.volumes.get(volume_id) + return volume.status + + def get_image_status(self, image_id): + """ + + @param image_id: glance image id + @return: 'queued', 'saving', 'active', 'killed', 'deleted', or + 'pending_delete' + """ + image = self.glance.images.get(image_id) + return image.status + + def _create_blank_image(self, size): + rc = os.system("qemu-img create -f qcow2 blank_image.tmp %dG" % size) + if rc == 0: + return + else: + raise Exception("Unable to create blank image") + + + def _remove_blank_image(self): + rc = os.system("rm blank_image.tmp") + if rc == 0: + return + else: + raise Exception("Unable to create blank image") + + def launch_instance(self, root_disk=None, install_iso=None, + secondary_iso=None, floppy=None, aki=None, ari=None, cmdline=None, + userdata=None): + """ + + @param root_disk: tuple where first element is 'blank', 'cinder', or + 'glance' and second element is size, or cinder volume id, or glance 
+ image id. + @param install_iso: install media represented by tuple where first + element is 'cinder' or 'glance' and second element is cinder volume id + or glance image id. + @param secondary_iso: media containing extra drivers represented by + tuple where first element is 'cinder' or 'glance' and second element is + cinder volume id or glance image id. + @param floppy: media to be mounted as a floppy represented by tuple + where first element is 'cinder' or 'glance' and second element is + cinder volume id or glance image id. + @param aki: glance image id for kernel + @param ari: glance image id for ramdisk + @param cmdline: string command line argument for anaconda + @param userdata: string containing kickstart file or preseed file + @return: NovaInstance launched @raise Exception: + """ + if root_disk: + #if root disk needs to be created + if root_disk[0] == 'blank': + root_disk_size = root_disk[1] + #Create a blank qcow2 image and uploads it + self._create_blank_image(root_disk_size) + if aki and ari and cmdline: + root_disk_properties = {'kernel_id': aki, + 'ramdisk_id': ari, 'command_line': cmdline} + else: + root_disk_properties = {} + root_disk_image_id = self.upload_image_to_glance( + 'blank %dG disk' % root_disk_size, + local_path='./blank_image.tmp', format='qcow2', + properties=root_disk_properties) + self._remove_blank_image() + elif root_disk[0] == 'glance': + root_disk_image_id = root_disk[1] + else: + raise Exception("Boot disk must be of type 'blank' or 'glance'") + + if install_iso: + if install_iso[0] == 'cinder': + install_iso_id = install_iso[1] + elif install_iso[0] == 'glance': + install_iso_id = self.create_volume_from_image(install_iso[1]) + else: + raise Exception("Install ISO must be of type 'cinder' or \ + 'glance'") + if secondary_iso: + if secondary_iso[0] == 'cinder': + secondary_iso_id = secondary_iso[1] + elif secondary_iso[0] == 'glance': + secondary_iso_id = self.create_volume_from_image(secondary_iso_id) + else: + raise 
Exception("Secondary ISO must be of type 'cinder' or\ + 'glance'") + if floppy: + if floppy[0] == 'cinder': + floppy_id = floppy[1] + elif floppy[0] == 'glance': + floppy_id = self.create_volume_from_image(floppy[1]) + else: + raise Exception("Floppy must be of type 'cinder' or 'glance'") + + # if direct boot is not available (Havana): + if not self.is_direct_boot(): + instance = None + # 0 crdom drives are needed + if not install_iso and not secondary_iso and not floppy: + instance = self._launch_network_install(root_disk_image_id, + userdata) + # 1 cdrom drive is needed + elif install_iso and not secondary_iso and not floppy: + instance = self._launch_single_cdrom_install(root_disk_image_id, + userdata, install_iso_id) + # 2 cdrom drives are needed + elif install_iso and secondary_iso and not floppy: + instance = self._launch_instance_with_dual_cdrom(root_disk_image_id, + install_iso_id, secondary_iso_id) + if instance: + return NovaInstance(instance, self) + + #blank root disk with ISO, ISO2 and Floppy - Windows + if install_iso and secondary_iso and floppy: + + instance = self._launch_windows_install(root_disk_image_id, + install_iso_id, secondary_iso_id, floppy_id) + return NovaInstance(instance, self) + + #blank root disk with aki, ari and cmdline. install iso is optional. 
+ if aki and ari and cmdline and userdata: + + instance = self._launch_direct_boot(root_disk_image_id, userdata, + install_iso=install_iso_id) + return NovaInstance(instance, self) + + def _launch_network_install(self, root_disk, userdata): + #TODO: check the kickstart file in userdata for sanity + self.log.debug("Starting instance for network install") + image = self.glance.images.get(root_disk) + instance = self.nova.servers.create("Install from network", image, "2", + userdata=userdata) + return instance + + def _launch_single_cdrom_install(self, root_disk, userdata, install_iso): + image = self.glance.images.get(root_disk) + self.log.debug("Starting instance for single cdrom install") + if install_iso: + if self.is_cdrom(): + block_device_mapping_v2 = [ + {"source_type": "volume", + "destination_type": "volume", + "uuid": install_iso, + "boot_index": "1", + "device_type": "cdrom", + "disk_bus": "ide", + }, + ] + instance = self.nova.servers.create("Install with single cdrom", + image, "2", + block_device_mapping_v2=block_device_mapping_v2, + userdata=userdata) + return instance + else: + #TODO: use BDM mappings from grizzly to launch instance + pass + else: + raise Exception("Install ISO image id is required for single cdrom\ + drive installations.") + + def _launch_instance_with_dual_cdrom(self, root_disk, install_iso, + secondary_iso): + + block_device_mapping_v2 = [ + {"source_type": "volume", + "destination_type": "volume", + "uuid": install_iso, + "boot_index": "1", + "device_type": "cdrom", + "disk_bus": "ide", + }, + {"source_type": "volume", + "destination_type": "volume", + "uuid": secondary_iso, + "boot_index": "2", + "device_type": "cdrom", + "disk_bus": "ide", + }, + ] + + image = self.glance.images.get(root_disk) + instance = self.nova.servers.create("Install with dual cdroms", image, "2", + meta={}, block_device_mapping_v2=block_device_mapping_v2) + return instance + + def _launch_direct_boot(self, root_disk, userdata, install_iso=None): + image = 
self.glance.images.get(root_disk) + if install_iso: + #assume that install iso is already a cinder volume + block_device_mapping_v2 = [ + {"source_type": "volume", + "destination_type": "volume", + "uuid": install_iso, + "boot_index": "1", + "device_type": "cdrom", + "disk_bus": "ide", + }, + ] + else: + #must be a network install + block_device_mapping_v2 = None + instance = self.nova.servers.create("direct-boot-linux", image, "2", + block_device_mapping_v2=block_device_mapping_v2, + userdata=userdata) + return instance + + def _launch_windows_install(self, root_disk, install_cdrom, drivers_cdrom, + autounattend_floppy): + + block_device_mapping_v2 = [ + {"source_type": "volume", + "destination_type": "volume", + "uuid": install_cdrom, + "boot_index": "1", + "device_type": "cdrom", + "disk_bus": "ide", + }, + {"source_type": "volume", + "destination_type": "volume", + "uuid": drivers_cdrom, + "boot_index": "3", + "device_type": "cdrom", + "disk_bus": "ide", + }, + {"source_type": "volume", + "destination_type": "volume", + "uuid": autounattend_floppy, + "boot_index": "2", + "device_type": "floppy", + }, + ] + + image = self.glance.images.get(root_disk) + instance = self.nova.servers.create("windows-volume-backed", image, "2", + meta={}, block_device_mapping_v2=block_device_mapping_v2) + return instance + + def is_cinder(self): + """ + Checks if cinder is available. + + @return: True if cinder service is available + """ + if not self.cinder: + return False + else: + return True + + def is_cdrom(self): + """ + Checks if nova allows mapping a volume as cdrom drive. + This is only available starting with Havana + + @return: True if volume can be attached as cdrom + """ + nova_extension_manager = ListExtManager(self.nova) + for ext in nova_extension_manager.show_all(): + if ext.name == "VolumeAttachmentUpdate" and ext.is_loaded(): + return True + return False + + def is_floppy(self): + #TODO: check if floppy is available. 
+ """ + Checks if nova allows mapping a volume as a floppy drive. + This will not be available until Icehouse + + @return: Currently this always returns False. + """ + return False + + def is_direct_boot(self): + #TODO: check if direct boot is available + """ + Checks if nova allows booting an instance with a command line argument + This will not be available until Icehouse + + @return: Currently this always returns False + """ + return False diff --git a/novaimagebuilder/SyslinuxHelper.py b/novaimagebuilder/SyslinuxHelper.py new file mode 100644 index 0000000..b38ba32 --- /dev/null +++ b/novaimagebuilder/SyslinuxHelper.py @@ -0,0 +1,181 @@ +# coding=utf-8 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +from tempfile import NamedTemporaryFile, TemporaryFile, mkdtemp +import guestfs +import shutil +import os +import subprocess +from StackEnvironment import StackEnvironment + +class SyslinuxHelper: + + def __init__(self): + self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + self.env = StackEnvironment() + + def create_syslinux_stub(self, image_name, cmdline, kernel_filename, ramdisk_filename): + """ + + @param cmdline: kernel command line + @param kernel_filename: path to kernel file + @param ramdisk_filename: path to ramdisk file + @return glance image id + """ + + raw_fs_image = NamedTemporaryFile(delete=False) + raw_image_name = raw_fs_image.name + tmp_content_dir = None + glance_image_id = None + try: + qcow2_image_name = "%s.qcow2" % raw_image_name + + # 200 MB sparse file + self.log.debug("Creating sparse 200 MB file") + outsize = 1024 * 1024 * 200 + raw_fs_image.truncate(outsize) + raw_fs_image.close() + + # Partition, format and add DOS MBR + g = guestfs.GuestFS() + g.add_drive(raw_image_name) + g.launch() + g.part_disk("/dev/sda","msdos") + g.part_set_mbr_id("/dev/sda",1,0xb) + g.mkfs("vfat", "/dev/sda1") + g.part_set_bootable("/dev/sda", 1, 1) + dosmbr = open("/usr/share/syslinux/mbr.bin").read() + ws = g.pwrite_device("/dev/sda", dosmbr, 0) + if ws != len(dosmbr): + raise Exception("Failed to write entire MBR") + # Install syslinux + g.syslinux("/dev/sda1") + + #Insert kernel, ramdisk and syslinux.cfg file + tmp_content_dir = mkdtemp() + + kernel_dest = os.path.join(tmp_content_dir,"vmlinuz") + shutil.copy(kernel_filename, kernel_dest) + + initrd_dest = os.path.join(tmp_content_dir,"initrd.img") + shutil.copy(ramdisk_filename, initrd_dest) + + syslinux_conf="""default customhd + timeout 30 + prompt 1 + label customhd + kernel vmlinuz + append initrd=initrd.img %s + """ % (cmdline) + + f = open(os.path.join(tmp_content_dir, "syslinux.cfg"),"w") + f.write(syslinux_conf) + f.close() + + # copy the tmp content to 
the image + g.mount_options ("", "/dev/sda1", "/") + for filename in os.listdir(tmp_content_dir): + g.upload(os.path.join(tmp_content_dir,filename),"/" + filename) + g.sync() + g.close() + try: + self.log.debug("Converting syslinux stub image from raw to qcow2") + self._subprocess_check_output(["qemu-img","convert","-c","-O","qcow2",raw_image_name, qcow2_image_name]) + self.log.debug("Uploading syslinux qcow2 image to glance") + glance_image_id = self.env.upload_image_to_glance(image_name, local_path=qcow2_image_name, format='qcow2') + except Exception, e: + self.log.debug("Exception while converting syslinux image to qcow2: %s" % e) + self.log.debug("Uploading syslinux raw image to glance.") + glance_image_id = self.env.upload_image_to_glance(image_name, local_path=raw_image_name, format='raw') + + finally: + self.log.debug("Removing temporary file.") + if os.path.exists(raw_image_name): + os.remove(raw_image_name) + if os.path.exists(qcow2_image_name): + os.remove(qcow2_image_name) + if tmp_content_dir: + shutil.rmtree(tmp_content_dir) + + return glance_image_id + + ### Utility functions borrowed from Oz and lightly modified + def _executable_exists(self, program): + """ + Function to find out whether an executable exists in the PATH + of the user. If so, the absolute path to the executable is returned. + If not, an exception is raised. 
+ """ + def is_exe(fpath): + """ + Helper method to check if a file exists and is executable + """ + return os.path.exists(fpath) and os.access(fpath, os.X_OK) + + if program is None: + raise Exception("Invalid program name passed") + + fpath, fname = os.path.split(program) + if fpath: + if is_exe(program): + return program + else: + for path in os.environ["PATH"].split(os.pathsep): + exe_file = os.path.join(path, program) + if is_exe(exe_file): + return exe_file + + raise Exception("Could not find %s" % (program)) + + + def _subprocess_check_output(self, *popenargs, **kwargs): + """ + Function to call a subprocess and gather the output. + Addresses a lack of check_output() prior to Python 2.7 + """ + if 'stdout' in kwargs: + raise ValueError('stdout argument not allowed, it will be overridden.') + if 'stderr' in kwargs: + raise ValueError('stderr argument not allowed, it will be overridden.') + + self._executable_exists(popenargs[0][0]) + + # NOTE: it is very, very important that we use temporary files for + # collecting stdout and stderr here. There is a nasty bug in python + # subprocess; if your process produces more than 64k of data on an fd that + # is using subprocess.PIPE, the whole thing will hang. 
To avoid this, we + # use temporary fds to capture the data + stdouttmp = TemporaryFile() + stderrtmp = TemporaryFile() + + process = subprocess.Popen(stdout=stdouttmp, stderr=stderrtmp, *popenargs, + **kwargs) + process.communicate() + retcode = process.poll() + + stdouttmp.seek(0, 0) + stdout = stdouttmp.read() + stdouttmp.close() + + stderrtmp.seek(0, 0) + stderr = stderrtmp.read() + stderrtmp.close() + + if retcode: + cmd = ' '.join(*popenargs) + raise Exception("'%s' failed(%d): %s" % (cmd, retcode, stderr), retcode) + return (stdout, stderr, retcode) diff --git a/novaimagebuilder/WindowsOS.py b/novaimagebuilder/WindowsOS.py new file mode 100644 index 0000000..a86fdfa --- /dev/null +++ b/novaimagebuilder/WindowsOS.py @@ -0,0 +1,141 @@ +# encoding: utf-8 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import guestfs +import uuid +from CacheManager import CacheManager +from ISOHelper import ISOHelper +from BaseOS import BaseOS +from tempfile import NamedTemporaryFile +from shutil import copyfile +from os import remove + +class WindowsOS(BaseOS): + + BLANK_FLOPPY = "/usr/share/novaimagebuilder/disk.img" + + def __init__(self, osinfo_dict, install_type, install_media_location, install_config, install_script = None): + super(WindowsOS, self).__init__(osinfo_dict, install_type, install_media_location, install_config, install_script) + + #TODO: Check for direct boot - for now we are using environments + # where we know it is present + #if not self.env.is_direct_boot(): + # raise Exception("Direct Boot feature required - Installs using syslinux stub not yet implemented") + + if install_type != "iso": + raise Exception("Only ISO installs supported for Windows installs") + + if not self.env.is_cdrom(): + raise Exception("ISO installs require a Nova environment that can support CDROM block device mapping") + + + # TODO: Remove these + self.install_artifacts = [ ] + + + def prepare_install_instance(self): + """ Method to prepare all necessary local and remote images for an install + This method may require significant local disk or CPU resource + """ + # These must be created and cached beforehand + # TODO: Automate + driver_locations = self.cache.retrieve_and_cache_object("driver-iso", self, None, True) + self.driver_iso_volume = driver_locations['cinder'] + iso_locations = self.cache.retrieve_and_cache_object("install-iso", + self, self.install_media_location, True) + if self.env.is_floppy(): + self.iso_volume = iso_locations['cinder'] + self._prepare_floppy() + self.log.debug ("Prepared cinder iso (%s), driver_iso (%s) and\ + floppy (%s) for install instance" % (self.iso_volume, + self.driver_iso_volume, self.floppy_volume)) + else: + self._respin_iso(iso_locations['local'], "x86_64") + self.iso_volume_delete = True + + + def start_install_instance(self): + if 
self.install_type == "iso": + self.log.debug("Launching windows install instance") + if self.env.is_floppy(): + self.install_instance = self.env.launch_instance(root_disk=('blank', 10), + install_iso=('cinder', self.iso_volume), + secondary_iso=('cinder',self.driver_iso_volume), + floppy=('cinder',self.floppy_volume)) + else: + self.install_instance = self.env.launch_instance(root_disk=('blank', 10), install_iso=('cinder', self.iso_volume), secondary_iso=('cinder', self.driver_iso_volume)) + + def _respin_iso(self, iso_path, arch): + try: + new_install_iso = NamedTemporaryFile(delete=False) + new_install_iso_name = new_install_iso.name + new_install_iso.close() + ih = ISOHelper(iso_path, arch) + ih._copy_iso() + ih._install_script_win_v6(self.install_script.name) + ih._generate_new_iso_win_v6(new_install_iso_name) + image_name = "install-iso-%s-%s" % (self.osinfo_dict['shortid'], + str(uuid.uuid4())[:8]) + self.iso_volume = self.env.upload_volume_to_cinder(image_name, + local_path=new_install_iso_name, keep_image=False) + finally: + if new_install_iso_name: + remove(new_install_iso_name) + + def _prepare_floppy(self): + self.log.debug("Preparing floppy with autounattend.xml") + unattend_floppy_name = None + unattend_file = None + try: + # Use tempfile to get a known unique temporary location for floppy image copy + unattend_floppy = NamedTemporaryFile(delete=False) + unattend_floppy_name = unattend_floppy.name + unattend_floppy.close() + copyfile(self.BLANK_FLOPPY, unattend_floppy_name) + # Create a real file copy of the unattend content for use by guestfs + unattend_file = NamedTemporaryFile() + unattend_file.write(self.install_script.read()) + unattend_file.flush() + # Copy unattend into floppy via guestfs + g = guestfs.GuestFS() + g.add_drive(unattend_floppy_name) + g.launch() + g.mount_options ("", "/dev/sda", "/") + g.upload(unattend_file.name,"/autounattend.xml") + shutdown_result = g.shutdown() + g.close() + # Upload it to glance and copy to cinder + # 
Unique-ish name + image_name = "unattend-floppy-%s-%s" % ( self.osinfo_dict['shortid'], str(uuid.uuid4())[:8] ) + self.floppy_volume = self.env.upload_volume_to_cinder(image_name, local_path=unattend_floppy_name, keep_image = False) + self.install_artifacts.append( ('cinder', self.floppy_volume ) ) + finally: + if unattend_floppy_name: + remove(unattend_floppy_name) + if unattend_file: + unattend_file.close() + + def update_status(self): + return "RUNNING" + + def wants_iso_content(self): + return False + + def abort(self): + pass + + def cleanup(self): + # TODO: Remove self.install_artifacts + pass diff --git a/novaimagebuilder/__init__.py b/novaimagebuilder/__init__.py new file mode 100644 index 0000000..b73d93e --- /dev/null +++ b/novaimagebuilder/__init__.py @@ -0,0 +1 @@ +__author__ = 'sloranz' diff --git a/openstack-common.conf b/openstack-common.conf deleted file mode 100644 index 3a31982..0000000 --- a/openstack-common.conf +++ /dev/null @@ -1,4 +0,0 @@ -[DEFAULT] -module=log -base=imagebuilder - diff --git a/tests/MockCacheManager.py b/tests/MockCacheManager.py new file mode 100644 index 0000000..931be40 --- /dev/null +++ b/tests/MockCacheManager.py @@ -0,0 +1,155 @@ +# encoding: utf-8 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import os +import os.path +import uuid +import json +from novaimagebuilder.Singleton import Singleton + + +class MockCacheManager(Singleton): + """ + Mock implementation of CacheManager for unit testing. + + * To test against locked or unlocked state, set the attribute 'locked' to True or False. + + * To test with a populated index, set the attribute 'index' to a populated dict. + """ + + CACHE_ROOT = "/tmp/MockCacheManager/" + + def _singleton_init(self, *args, **kwargs): + self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + self.index = {} + self.index_update = {} + self.locked = False + + if not os.path.exists(self.CACHE_ROOT): + os.mkdir(self.CACHE_ROOT) + + def lock_and_get_index(self): + """ + Sets the 'locked' attribute to True. + + """ + if self.locked: + pass # Should be throwing an exception + else: + self.locked = True + + def write_index_and_unlock(self): + """ + Updates the 'index' dict with whatever is in 'index_update' and sets 'locked' to False. + + """ + if self.locked: + if len(self.index_update) > 0: + self.index.update(self.index_update) + self.index_update = {} + self.locked = False + else: + pass # Should throw an exception telling user to lock first + + def unlock_index(self): + """ + Sets 'index_update' to an empty dict and sets 'locked' to False. + + """ + self.index_update = {} + self.locked = False + + def retrieve_and_cache_object(self, object_type, os_plugin, source_url, save_local): + """ + Writes out a mock cache file to '/tmp/MockCacheManager' with the same naming convention used by + CacheManager. 
+ + @param object_type: A string indicating the type of object being retrieved + @param os_plugin: Instance of the delegate for the OS associated with the download + @param source_url: Location from which to retrieve the object/file + @param save_local: bool indicating whether a local copy of the object should be saved + @return: dict containing the various cached locations of the file + local: Local path to file (contents are this dict) + glance: Glance object UUID (does not correlate to a real Glance object) + cinder: Cinder object UUID (dose not correlate to a real Cinder object) + """ + self.lock_and_get_index() + existing_cache = self._get_index_value(os_plugin.os_ver_arch(), object_type, None) + if existing_cache: + self.log.debug("Found object in cache") + self.unlock_index() + return existing_cache + + self.unlock_index() + self.log.debug("Object not in cache") + + object_name = os_plugin.os_ver_arch() + "-" + object_type + local_object_filename = self.CACHE_ROOT + object_name + locations = {"local": local_object_filename, "glance": str(uuid.uuid4()), "cinder": str(uuid.uuid4())} + + if not os.path.isfile(local_object_filename): + object_file = open(local_object_filename, 'w') + json.dump(locations, object_file) + object_file.close() + else: + self.log.warning("Local file (%s) is already present - assuming it is valid" % local_object_filename) + + self._do_index_updates(os_plugin.os_ver_arch(), object_type, locations) + return locations + + def _get_index_value(self, os_ver_arch, name, location): + if self.index is None: + raise Exception("Attempt made to read index values while a locked index is not present") + + if not os_ver_arch in self.index: + return None + + if not name in self.index[os_ver_arch]: + return None + + # If the specific location is not requested, return the whole location dict + if not location: + return self.index[os_ver_arch][name] + + if not location in self.index[os_ver_arch][name]: + return None + else: + return 
self.index[os_ver_arch][name][location] + + def _set_index_value(self, os_ver_arch, name, location, value): + if self.index is None: + raise Exception("Attempt made to read index values while a locked index is not present") + + if not os_ver_arch in self.index: + self.index_update[os_ver_arch] = {} + + if not name in self.index[os_ver_arch]: + self.index_update[os_ver_arch][name] = {} + + # If the specific location is not specified, assume value is the entire dict + if not location: + if type(value) is not dict: + raise Exception("When setting a value without a location, the value must be a dict") + self.index_update[os_ver_arch][name] = value + return + + self.index[os_ver_arch][name][location] = value + + def _do_index_updates(self, os_ver_arch, object_type, locations): + self.lock_and_get_index() + self._set_index_value(os_ver_arch, object_type, None, locations ) + self.write_index_and_unlock() \ No newline at end of file diff --git a/tests/MockNovaInstance.py b/tests/MockNovaInstance.py new file mode 100644 index 0000000..7be1820 --- /dev/null +++ b/tests/MockNovaInstance.py @@ -0,0 +1,47 @@ +# encoding: utf-8 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import uuid + + +class MockNovaInstance(object): + + INSTANCE_STATUS_LIST = ('status placeholder') + + def __init__(self, instance, stack_env): + self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + self.last_disk_activity = 0 + self.last_net_activity = 0 + self.instance = instance + self.instance_id = str(uuid.uuid4()) + self.instance_status_index = 0 + self.stack_env = stack_env + self.active = True + + @property + def id(self): + return self.instance_id + + @property + def status(self): + return self.INSTANCE_STATUS_LIST[self.instance_status_index] + + def get_disk_and_net_activity(self): + return self.last_disk_activity, self.last_net_activity + + def is_active(self): + return self.active \ No newline at end of file diff --git a/tests/MockOS.py b/tests/MockOS.py new file mode 100644 index 0000000..58db66c --- /dev/null +++ b/tests/MockOS.py @@ -0,0 +1,62 @@ +# coding=utf-8 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +from MockStackEnvironment import MockStackEnvironment +from MockCacheManager import MockCacheManager + + +class MockOS(object): + def __init__(self, osinfo_dict, install_type, install_media_location, install_config, install_script=None): + self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + self.status = 'RUNNING' # Possible return values: INPROGRESS, FAILED, COMPLETE + self.env = MockStackEnvironment() + self.cache = MockCacheManager() + self.osinfo_dict = osinfo_dict + self.install_type = install_type + self.install_media_location = install_media_location + self.install_config = install_config + self.install_script = install_script + self.iso_content_flag = False + self.iso_content_dict = {} + self.url_content_dict = {} + + def os_ver_arch(self): + return self.osinfo_dict['shortid'] + "-" + self.install_config['arch'] + + def prepare_install_instance(self): + pass + + def start_install_instance(self): + pass + + def update_status(self): + return self.status + + def wants_iso_content(self): + return self.iso_content_flag + + def iso_content_dict(self): + return self.iso_content_dict + + def url_content_dict(self): + return self.url_content_dict + + def abort(self): + pass + + def cleanup(self): + pass \ No newline at end of file diff --git a/tests/MockStackEnvironment.py b/tests/MockStackEnvironment.py new file mode 100644 index 0000000..268261b --- /dev/null +++ b/tests/MockStackEnvironment.py @@ -0,0 +1,113 @@ +# encoding: utf-8 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO: add failures + +import uuid +import logging +from novaimagebuilder.Singleton import Singleton +from MockNovaInstance import MockNovaInstance + + +class MockStackEnvironment(Singleton): + + # From http://docs.openstack.org/api/openstack-block-storage/2.0/content/Volumes.html + # this does not match the docstring in novaimagebuilder.StackEnvironment.get_volume_status() + VOLUME_STATUS_LIST = ('CREATING', + 'AVAILABLE', + 'ATTACHING', + 'IN-USE', + 'DELETING', + 'ERROR', + 'ERROR_DELETING', + 'BACKING-UP', + 'RESTORING-BACKUP', + 'ERROR_RESTORING') + + # From the docstring in novaimagebuilder.StackEnvironment.get_image_status() + IMAGE_STATUS_LIST = ('QUEUED', 'SAVING', 'ACTIVE', 'KILLED', 'DELETED', 'PENDING_DELETE') + + def _singleton_init(self): + super(MockStackEnvironment, self)._singleton_init() + self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + # Attributes controlling Mock behavior + self.cinder = False + self.cdrom = False + self.floppy = False + self.direct_boot = False + self.keystone_srvr = None + self.glance_srvr = None + self.cinder_srvr = None + self.failure = {'status': False, 'timeout': 0} + self.volume_status_index = 1 + self.image_status_index = 2 + + @property + def keystone_server(self): + return self.keystone_srvr + + @property + def glance_server(self): + return self.glance_srvr + + @property + def cinder_server(self): + return self.cinder_srvr + + def is_cinder(self): + return self.cinder + + def is_cdrom(self): + return self.cdrom + + def is_floppy(self): + return self.floppy + + def is_direct_boot(self): + return self.direct_boot + + def upload_image_to_glance(self, name, local_path=None, location=None, format='raw', min_disk=0, min_ram=0, + container_format='bare', is_public=True): + #self.log.debug("Doing mock glance upload") + #self.log.debug("File: (%s) - Name (%s) - Format (%s) - Container (%s)" 
% + # (local_path, name, format, container_format)) + return uuid.uuid4() + + def upload_volume_to_cinder(self, name, volume_size=None, local_path=None, location=None, format='raw', + container_format='bare', is_public=True, keep_image=True): + #self.log.debug("Doing mock glance upload and cinder copy") + #self.log.debug("File: (%s) - Name (%s) - Format (%s) - Container (%s)" % + # (local_path, name, format, container_format)) + return uuid.uuid4(), uuid.uuid4() + + def create_volume_from_image(self, image_id, volume_size=None): + return uuid.uuid4(), uuid.uuid4() + + def delete_image(self, image_id): + pass + + def delete_volume(self, volume_id): + pass + + def get_volume_status(self, volume_id): + return self.VOLUME_STATUS_LIST[self.volume_status_index] + + def get_image_status(self, image_id): + return self.IMAGE_STATUS_LIST[self.image_status_index] + + def launch_instance(self, root_disk=None, install_iso=None, secondary_iso=None, floppy=None, aki=None, ari=None, + cmdline=None, userdata=None): + return MockNovaInstance(object(), self) \ No newline at end of file diff --git a/imagebuilder/openstack/__init__.py b/tests/__init__.py similarity index 100% rename from imagebuilder/openstack/__init__.py rename to tests/__init__.py diff --git a/tests/test_OSInfo.py b/tests/test_OSInfo.py new file mode 100644 index 0000000..7a5e6b7 --- /dev/null +++ b/tests/test_OSInfo.py @@ -0,0 +1,85 @@ +# coding=utf-8 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest import TestCase +from novaimagebuilder.OSInfo import OSInfo + + +class TestOSInfo(TestCase): + def setUp(self): + self.osinfo = OSInfo() + + def test_os_id_for_shortid(self): + os_list = self.osinfo.db.get_os_list().get_elements() + for os in os_list: + self.assertEqual(self.osinfo.os_id_for_shortid(os.get_short_id()), os.get_id()) + + def test_os_for_shortid(self): + os = self.osinfo.os_for_shortid('fedora18') + expected_keys = {'name': str, 'version': str, 'distro': str, 'family': str, 'shortid': str, 'id': str, + 'media_list': list, 'tree_list': list, 'minimum_resources': list, + 'recommended_resources': list} + + self.assertIsNotNone(os) + self.assertIsInstance(os, dict) + # check that the correct items are in the dict (as defined in OSInfo) + # and that the values are the correct type + for key in expected_keys.keys(): + self.assertIn(key, os) + self.assertIsInstance(os[key], expected_keys[key]) + + def test_os_for_iso(self): + # TODO: implement test + self.skipTest('%s is only partially implemented and unused.' % __name__) + + def test_os_for_tree(self): + # TODO: implement test + self.skipTest('%s is only partially implemented and unused.' 
% __name__) + + def test_install_script(self): + config = {'admin_password': 'test_pw', + 'arch': 'test_arch', + 'license': 'test_license_key', + 'target_disk': 'C', + 'script_disk': 'A', + 'preinstall_disk': 'test-preinstall', + 'postinstall_disk': 'test-postinstall', + 'signed_drivers': False, + 'keyboard': 'en_TEST', + 'laguage': 'en_TEST', + 'timezone': 'America/Chicago'} + + fedora_script = self.osinfo.install_script('fedora18', config) + windows_script = self.osinfo.install_script('win2k8r2', config) + + # TODO: actually check that config values were set in the script(s) + self.assertIsNotNone(fedora_script) + self.assertIsInstance(fedora_script, str) + + self.assertIsNotNone(windows_script) + self.assertIsInstance(windows_script, str) + + self.assertNotEqual(fedora_script, windows_script) + + def test_os_ids(self): + all_ids = self.osinfo.os_ids() + fedora_ids = self.osinfo.os_ids({'fedora': 17}) + + self.assertIsNotNone(all_ids) + self.assertIsNotNone(fedora_ids) + self.assertIsInstance(all_ids, dict) + self.assertIsInstance(fedora_ids, dict) + self.assertLess(len(fedora_ids), len(all_ids)) \ No newline at end of file diff --git a/imagebuilder/api/controllers/osib/__init__.py b/tests/test_cacheManager.py similarity index 65% rename from imagebuilder/api/controllers/osib/__init__.py rename to tests/test_cacheManager.py index 051b664..4ffcf58 100644 --- a/imagebuilder/api/controllers/osib/__init__.py +++ b/tests/test_cacheManager.py @@ -1,4 +1,5 @@ -# +# coding=utf-8 + # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,10 +14,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- -from pecan.rest import RestController -from v1 import V1Controller +from unittest import TestCase -class OSIB(RestController): - v1 = V1Controller() \ No newline at end of file +class TestCacheManager(TestCase): + def test_lock_and_get_index(self): + self.fail() + + def test_write_index_and_unlock(self): + self.fail() + + def test_unlock_index(self): + self.fail() + + def test_retrieve_and_cache_object(self): + self.fail() \ No newline at end of file diff --git a/tests/testcache.py b/tests/testcache.py new file mode 100755 index 0000000..743e505 --- /dev/null +++ b/tests/testcache.py @@ -0,0 +1,47 @@ +#!/usr/bin/python +# coding=utf-8 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +sys.path.append("../novaimagebuilder") +from MockStackEnvironment import MockStackEnvironment as StackEnvironment +from novaimagebuilder.CacheManager import CacheManager +import logging + + +logging.basicConfig(level=logging.DEBUG, + format='%(asctime)s %(levelname)s %(name)s thread(%(threadName)s) Message: %(message)s') + +class MockOSPlugin(object): + + def __init__(self, os_ver_arch = "fedora19-x86_64", wants_iso = True ): + self.nameverarch = os_ver_arch + self.wantscdrom = wants_iso + + def os_ver_arch(self): + return self.nameverarch + + def wants_iso(self): + return self.wantscdrom + +print "---- the following should do a glance and cinder upload" + +mosp = MockOSPlugin(os_ver_arch = "fedora18-x86_64", wants_iso = False) +mse = StackEnvironment("username","password","tenant","auth_url") +cm = CacheManager(mse) + +cm.retrieve_and_cache_object("install-iso", mosp, "http://repos.fedorapeople.org/repos/aeolus/imagefactory/testing/repos/rhel/imagefactory.repo", + True)