Add an OpenStack-specific datasource

Openstack has a unique derivative datasource that is gaining usage.
Previously the config drive datasource provided part of this functionality
as well as the ec2 datasource, but since new functionality is being added
to openstack's special datasource it seems beneficial to combine the used
parts into a new datasource just made for handling openstack deployments
that use the openstack metadata service (possibly in combination with the
ec2 metadata service).

This patch factors out the common logic shared between the config drive
and the openstack metadata datasource and places that in a shared helper
file and then creates a new openstack datasource that reads from the
openstack metadata service and refactors the config drive datasource to
use this common logic.
This commit is contained in:
Joshua Harlow 2014-02-13 13:54:43 -05:00 committed by Scott Moser
commit d1cd60e990
14 changed files with 1149 additions and 430 deletions

View File

@ -29,6 +29,8 @@
- Add CloudSigma datasource [Kiril Vladimiroff] - Add CloudSigma datasource [Kiril Vladimiroff]
- Add initial support for Gentoo and Arch distributions [Nate House] - Add initial support for Gentoo and Arch distributions [Nate House]
- Add GCE datasource [Vaidas Jablonskis] - Add GCE datasource [Vaidas Jablonskis]
- Add native Openstack datasource which reads openstack metadata
rather than relying on EC2 data in openstack metadata service.
0.7.4: 0.7.4:
- fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a - fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a
partitioned block device with target filesystem on ephemeral0.1. partitioned block device with target filesystem on ephemeral0.1.

View File

@ -16,12 +16,9 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import httplib
from urlparse import (urlparse, urlunparse)
import functools import functools
import httplib
import json import json
import urllib
from cloudinit import log as logging from cloudinit import log as logging
from cloudinit import url_helper from cloudinit import url_helper
@ -40,16 +37,6 @@ def maybe_json_object(text):
return False return False
def combine_url(base, add_on):
base_parsed = list(urlparse(base))
path = base_parsed[2]
if path and not path.endswith("/"):
path += "/"
path += urllib.quote(str(add_on), safe="/:")
base_parsed[2] = path
return urlunparse(base_parsed)
# See: http://bit.ly/TyoUQs # See: http://bit.ly/TyoUQs
# #
class MetadataMaterializer(object): class MetadataMaterializer(object):
@ -121,14 +108,14 @@ class MetadataMaterializer(object):
(leaves, children) = self._parse(blob) (leaves, children) = self._parse(blob)
child_contents = {} child_contents = {}
for c in children: for c in children:
child_url = combine_url(base_url, c) child_url = url_helper.combine_url(base_url, c)
if not child_url.endswith("/"): if not child_url.endswith("/"):
child_url += "/" child_url += "/"
child_blob = str(self._caller(child_url)) child_blob = str(self._caller(child_url))
child_contents[c] = self._materialize(child_blob, child_url) child_contents[c] = self._materialize(child_blob, child_url)
leaf_contents = {} leaf_contents = {}
for (field, resource) in leaves.items(): for (field, resource) in leaves.items():
leaf_url = combine_url(base_url, resource) leaf_url = url_helper.combine_url(base_url, resource)
leaf_blob = str(self._caller(leaf_url)) leaf_blob = str(self._caller(leaf_url))
leaf_contents[field] = self._decode_leaf_blob(field, leaf_blob) leaf_contents[field] = self._decode_leaf_blob(field, leaf_blob)
joined = {} joined = {}
@ -153,8 +140,8 @@ def _skip_retry_on_codes(status_codes, _request_args, cause):
def get_instance_userdata(api_version='latest', def get_instance_userdata(api_version='latest',
metadata_address='http://169.254.169.254', metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5): ssl_details=None, timeout=5, retries=5):
ud_url = combine_url(metadata_address, api_version) ud_url = url_helper.combine_url(metadata_address, api_version)
ud_url = combine_url(ud_url, 'user-data') ud_url = url_helper.combine_url(ud_url, 'user-data')
user_data = '' user_data = ''
try: try:
# It is ok for userdata to not exist (thats why we are stopping if # It is ok for userdata to not exist (thats why we are stopping if
@ -178,8 +165,8 @@ def get_instance_userdata(api_version='latest',
def get_instance_metadata(api_version='latest', def get_instance_metadata(api_version='latest',
metadata_address='http://169.254.169.254', metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5): ssl_details=None, timeout=5, retries=5):
md_url = combine_url(metadata_address, api_version) md_url = url_helper.combine_url(metadata_address, api_version)
md_url = combine_url(md_url, 'meta-data') md_url = url_helper.combine_url(md_url, 'meta-data')
caller = functools.partial(util.read_file_or_url, caller = functools.partial(util.read_file_or_url,
ssl_details=ssl_details, timeout=timeout, ssl_details=ssl_details, timeout=timeout,
retries=retries) retries=retries)

View File

@ -37,6 +37,7 @@ CFG_BUILTIN = {
'OVF', 'OVF',
'MAAS', 'MAAS',
'GCE', 'GCE',
'OpenStack',
'Ec2', 'Ec2',
'CloudSigma', 'CloudSigma',
'CloudStack', 'CloudStack',

View File

@ -18,181 +18,79 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import json
import os import os
from cloudinit import log as logging from cloudinit import log as logging
from cloudinit import sources from cloudinit import sources
from cloudinit import util from cloudinit import util
from cloudinit.sources.helpers import openstack
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
# Various defaults/constants... # Various defaults/constants...
DEFAULT_IID = "iid-dsconfigdrive" DEFAULT_IID = "iid-dsconfigdrive"
DEFAULT_MODE = 'pass' DEFAULT_MODE = 'pass'
CFG_DRIVE_FILES_V1 = [
"etc/network/interfaces",
"root/.ssh/authorized_keys",
"meta.js",
]
DEFAULT_METADATA = { DEFAULT_METADATA = {
"instance-id": DEFAULT_IID, "instance-id": DEFAULT_IID,
} }
VALID_DSMODES = ("local", "net", "pass", "disabled") VALID_DSMODES = ("local", "net", "pass", "disabled")
FS_TYPES = ('vfat', 'iso9660')
LABEL_TYPES = ('config-2',)
OPTICAL_DEVICES = tuple(('/dev/sr%s' % i for i in range(0, 2)))
class ConfigDriveHelper(object): class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
def __init__(self, distro):
self.distro = distro
def on_first_boot(self, data):
if not data:
data = {}
if 'network_config' in data:
LOG.debug("Updating network interfaces from config drive")
self.distro.apply_network(data['network_config'])
files = data.get('files')
if files:
LOG.debug("Writing %s injected files", len(files))
try:
write_files(files)
except IOError:
util.logexc(LOG, "Failed writing files")
class DataSourceConfigDrive(sources.DataSource):
def __init__(self, sys_cfg, distro, paths): def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths) super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
self.source = None self.source = None
self.dsmode = 'local' self.dsmode = 'local'
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive') self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
self.version = None self.version = None
self.ec2_metadata = None self.ec2_metadata = None
self.helper = ConfigDriveHelper(distro) self.files = {}
def __str__(self): def __str__(self):
root = sources.DataSource.__str__(self) root = sources.DataSource.__str__(self)
mstr = "%s [%s,ver=%s]" % (root, mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
self.dsmode,
self.version)
mstr += "[source=%s]" % (self.source) mstr += "[source=%s]" % (self.source)
return mstr return mstr
def _ec2_name_to_device(self, name):
if not self.ec2_metadata:
return None
bdm = self.ec2_metadata.get('block-device-mapping', {})
for (ent_name, device) in bdm.items():
if name == ent_name:
return device
return None
def _os_name_to_device(self, name):
device = None
try:
criteria = 'LABEL=%s' % (name)
if name in ['swap']:
criteria = 'TYPE=%s' % (name)
dev_entries = util.find_devs_with(criteria)
if dev_entries:
device = dev_entries[0]
except util.ProcessExecutionError:
pass
return device
def _validate_device_name(self, device):
if not device:
return None
if not device.startswith("/"):
device = "/dev/%s" % device
if os.path.exists(device):
return device
# Durn, try adjusting the mapping
remapped = self._remap_device(os.path.basename(device))
if remapped:
LOG.debug("Remapped device name %s => %s", device, remapped)
return remapped
return None
def device_name_to_device(self, name):
# Translate a 'name' to a 'physical' device
if not name:
return None
# Try the ec2 mapping first
names = [name]
if name == 'root':
names.insert(0, 'ami')
if name == 'ami':
names.append('root')
device = None
LOG.debug("Using ec2 metadata lookup to find device %s", names)
for n in names:
device = self._ec2_name_to_device(n)
device = self._validate_device_name(device)
if device:
break
# Try the openstack way second
if not device:
LOG.debug("Using os lookup to find device %s", names)
for n in names:
device = self._os_name_to_device(n)
device = self._validate_device_name(device)
if device:
break
# Ok give up...
if not device:
return None
else:
LOG.debug("Using cfg drive lookup mapped to device %s", device)
return device
def get_data(self): def get_data(self):
found = None found = None
md = {} md = {}
results = {} results = {}
if os.path.isdir(self.seed_dir): if os.path.isdir(self.seed_dir):
try: try:
results = read_config_drive_dir(self.seed_dir) results = read_config_drive(self.seed_dir)
found = self.seed_dir found = self.seed_dir
except NonConfigDriveDir: except openstack.NonReadable:
util.logexc(LOG, "Failed reading config drive from %s", util.logexc(LOG, "Failed reading config drive from %s",
self.seed_dir) self.seed_dir)
if not found: if not found:
devlist = find_candidate_devs() for dev in find_candidate_devs():
for dev in devlist:
try: try:
results = util.mount_cb(dev, read_config_drive_dir) results = util.mount_cb(dev, read_config_drive)
found = dev found = dev
break except openstack.NonReadable:
except (NonConfigDriveDir, util.MountFailedError):
pass pass
except BrokenConfigDriveDir: except util.MountFailedError:
util.logexc(LOG, "broken config drive: %s", dev) pass
except openstack.BrokenMetadata:
util.logexc(LOG, "Broken config drive: %s", dev)
if found:
break
if not found: if not found:
return False return False
md = results['metadata'] md = results.get('metadata', {})
md = util.mergemanydict([md, DEFAULT_METADATA]) md = util.mergemanydict([md, DEFAULT_METADATA])
# Perform some metadata 'fixups'
#
# OpenStack uses the 'hostname' key
# while most of cloud-init uses the metadata
# 'local-hostname' key instead so if it doesn't
# exist we need to make sure its copied over.
for (tgt, src) in [('local-hostname', 'hostname')]:
if tgt not in md and src in md:
md[tgt] = md[src]
user_dsmode = results.get('dsmode', None) user_dsmode = results.get('dsmode', None)
if user_dsmode not in VALID_DSMODES + (None,): if user_dsmode not in VALID_DSMODES + (None,):
LOG.warn("user specified invalid mode: %s" % user_dsmode) LOG.warn("User specified invalid mode: %s", user_dsmode)
user_dsmode = None user_dsmode = None
dsmode = get_ds_mode(cfgdrv_ver=results['cfgdrive_ver'], dsmode = get_ds_mode(cfgdrv_ver=results['version'],
ds_cfg=self.ds_cfg.get('dsmode'), ds_cfg=self.ds_cfg.get('dsmode'),
user=user_dsmode) user=user_dsmode)
@ -209,7 +107,7 @@ class DataSourceConfigDrive(sources.DataSource):
prev_iid = get_previous_iid(self.paths) prev_iid = get_previous_iid(self.paths)
cur_iid = md['instance-id'] cur_iid = md['instance-id']
if prev_iid != cur_iid and self.dsmode == "local": if prev_iid != cur_iid and self.dsmode == "local":
self.helper.on_first_boot(results) on_first_boot(results, distro=self.distro)
# dsmode != self.dsmode here if: # dsmode != self.dsmode here if:
# * dsmode = "pass", pass means it should only copy files and then # * dsmode = "pass", pass means it should only copy files and then
@ -225,16 +123,11 @@ class DataSourceConfigDrive(sources.DataSource):
self.metadata = md self.metadata = md
self.ec2_metadata = results.get('ec2-metadata') self.ec2_metadata = results.get('ec2-metadata')
self.userdata_raw = results.get('userdata') self.userdata_raw = results.get('userdata')
self.version = results['cfgdrive_ver'] self.version = results['version']
self.files.update(results.get('files', {}))
self.vendordata_raw = results.get('vendordata')
return True return True
def get_public_ssh_keys(self):
name = "public_keys"
if self.version == 1:
name = "public-keys"
return sources.normalize_pubkey_data(self.metadata.get(name))
class DataSourceConfigDriveNet(DataSourceConfigDrive): class DataSourceConfigDriveNet(DataSourceConfigDrive):
def __init__(self, sys_cfg, distro, paths): def __init__(self, sys_cfg, distro, paths):
@ -242,222 +135,6 @@ class DataSourceConfigDriveNet(DataSourceConfigDrive):
self.dsmode = 'net' self.dsmode = 'net'
class NonConfigDriveDir(Exception):
pass
class BrokenConfigDriveDir(Exception):
pass
def find_candidate_devs():
"""Return a list of devices that may contain the config drive.
The returned list is sorted by search order where the first item has
should be searched first (highest priority)
config drive v1:
Per documentation, this is "associated as the last available disk on the
instance", and should be VFAT.
Currently, we do not restrict search list to "last available disk"
config drive v2:
Disk should be:
* either vfat or iso9660 formated
* labeled with 'config-2'
"""
# Query optical drive to get it in blkid cache for 2.6 kernels
util.find_devs_with(path="/dev/sr0")
util.find_devs_with(path="/dev/sr1")
by_fstype = (util.find_devs_with("TYPE=vfat") +
util.find_devs_with("TYPE=iso9660"))
by_label = util.find_devs_with("LABEL=config-2")
# give preference to "last available disk" (vdb over vda)
# note, this is not a perfect rendition of that.
by_fstype.sort(reverse=True)
by_label.sort(reverse=True)
# combine list of items by putting by-label items first
# followed by fstype items, but with dupes removed
combined = (by_label + [d for d in by_fstype if d not in by_label])
# We are looking for a block device or partition with necessary label or
# an unpartitioned block device.
combined = [d for d in combined
if d in by_label or not util.is_partition(d)]
return combined
def read_config_drive_dir(source_dir):
last_e = NonConfigDriveDir("Not found")
for finder in (read_config_drive_dir_v2, read_config_drive_dir_v1):
try:
data = finder(source_dir)
return data
except NonConfigDriveDir as exc:
last_e = exc
raise last_e
def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
if (not os.path.isdir(os.path.join(source_dir, "openstack", version)) and
os.path.isdir(os.path.join(source_dir, "openstack", "latest"))):
LOG.warn("version '%s' not available, attempting to use 'latest'" %
version)
version = "latest"
datafiles = (
('metadata',
"openstack/%s/meta_data.json" % version, True, json.loads),
('userdata', "openstack/%s/user_data" % version, False, None),
('ec2-metadata', "ec2/latest/meta-data.json", False, json.loads),
)
results = {'userdata': None}
for (name, path, required, process) in datafiles:
fpath = os.path.join(source_dir, path)
data = None
found = False
if os.path.isfile(fpath):
try:
data = util.load_file(fpath)
except IOError:
raise BrokenConfigDriveDir("Failed to read: %s" % fpath)
found = True
elif required:
raise NonConfigDriveDir("Missing mandatory path: %s" % fpath)
if found and process:
try:
data = process(data)
except Exception as exc:
raise BrokenConfigDriveDir(("Failed to process "
"path: %s") % fpath)
if found:
results[name] = data
# instance-id is 'uuid' for openstack. just copy it to instance-id.
if 'instance-id' not in results['metadata']:
try:
results['metadata']['instance-id'] = results['metadata']['uuid']
except KeyError:
raise BrokenConfigDriveDir("No uuid entry in metadata")
if 'random_seed' in results['metadata']:
random_seed = results['metadata']['random_seed']
try:
results['metadata']['random_seed'] = base64.b64decode(random_seed)
except (ValueError, TypeError) as exc:
raise BrokenConfigDriveDir("Badly formatted random_seed: %s" % exc)
def read_content_path(item):
# do not use os.path.join here, as content_path starts with /
cpath = os.path.sep.join((source_dir, "openstack",
"./%s" % item['content_path']))
return util.load_file(cpath)
files = {}
try:
for item in results['metadata'].get('files', {}):
files[item['path']] = read_content_path(item)
# the 'network_config' item in metadata is a content pointer
# to the network config that should be applied.
# in folsom, it is just a '/etc/network/interfaces' file.
item = results['metadata'].get("network_config", None)
if item:
results['network_config'] = read_content_path(item)
except Exception as exc:
raise BrokenConfigDriveDir("Failed to read file %s: %s" % (item, exc))
# to openstack, user can specify meta ('nova boot --meta=key=value') and
# those will appear under metadata['meta'].
# if they specify 'dsmode' they're indicating the mode that they intend
# for this datasource to operate in.
try:
results['dsmode'] = results['metadata']['meta']['dsmode']
except KeyError:
pass
results['files'] = files
results['cfgdrive_ver'] = 2
return results
def read_config_drive_dir_v1(source_dir):
"""
read source_dir, and return a tuple with metadata dict, user-data,
files and version (1). If not a valid dir, raise a NonConfigDriveDir
"""
found = {}
for af in CFG_DRIVE_FILES_V1:
fn = os.path.join(source_dir, af)
if os.path.isfile(fn):
found[af] = fn
if len(found) == 0:
raise NonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
md = {}
keydata = ""
if "etc/network/interfaces" in found:
fn = found["etc/network/interfaces"]
md['network_config'] = util.load_file(fn)
if "root/.ssh/authorized_keys" in found:
fn = found["root/.ssh/authorized_keys"]
keydata = util.load_file(fn)
meta_js = {}
if "meta.js" in found:
fn = found['meta.js']
content = util.load_file(fn)
try:
# Just check if its really json...
meta_js = json.loads(content)
if not isinstance(meta_js, (dict)):
raise TypeError("Dict expected for meta.js root node")
except (ValueError, TypeError) as e:
raise NonConfigDriveDir("%s: %s, %s" %
(source_dir, "invalid json in meta.js", e))
md['meta_js'] = content
# keydata in meta_js is preferred over "injected"
keydata = meta_js.get('public-keys', keydata)
if keydata:
lines = keydata.splitlines()
md['public-keys'] = [l for l in lines
if len(l) and not l.startswith("#")]
# config-drive-v1 has no way for openstack to provide the instance-id
# so we copy that into metadata from the user input
if 'instance-id' in meta_js:
md['instance-id'] = meta_js['instance-id']
results = {'cfgdrive_ver': 1, 'metadata': md}
# allow the user to specify 'dsmode' in a meta tag
if 'dsmode' in meta_js:
results['dsmode'] = meta_js['dsmode']
# config-drive-v1 has no way of specifying user-data, so the user has
# to cheat and stuff it in a meta tag also.
results['userdata'] = meta_js.get('user-data')
# this implementation does not support files
# (other than network/interfaces and authorized_keys)
results['files'] = []
return results
def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None): def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
"""Determine what mode should be used. """Determine what mode should be used.
valid values are 'pass', 'disabled', 'local', 'net' valid values are 'pass', 'disabled', 'local', 'net'
@ -483,6 +160,21 @@ def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
return "net" return "net"
def read_config_drive(source_dir, version="2012-08-10"):
    """Read a config drive from source_dir, preferring the v2 layout.

    Tries the version 2 (openstack/) format first and falls back to the
    legacy version 1 format. Returns the results dict from the first
    reader that succeeds; if every format is unreadable, re-raises the
    last NonReadable encountered.
    """
    reader = openstack.ConfigDriveReader(source_dir)
    last_excp = None
    # Attempt each on-disk format in priority order (v2 before v1).
    for attempt in (lambda: reader.read_v2(version=version),
                    lambda: reader.read_v1()):
        try:
            return attempt()
        except openstack.NonReadable as e:
            last_excp = e
    raise last_excp
def get_previous_iid(paths): def get_previous_iid(paths):
# interestingly, for this purpose the "previous" instance-id is the current # interestingly, for this purpose the "previous" instance-id is the current
# instance-id. cloud-init hasn't moved them over yet as this datasource # instance-id. cloud-init hasn't moved them over yet as this datasource
@ -494,17 +186,79 @@ def get_previous_iid(paths):
return None return None
def write_files(files): def on_first_boot(data, distro=None):
for (name, content) in files.iteritems(): """Performs any first-boot actions using data read from a config-drive."""
if name[0] != os.sep: if not isinstance(data, dict):
name = os.sep + name raise TypeError("Config-drive data expected to be a dict; not %s"
util.write_file(name, content, mode=0660) % (type(data)))
net_conf = data.get("network_config", '')
if net_conf and distro:
LOG.debug("Updating network interfaces from config drive")
distro.apply_network(net_conf)
files = data.get('files', {})
if files:
LOG.debug("Writing %s injected files", len(files))
for (filename, content) in files.iteritems():
if not filename.startswith(os.sep):
filename = os.sep + filename
try:
util.write_file(filename, content, mode=0660)
except IOError:
util.logexc(LOG, "Failed writing file: %s", filename)
def find_candidate_devs(probe_optical=True):
    """Return a list of devices that may contain the config drive.

    The returned list is sorted by search order; the first item should
    be searched first (highest priority).

    config drive v1:
       Per documentation, this is "associated as the last available disk
       on the instance", and should be VFAT.
       Currently, we do not restrict the search list to "last available
       disk".

    config drive v2:
       Disk should be:
        * either vfat or iso9660 formated
        * labeled with 'config-2'
    """
    # Query optical drives so they land in the blkid cache (2.6 kernels).
    if probe_optical:
        for device in OPTICAL_DEVICES:
            try:
                util.find_devs_with(path=device)
            except util.ProcessExecutionError:
                pass
    by_fstype = [dev
                 for fs_type in FS_TYPES
                 for dev in util.find_devs_with("TYPE=%s" % (fs_type))]
    by_label = [dev
                for label in LABEL_TYPES
                for dev in util.find_devs_with("LABEL=%s" % (label))]
    # Give preference to the "last available disk" (vdb over vda); note,
    # this is not a perfect rendition of that.
    by_fstype.sort(reverse=True)
    by_label.sort(reverse=True)
    # Combine, putting by-label items first, followed by fstype items,
    # with duplicates removed.
    candidates = by_label + [d for d in by_fstype if d not in by_label]
    # We are looking for a block device or partition with the necessary
    # label, or an unpartitioned block device (ex: sda, not sda1).
    return [d for d in candidates
            if d in by_label or not util.is_partition(d)]
# Used to match classes to dependencies # Used to match classes to dependencies
datasources = [ datasources = [
(DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )), (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
(DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
] ]

View File

@ -0,0 +1,162 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2014 Yahoo! Inc.
#
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
from cloudinit.sources.helpers import openstack
LOG = logging.getLogger(__name__)
# Various defaults/constants...
# Link-local metadata service address (shared with the EC2-style service).
DEF_MD_URL = "http://169.254.169.254"
# Fallback instance-id used when the metadata provides none.
DEFAULT_IID = "iid-dsopenstack"
DEFAULT_METADATA = {
    "instance-id": DEFAULT_IID,
}
# This datasource only runs with networking up, so 'local'/'pass' do not apply.
VALID_DSMODES = ("net", "disabled")
class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
    """Datasource that reads the native OpenStack metadata service.

    Crawls the openstack/<version>/meta_data.json tree exposed at the
    link-local metadata address (rather than the EC2-compatible view).
    Device-name mapping helpers come from openstack.SourceMixin.
    """

    def __init__(self, sys_cfg, distro, paths):
        super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
        self.dsmode = 'net'
        # Base URL selected by wait_for_metadata_service(); None until found.
        self.metadata_address = None
        self.ssl_details = util.fetch_ssl_details(self.paths)
        # Metadata format version, filled in by get_data().
        self.version = None
        # Injected files read from the metadata service (path -> content).
        self.files = {}
        self.ec2_metadata = None
        if not self.ds_cfg:
            self.ds_cfg = {}

    def __str__(self):
        root = sources.DataSource.__str__(self)
        mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
        return mstr

    def _get_url_settings(self):
        """Return (max_wait, timeout) for metadata URL polling from ds_cfg."""
        # TODO(harlowja): this is shared with ec2 datasource, we should just
        # move it to a shared location instead...
        # Note: the defaults here are different though.

        # max_wait < 0 indicates do not wait
        max_wait = -1
        timeout = 10
        try:
            max_wait = int(self.ds_cfg.get("max_wait", max_wait))
        except Exception:
            util.logexc(LOG, "Failed to get max wait. using %s", max_wait)

        try:
            timeout = max(0, int(self.ds_cfg.get("timeout", timeout)))
        except Exception:
            util.logexc(LOG, "Failed to get timeout, using %s", timeout)
        return (max_wait, timeout)

    def wait_for_metadata_service(self):
        """Poll candidate metadata URLs; record the first that responds.

        Sets self.metadata_address to the working base URL (or None) and
        returns True if a reachable metadata service was found.
        """
        urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
        # Drop URLs whose hostnames do not resolve before polling them.
        filtered = [x for x in urls if util.is_resolvable_url(x)]
        if set(filtered) != set(urls):
            LOG.debug("Removed the following from metadata urls: %s",
                      list((set(urls) - set(filtered))))
        if len(filtered):
            urls = filtered
        else:
            LOG.warn("Empty metadata url list! using default list")
            urls = [DEF_MD_URL]

        # Probe the 'latest' meta_data.json under each base URL, keeping a
        # mapping back to the base so we can record which base worked.
        md_urls = []
        url2base = {}
        for url in urls:
            md_url = url_helper.combine_url(url, 'openstack',
                                            openstack.OS_LATEST,
                                            'meta_data.json')
            md_urls.append(md_url)
            url2base[md_url] = url

        (max_wait, timeout) = self._get_url_settings()
        start_time = time.time()
        avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
                                            timeout=timeout)
        if avail_url:
            LOG.debug("Using metadata source: '%s'", url2base[avail_url])
        else:
            LOG.debug("Giving up on OpenStack md from %s after %s seconds",
                      md_urls, int(time.time() - start_time))

        self.metadata_address = url2base.get(avail_url)
        return bool(avail_url)

    def get_data(self):
        """Locate and crawl the metadata service; populate datasource state.

        Returns True on success; False when no service is reachable, the
        metadata is unreadable/broken, or the user set dsmode 'disabled'.
        """
        try:
            if not self.wait_for_metadata_service():
                return False
        except IOError:
            return False

        try:
            results = util.log_time(LOG.debug,
                                    'Crawl of openstack metadata service',
                                    read_metadata_service,
                                    args=[self.metadata_address],
                                    kwargs={'ssl_details': self.ssl_details,
                                            'version': openstack.OS_LATEST})
        except openstack.NonReadable:
            return False
        except (openstack.BrokenMetadata, IOError):
            util.logexc(LOG, "Broken metadata address %s",
                        self.metadata_address)
            return False

        user_dsmode = results.get('dsmode', None)
        if user_dsmode not in VALID_DSMODES + (None,):
            LOG.warn("User specified invalid mode: %s", user_dsmode)
            user_dsmode = None
        if user_dsmode == 'disabled':
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))
        self.vendordata_raw = results.get('vendordata')
        return True
def read_metadata_service(base_url, version=None, ssl_details=None):
    """Crawl the OpenStack metadata service (v2 layout) rooted at base_url."""
    return openstack.MetadataReader(
        base_url, ssl_details=ssl_details).read_v2(version=version)
# Used to match classes to dependencies
datasources = [
    (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]


# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
    """Return the datasources above that satisfy the given dependencies."""
    return sources.list_from_depends(depends, datasources)

View File

@ -0,0 +1,14 @@
# vi: ts=4 expandtab
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1,436 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
import base64
import copy
import os
from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
# For reference: http://tinyurl.com/laora4c

LOG = logging.getLogger(__name__)

# Config-drive v1 on-disk files and how to fold them into metadata.
FILES_V1 = {
    # Path <-> (metadata key name, translator function, default value)
    'etc/network/interfaces': ('network_config', lambda x: x, ''),
    'meta.js': ('meta_js', util.load_json, {}),
    "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''),
}
# Keys duplicated under cloud-init's expected names after reading.
KEY_COPIES = (
    # Cloud-init metadata names <-> (metadata key, is required)
    ('local-hostname', 'hostname', False),
    ('instance-id', 'uuid', True),
)
# Known dated metadata format versions, oldest first.
OS_VERSIONS = (
    '2012-08-10',  # folsom
    '2013-04-04',  # grizzly
    '2013-10-17',  # havana
)
# Symbolic 'latest' version directory provided by the service.
OS_LATEST = 'latest'
class NonReadable(IOError):
    """Raised when a metadata location is absent or cannot be read."""
    pass
class BrokenMetadata(IOError):
    """Raised when metadata exists but is malformed or unprocessable."""
    pass
class SourceMixin(object):
    """Shared helpers for OpenStack-style datasources.

    Provides device-name mapping and public-key extraction. Expects the
    host class to define self.ec2_metadata, self.metadata, self.version
    and a _remap_device() method.
    """

    def _ec2_name_to_device(self, name):
        """Look up a device via the ec2 'block-device-mapping' metadata."""
        if not self.ec2_metadata:
            return None
        bdm = self.ec2_metadata.get('block-device-mapping', {})
        for (ent_name, device) in bdm.items():
            if name == ent_name:
                return device
        return None

    def get_public_ssh_keys(self):
        """Return normalized public SSH keys from the metadata.

        Config-drive v1 stores them under 'public-keys'; v2 (and the
        metadata service) use 'public_keys'.
        """
        name = "public_keys"
        if self.version == 1:
            name = "public-keys"
        return sources.normalize_pubkey_data(self.metadata.get(name))

    def _os_name_to_device(self, name):
        """Find a device by filesystem LABEL (or TYPE, for 'swap')."""
        device = None
        try:
            criteria = 'LABEL=%s' % (name)
            if name == 'swap':
                criteria = 'TYPE=%s' % (name)
            dev_entries = util.find_devs_with(criteria)
            if dev_entries:
                device = dev_entries[0]
        except util.ProcessExecutionError:
            pass
        return device

    def _validate_device_name(self, device):
        """Return an existing device path for 'device', remapping if needed."""
        if not device:
            return None
        if not device.startswith("/"):
            device = "/dev/%s" % device
        if os.path.exists(device):
            return device
        # Durn, try adjusting the mapping
        remapped = self._remap_device(os.path.basename(device))
        if remapped:
            LOG.debug("Remapped device name %s => %s", device, remapped)
            return remapped
        return None

    def device_name_to_device(self, name):
        """Translate a 'name' to a 'physical' device path, or None."""
        if not name:
            return None
        # 'root' and 'ami' are treated as aliases of each other.
        names = [name]
        if name == 'root':
            names.insert(0, 'ami')
        if name == 'ami':
            names.append('root')
        device = None
        # Try the ec2 mapping first
        LOG.debug("Using ec2 style lookup to find device %s", names)
        for n in names:
            device = self._ec2_name_to_device(n)
            device = self._validate_device_name(device)
            if device:
                break
        # Try the openstack way second
        if not device:
            LOG.debug("Using openstack style lookup to find device %s", names)
            for n in names:
                device = self._os_name_to_device(n)
                device = self._validate_device_name(device)
                if device:
                    break
        # Ok give up...
        if not device:
            return None
        else:
            LOG.debug("Mapped %s to device %s", name, device)
            return device
class BaseReader(object):
    """Base for reading openstack-formatted metadata from a backing store.

    Subclasses supply the path primitives (join/exists/read) for their
    particular store (a local config drive directory or the remote
    metadata service); this class implements the shared format handling.
    """

    __metaclass__ = abc.ABCMeta  # python 2 style abstract base class

    def __init__(self, base_path):
        # Root (directory or url) that all reads are performed under.
        self.base_path = base_path

    @abc.abstractmethod
    def _path_join(self, base, *add_ons):
        # Join path components in a store-specific manner.
        pass

    @abc.abstractmethod
    def _path_exists(self, path):
        # Return True if 'path' exists in the backing store.
        pass

    @abc.abstractmethod
    def _path_read(self, path):
        # Return the raw contents stored at 'path'.
        pass

    @abc.abstractmethod
    def _read_ec2_metadata(self):
        # Return any accompanying ec2-style metadata (as a dict).
        pass

    def _read_content_path(self, item):
        # Resolve an item's 'content_path' (relative to <base>/openstack)
        # and return the referenced contents.
        path = item.get('content_path', '').lstrip("/")
        path_pieces = path.split("/")
        valid_pieces = [p for p in path_pieces if len(p)]
        if not valid_pieces:
            raise BrokenMetadata("Item %s has no valid content path" % (item))
        path = self._path_join(self.base_path, "openstack", *path_pieces)
        return self._path_read(path)

    def _find_working_version(self, version):
        # Prefer the requested version; otherwise fall back to the first
        # known version directory that exists, or OS_LATEST as a last resort.
        # NOTE(review): OS_VERSIONS is ordered oldest-first, so the fallback
        # picks the oldest available format — confirm that is intended.
        search_versions = [version] + list(OS_VERSIONS)
        for potential_version in search_versions:
            if not potential_version:
                continue
            path = self._path_join(self.base_path, "openstack",
                                   potential_version)
            if self._path_exists(path):
                if potential_version != version:
                    LOG.warn("Version '%s' not available, attempting to use"
                             " version '%s' instead", version,
                             potential_version)
                return potential_version
        LOG.warn("Version '%s' not available, attempting to use '%s'"
                 " instead", version, OS_LATEST)
        return OS_LATEST

    def read_v2(self, version=None):
        """Reads a version 2 formatted location.

        Return a dict with metadata, userdata, ec2-metadata, dsmode,
        network_config, files and version (2).

        If not a valid location, raise a NonReadable exception.
        """

        def datafiles(version):
            # Map each result key to (path, required?, translator).
            files = {}
            files['metadata'] = (
                # File path to read
                self._path_join("openstack", version, 'meta_data.json'),
                # Is it required?
                True,
                # Translator function (applied after loading)
                util.load_json,
            )
            files['userdata'] = (
                self._path_join("openstack", version, 'user_data'),
                False,
                lambda x: x,
            )
            files['vendordata'] = (
                self._path_join("openstack", version, 'vendor_data.json'),
                False,
                util.load_json,
            )
            return files

        version = self._find_working_version(version)
        results = {
            'userdata': '',
            'version': 2,
        }
        data = datafiles(version)
        # NOTE(review): 'data' is rebound inside this loop, shadowing the
        # datafiles dict; iteration is unaffected because iteritems() holds
        # its own reference, but a distinct name would be clearer.
        for (name, (path, required, translator)) in data.iteritems():
            path = self._path_join(self.base_path, path)
            data = None
            found = False
            if self._path_exists(path):
                try:
                    data = self._path_read(path)
                except IOError:
                    raise NonReadable("Failed to read: %s" % path)
                found = True
            else:
                if required:
                    raise NonReadable("Missing mandatory path: %s" % path)
            if found and translator:
                try:
                    data = translator(data)
                except Exception as e:
                    raise BrokenMetadata("Failed to process "
                                         "path %s: %s" % (path, e))
            if found:
                results[name] = data
        metadata = results['metadata']
        if 'random_seed' in metadata:
            # The random seed is transported base64-encoded; decode in place.
            random_seed = metadata['random_seed']
            try:
                metadata['random_seed'] = base64.b64decode(random_seed)
            except (ValueError, TypeError) as e:
                raise BrokenMetadata("Badly formatted metadata"
                                     " random_seed entry: %s" % e)
        # load any files that were provided
        files = {}
        metadata_files = metadata.get('files', [])
        for item in metadata_files:
            if 'path' not in item:
                continue
            path = item['path']
            try:
                files[path] = self._read_content_path(item)
            except Exception as e:
                raise BrokenMetadata("Failed to read provided "
                                     "file %s: %s" % (path, e))
        results['files'] = files
        # The 'network_config' item in metadata is a content pointer
        # to the network config that should be applied. It is just a
        # ubuntu/debian '/etc/network/interfaces' file.
        net_item = metadata.get("network_config", None)
        if net_item:
            try:
                results['network_config'] = self._read_content_path(net_item)
            except IOError as e:
                raise BrokenMetadata("Failed to read network"
                                     " configuration: %s" % (e))
        # To openstack, user can specify meta ('nova boot --meta=key=value')
        # and those will appear under metadata['meta'].
        # if they specify 'dsmode' they're indicating the mode that they intend
        # for this datasource to operate in.
        try:
            results['dsmode'] = metadata['meta']['dsmode']
        except KeyError:
            pass
        # Read any ec2-metadata (if applicable)
        results['ec2-metadata'] = self._read_ec2_metadata()
        # Perform some misc. metadata key renames...
        for (target_key, source_key, is_required) in KEY_COPIES:
            if is_required and source_key not in metadata:
                raise BrokenMetadata("No '%s' entry in metadata" % source_key)
            if source_key in metadata:
                metadata[target_key] = metadata.get(source_key)
        return results
class ConfigDriveReader(BaseReader):
    """Reads openstack metadata from a local config drive directory."""

    def __init__(self, base_path):
        super(ConfigDriveReader, self).__init__(base_path)

    def _path_join(self, base, *add_ons):
        # Filesystem based path joining.
        components = [base] + list(add_ons)
        return os.path.join(*components)

    def _path_exists(self, path):
        return os.path.exists(path)

    def _path_read(self, path):
        return util.load_file(path)

    def _read_ec2_metadata(self):
        # Config drives may also carry a json snapshot of ec2 metadata;
        # absence of that file is not an error (empty dict is returned).
        path = self._path_join(self.base_path,
                               'ec2', 'latest', 'meta-data.json')
        if not self._path_exists(path):
            return {}
        else:
            try:
                return util.load_json(self._path_read(path))
            except Exception as e:
                raise BrokenMetadata("Failed to process "
                                     "path %s: %s" % (path, e))

    def read_v1(self):
        """Reads a version 1 formatted location.

        Return a dict with metadata, userdata, dsmode, files and version (1).

        If not a valid path, raise a NonReadable exception.
        """
        # Collect which of the known v1 files are actually present.
        found = {}
        for name in FILES_V1.keys():
            path = self._path_join(self.base_path, name)
            if self._path_exists(path):
                found[name] = path
        if len(found) == 0:
            raise NonReadable("%s: no files found" % (self.base_path))
        md = {}
        # Each entry maps a file to (metadata key, translator, default).
        for (name, (key, translator, default)) in FILES_V1.iteritems():
            if name in found:
                path = found[name]
                try:
                    contents = self._path_read(path)
                except IOError:
                    raise BrokenMetadata("Failed to read: %s" % path)
                try:
                    md[key] = translator(contents)
                except Exception as e:
                    raise BrokenMetadata("Failed to process "
                                         "path %s: %s" % (path, e))
            else:
                md[key] = copy.deepcopy(default)
        keydata = md['authorized_keys']
        meta_js = md['meta_js']
        # keydata in meta_js is preferred over "injected"
        keydata = meta_js.get('public-keys', keydata)
        if keydata:
            # Keep non-empty, non-comment lines only.
            lines = keydata.splitlines()
            md['public-keys'] = [l for l in lines
                                 if len(l) and not l.startswith("#")]
        # config-drive-v1 has no way for openstack to provide the instance-id
        # so we copy that into metadata from the user input
        if 'instance-id' in meta_js:
            md['instance-id'] = meta_js['instance-id']
        results = {
            'version': 1,
            'metadata': md,
        }
        # allow the user to specify 'dsmode' in a meta tag
        if 'dsmode' in meta_js:
            results['dsmode'] = meta_js['dsmode']
        # config-drive-v1 has no way of specifying user-data, so the user has
        # to cheat and stuff it in a meta tag also.
        results['userdata'] = meta_js.get('user-data', '')
        # this implementation does not support files other than
        # network/interfaces and authorized_keys...
        results['files'] = {}
        return results
class MetadataReader(BaseReader):
    """Reads openstack metadata from the remote metadata service."""

    def __init__(self, base_url, ssl_details=None, timeout=5, retries=5):
        super(MetadataReader, self).__init__(base_url)
        self.ssl_details = ssl_details
        self.timeout = float(timeout)
        self.retries = int(retries)

    def _path_read(self, path):
        # Fetch the url (with retries) and hand back its body.
        response = url_helper.readurl(path,
                                      timeout=self.timeout,
                                      retries=self.retries,
                                      ssl_details=self.ssl_details)
        return response.contents

    def _path_exists(self, path):

        def should_retry_cb(_request_args, cause):
            try:
                # A definitive client/server error means the path is
                # really absent; stop retrying.
                return int(cause.code) < 400
            except (TypeError, ValueError):
                # Older versions of requests didn't have a code.
                return True

        try:
            response = url_helper.readurl(path,
                                          timeout=self.timeout,
                                          retries=self.retries,
                                          ssl_details=self.ssl_details,
                                          exception_cb=should_retry_cb)
            return response.ok()
        except IOError:
            return False

    def _path_join(self, base, *add_ons):
        # Url based path joining.
        return url_helper.combine_url(base, *add_ons)

    def _read_ec2_metadata(self):
        return ec2_utils.get_instance_metadata(retries=self.retries,
                                               timeout=self.timeout,
                                               ssl_details=self.ssl_details)

View File

@ -234,7 +234,7 @@ class Init(object):
copy.deepcopy(self.ds_deps), copy.deepcopy(self.ds_deps),
cfg_list, cfg_list,
pkg_list) pkg_list)
LOG.debug("Loaded datasource %s - %s", dsname, ds) LOG.info("Loaded datasource %s - %s", dsname, ds)
self.datasource = ds self.datasource = ds
# Ensure we adjust our path members datasource # Ensure we adjust our path members datasource
# now that we have one (thus allowing ipath to be used) # now that we have one (thus allowing ipath to be used)

View File

@ -22,6 +22,7 @@
import httplib import httplib
import time import time
import urllib
import requests import requests
from requests import exceptions from requests import exceptions
@ -38,6 +39,7 @@ NOT_FOUND = httplib.NOT_FOUND
# Check if requests has ssl support (added in requests >= 0.8.8) # Check if requests has ssl support (added in requests >= 0.8.8)
SSL_ENABLED = False SSL_ENABLED = False
CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
_REQ_VER = None
try: try:
from distutils.version import LooseVersion from distutils.version import LooseVersion
import pkg_resources import pkg_resources
@ -61,6 +63,23 @@ def _cleanurl(url):
return urlunparse(parsed_url) return urlunparse(parsed_url)
def combine_url(base, *add_ons):
def combine_single(url, add_on):
url_parsed = list(urlparse(url))
path = url_parsed[2]
if path and not path.endswith("/"):
path += "/"
path += urllib.quote(str(add_on), safe="/:")
url_parsed[2] = path
return urlunparse(url_parsed)
url = base
for add_on in add_ons:
url = combine_single(url, add_on)
return url
# Made to have same accessors as UrlResponse so that the # Made to have same accessors as UrlResponse so that the
# read_file_or_url can return this or that object and the # read_file_or_url can return this or that object and the
# 'user' of those objects will not need to know the difference. # 'user' of those objects will not need to know the difference.
@ -129,6 +148,26 @@ class UrlError(IOError):
self.headers = {} self.headers = {}
def _get_ssl_args(url, ssl_details):
ssl_args = {}
scheme = urlparse(url).scheme # pylint: disable=E1101
if scheme == 'https' and ssl_details:
if not SSL_ENABLED:
LOG.warn("SSL is not supported in requests v%s, "
"cert. verification can not occur!", _REQ_VER)
else:
if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
ssl_args['verify'] = ssl_details['ca_certs']
else:
ssl_args['verify'] = True
if 'cert_file' in ssl_details and 'key_file' in ssl_details:
ssl_args['cert'] = [ssl_details['cert_file'],
ssl_details['key_file']]
elif 'cert_file' in ssl_details:
ssl_args['cert'] = str(ssl_details['cert_file'])
return ssl_args
def readurl(url, data=None, timeout=None, retries=0, sec_between=1, def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
headers=None, headers_cb=None, ssl_details=None, headers=None, headers_cb=None, ssl_details=None,
check_status=True, allow_redirects=True, exception_cb=None): check_status=True, allow_redirects=True, exception_cb=None):
@ -136,21 +175,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
req_args = { req_args = {
'url': url, 'url': url,
} }
scheme = urlparse(url).scheme # pylint: disable=E1101 req_args.update(_get_ssl_args(url, ssl_details))
if scheme == 'https' and ssl_details:
if not SSL_ENABLED:
LOG.warn("SSL is not enabled, cert. verification can not occur!")
else:
if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
req_args['verify'] = ssl_details['ca_certs']
else:
req_args['verify'] = True
if 'cert_file' in ssl_details and 'key_file' in ssl_details:
req_args['cert'] = [ssl_details['cert_file'],
ssl_details['key_file']]
elif 'cert_file' in ssl_details:
req_args['cert'] = str(ssl_details['cert_file'])
req_args['allow_redirects'] = allow_redirects req_args['allow_redirects'] = allow_redirects
req_args['method'] = 'GET' req_args['method'] = 'GET'
if timeout is not None: if timeout is not None:
@ -181,12 +206,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
def _cb(url): def _cb(url):
return headers return headers
headers_cb = _cb headers_cb = _cb
if data: if data:
# Do this after the log (it might be large)
req_args['data'] = data req_args['data'] = data
if sec_between is None: if sec_between is None:
sec_between = -1 sec_between = -1
excps = [] excps = []
# Handle retrying ourselves since the built-in support # Handle retrying ourselves since the built-in support
# doesn't handle sleeping between tries... # doesn't handle sleeping between tries...
@ -223,7 +247,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# ssl exceptions are not going to get fixed by waiting a # ssl exceptions are not going to get fixed by waiting a
# few seconds # few seconds
break break
if exception_cb and not exception_cb(filtered_req_args, excps[-1]): if exception_cb and not exception_cb(req_args.copy(), excps[-1]):
break break
if i + 1 < manual_tries and sec_between > 0: if i + 1 < manual_tries and sec_between > 0:
LOG.debug("Please wait %s seconds while we wait to try again", LOG.debug("Please wait %s seconds while we wait to try again",
@ -242,6 +266,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
max_wait: roughly the maximum time to wait before giving up max_wait: roughly the maximum time to wait before giving up
The max time is *actually* len(urls)*timeout as each url will The max time is *actually* len(urls)*timeout as each url will
be tried once and given the timeout provided. be tried once and given the timeout provided.
a number <= 0 will always result in only one try
timeout: the timeout provided to urlopen timeout: the timeout provided to urlopen
status_cb: call method with string message when a url is not available status_cb: call method with string message when a url is not available
headers_cb: call method with single argument of url to get headers headers_cb: call method with single argument of url to get headers

View File

@ -32,6 +32,7 @@ import glob
import grp import grp
import gzip import gzip
import hashlib import hashlib
import json
import os import os
import os.path import os.path
import platform import platform
@ -362,6 +363,15 @@ def multi_log(text, console=True, stderr=True,
log.log(log_level, text) log.log(log_level, text)
def load_json(text, root_types=(dict,)):
decoded = json.loads(text)
if not isinstance(decoded, tuple(root_types)):
expected_types = ", ".join([str(t) for t in root_types])
raise TypeError("(%s) root types expected, got %s instead"
% (expected_types, type(decoded)))
return decoded
def is_ipv4(instr): def is_ipv4(instr):
"""determine if input string is a ipv4 address. return boolean.""" """determine if input string is a ipv4 address. return boolean."""
toks = instr.split('.') toks = instr.split('.')

View File

@ -12,10 +12,27 @@ from cloudinit import util
import shutil import shutil
# Handle how 2.6 doesn't have the assertIn or assertNotIn # Used for detecting different python versions
PY2 = False
PY26 = False
PY27 = False
PY3 = False
_PY_VER = sys.version_info _PY_VER = sys.version_info
_PY_MAJOR, _PY_MINOR = _PY_VER[0:2] _PY_MAJOR, _PY_MINOR = _PY_VER[0:2]
if (_PY_MAJOR, _PY_MINOR) <= (2, 6): if (_PY_MAJOR, _PY_MINOR) <= (2, 6):
if (_PY_MAJOR, _PY_MINOR) == (2, 6):
PY26 = True
if (_PY_MAJOR, _PY_MINOR) >= (2, 0):
PY2 = True
else:
if (_PY_MAJOR, _PY_MINOR) == (2, 7):
PY27 = True
PY2 = True
if (_PY_MAJOR, _PY_MINOR) >= (3, 0):
PY3 = True
if PY26:
# For now add these on, taken from python 2.7 + slightly adjusted # For now add these on, taken from python 2.7 + slightly adjusted
class TestCase(unittest.TestCase): class TestCase(unittest.TestCase):
def assertIn(self, member, container, msg=None): def assertIn(self, member, container, msg=None):
@ -29,6 +46,12 @@ if (_PY_MAJOR, _PY_MINOR) <= (2, 6):
standardMsg = standardMsg % (member, container) standardMsg = standardMsg % (member, container)
self.fail(self._formatMessage(msg, standardMsg)) self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, value, msg=None):
if value is not None:
standardMsg = '%r is not None'
standardMsg = standardMsg % (value)
self.fail(self._formatMessage(msg, standardMsg))
else: else:
class TestCase(unittest.TestCase): class TestCase(unittest.TestCase):
pass pass

View File

@ -9,6 +9,7 @@ from mocker import MockerTestCase
from cloudinit import helpers from cloudinit import helpers
from cloudinit import settings from cloudinit import settings
from cloudinit.sources import DataSourceConfigDrive as ds from cloudinit.sources import DataSourceConfigDrive as ds
from cloudinit.sources.helpers import openstack
from cloudinit import util from cloudinit import util
from tests.unittests import helpers as unit_helpers from tests.unittests import helpers as unit_helpers
@ -71,7 +72,7 @@ class TestConfigDriveDataSource(MockerTestCase):
def test_ec2_metadata(self): def test_ec2_metadata(self):
populate_dir(self.tmp, CFG_DRIVE_FILES_V2) populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
found = ds.read_config_drive_dir(self.tmp) found = ds.read_config_drive(self.tmp)
self.assertTrue('ec2-metadata' in found) self.assertTrue('ec2-metadata' in found)
ec2_md = found['ec2-metadata'] ec2_md = found['ec2-metadata']
self.assertEqual(EC2_META, ec2_md) self.assertEqual(EC2_META, ec2_md)
@ -81,7 +82,7 @@ class TestConfigDriveDataSource(MockerTestCase):
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
None, None,
helpers.Paths({})) helpers.Paths({}))
found = ds.read_config_drive_dir(self.tmp) found = ds.read_config_drive(self.tmp)
cfg_ds.metadata = found['metadata'] cfg_ds.metadata = found['metadata']
name_tests = { name_tests = {
'ami': '/dev/vda1', 'ami': '/dev/vda1',
@ -112,7 +113,7 @@ class TestConfigDriveDataSource(MockerTestCase):
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
None, None,
helpers.Paths({})) helpers.Paths({}))
found = ds.read_config_drive_dir(self.tmp) found = ds.read_config_drive(self.tmp)
os_md = found['metadata'] os_md = found['metadata']
cfg_ds.metadata = os_md cfg_ds.metadata = os_md
name_tests = { name_tests = {
@ -140,7 +141,7 @@ class TestConfigDriveDataSource(MockerTestCase):
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
None, None,
helpers.Paths({})) helpers.Paths({}))
found = ds.read_config_drive_dir(self.tmp) found = ds.read_config_drive(self.tmp)
ec2_md = found['ec2-metadata'] ec2_md = found['ec2-metadata']
os_md = found['metadata'] os_md = found['metadata']
cfg_ds.ec2_metadata = ec2_md cfg_ds.ec2_metadata = ec2_md
@ -171,7 +172,7 @@ class TestConfigDriveDataSource(MockerTestCase):
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
None, None,
helpers.Paths({})) helpers.Paths({}))
found = ds.read_config_drive_dir(self.tmp) found = ds.read_config_drive(self.tmp)
exists_mock = self.mocker.replace(os.path.exists, exists_mock = self.mocker.replace(os.path.exists,
spec=False, passthrough=False) spec=False, passthrough=False)
exists_mock(mocker.ARGS) exists_mock(mocker.ARGS)
@ -200,10 +201,11 @@ class TestConfigDriveDataSource(MockerTestCase):
populate_dir(self.tmp, CFG_DRIVE_FILES_V2) populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
found = ds.read_config_drive_dir(self.tmp) found = ds.read_config_drive(self.tmp)
expected_md = copy(OSTACK_META) expected_md = copy(OSTACK_META)
expected_md['instance-id'] = expected_md['uuid'] expected_md['instance-id'] = expected_md['uuid']
expected_md['local-hostname'] = expected_md['hostname']
self.assertEqual(USER_DATA, found['userdata']) self.assertEqual(USER_DATA, found['userdata'])
self.assertEqual(expected_md, found['metadata']) self.assertEqual(expected_md, found['metadata'])
@ -219,10 +221,11 @@ class TestConfigDriveDataSource(MockerTestCase):
populate_dir(self.tmp, data) populate_dir(self.tmp, data)
found = ds.read_config_drive_dir(self.tmp) found = ds.read_config_drive(self.tmp)
expected_md = copy(OSTACK_META) expected_md = copy(OSTACK_META)
expected_md['instance-id'] = expected_md['uuid'] expected_md['instance-id'] = expected_md['uuid']
expected_md['local-hostname'] = expected_md['hostname']
self.assertEqual(expected_md, found['metadata']) self.assertEqual(expected_md, found['metadata'])
@ -235,8 +238,8 @@ class TestConfigDriveDataSource(MockerTestCase):
populate_dir(self.tmp, data) populate_dir(self.tmp, data)
self.assertRaises(ds.BrokenConfigDriveDir, self.assertRaises(openstack.BrokenMetadata,
ds.read_config_drive_dir, self.tmp) ds.read_config_drive, self.tmp)
def test_seed_dir_no_configdrive(self): def test_seed_dir_no_configdrive(self):
"""Verify that no metadata raises NonConfigDriveDir.""" """Verify that no metadata raises NonConfigDriveDir."""
@ -247,14 +250,14 @@ class TestConfigDriveDataSource(MockerTestCase):
data["openstack/latest/random-file.txt"] = "random-content" data["openstack/latest/random-file.txt"] = "random-content"
data["content/foo"] = "foocontent" data["content/foo"] = "foocontent"
self.assertRaises(ds.NonConfigDriveDir, self.assertRaises(openstack.NonReadable,
ds.read_config_drive_dir, my_d) ds.read_config_drive, my_d)
def test_seed_dir_missing(self): def test_seed_dir_missing(self):
"""Verify that missing seed_dir raises NonConfigDriveDir.""" """Verify that missing seed_dir raises NonConfigDriveDir."""
my_d = os.path.join(self.tmp, "nonexistantdirectory") my_d = os.path.join(self.tmp, "nonexistantdirectory")
self.assertRaises(ds.NonConfigDriveDir, self.assertRaises(openstack.NonReadable,
ds.read_config_drive_dir, my_d) ds.read_config_drive, my_d)
def test_find_candidates(self): def test_find_candidates(self):
devs_with_answers = {} devs_with_answers = {}
@ -304,7 +307,7 @@ class TestConfigDriveDataSource(MockerTestCase):
def cfg_ds_from_dir(seed_d): def cfg_ds_from_dir(seed_d):
found = ds.read_config_drive_dir(seed_d) found = ds.read_config_drive(seed_d)
cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None, cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None,
helpers.Paths({})) helpers.Paths({}))
populate_ds_from_read_config(cfg_ds, seed_d, found) populate_ds_from_read_config(cfg_ds, seed_d, found)
@ -319,7 +322,7 @@ def populate_ds_from_read_config(cfg_ds, source, results):
cfg_ds.metadata = results.get('metadata') cfg_ds.metadata = results.get('metadata')
cfg_ds.ec2_metadata = results.get('ec2-metadata') cfg_ds.ec2_metadata = results.get('ec2-metadata')
cfg_ds.userdata_raw = results.get('userdata') cfg_ds.userdata_raw = results.get('userdata')
cfg_ds.version = results.get('cfgdrive_ver') cfg_ds.version = results.get('version')
def populate_dir(seed_dir, files): def populate_dir(seed_dir, files):

View File

@ -0,0 +1,301 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2014 Yahoo! Inc.
#
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import json
import re
from StringIO import StringIO
from urlparse import urlparse
from tests.unittests import helpers as test_helpers
from cloudinit import helpers
from cloudinit import settings
from cloudinit.sources import DataSourceOpenStack as ds
from cloudinit.sources.helpers import openstack
from cloudinit import util
import httpretty as hp
# Address that the (mocked) metadata service is served from.
BASE_URL = "http://169.254.169.254"
PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
# Fake ec2 metadata served alongside the openstack metadata.
EC2_META = {
    'ami-id': 'ami-00000001',
    'ami-launch-index': '0',
    'ami-manifest-path': 'FIXME',
    'hostname': 'sm-foo-test.novalocal',
    'instance-action': 'none',
    'instance-id': 'i-00000001',
    'instance-type': 'm1.tiny',
    'local-hostname': 'sm-foo-test.novalocal',
    'local-ipv4': '0.0.0.0',
    'public-hostname': 'sm-foo-test.novalocal',
    'public-ipv4': '0.0.0.1',
    'reservation-id': 'r-iru5qm4m',
}
USER_DATA = '#!/bin/sh\necho This is user data\n'
VENDOR_DATA = {
    'magic': '',
}
# Fake contents of the openstack meta_data.json document.
OSTACK_META = {
    'availability_zone': 'nova',
    'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
              {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
    'hostname': 'sm-foo-test.novalocal',
    'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
    'name': 'sm-foo-test',
    'public_keys': {'mykey': PUBKEY},
    'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c',
}
CONTENT_0 = 'This is contents of /etc/foo.cfg\n'
CONTENT_1 = '# this is /etc/bar/bar.cfg\n'
# Mapping of url path -> response body for the openstack metadata tree.
OS_FILES = {
    'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
    'openstack/2012-08-10/user_data': USER_DATA,
    'openstack/content/0000': CONTENT_0,
    'openstack/content/0001': CONTENT_1,
    'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
    'openstack/latest/user_data': USER_DATA,
    'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA),
}
# Mapping of url path -> response body for the ec2 (non meta-data) tree.
EC2_FILES = {
    'latest/user-data': USER_DATA,
}
def _register_uris(version, ec2_files, ec2_meta, os_files):
    """Registers a set of url patterns into httpretty that will mimic the
    same data returned by the openstack metadata service (and ec2 service).
    """
    # NOTE(review): 'version' is currently unused here; all paths under the
    # metadata host are matched regardless of version — confirm intended.

    def match_ec2_url(uri, headers):
        # Serve static ec2 files, then the meta-data tree, else 404.
        path = uri.path.lstrip("/")
        if path in ec2_files:
            return (200, headers, ec2_files.get(path))
        if path == 'latest/meta-data':
            # Top-level listing: keys with list/tuple values are shown
            # as sub-directories (trailing slash), others as leaves.
            buf = StringIO()
            for (k, v) in ec2_meta.items():
                if isinstance(v, (list, tuple)):
                    buf.write("%s/" % (k))
                else:
                    buf.write("%s" % (k))
                buf.write("\n")
            return (200, headers, buf.getvalue())
        if path.startswith('latest/meta-data'):
            # Resolve a nested meta-data key by walking its path pieces.
            value = None
            pieces = path.split("/")
            if path.endswith("/"):
                pieces = pieces[2:-1]
                value = util.get_cfg_by_path(ec2_meta, pieces)
            else:
                pieces = pieces[2:]
                value = util.get_cfg_by_path(ec2_meta, pieces)
            if value is not None:
                return (200, headers, str(value))
        return (404, headers, '')

    def get_request_callback(method, uri, headers):
        # Openstack files take priority; everything else falls through
        # to the ec2 handling above.
        uri = urlparse(uri)
        path = uri.path.lstrip("/")
        if path in os_files:
            return (200, headers, os_files.get(path))
        return match_ec2_url(uri, headers)

    hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'),
                    body=get_request_callback)
class TestOpenStackDataSource(test_helpers.TestCase):
    """Tests for the openstack datasource against a mocked metadata service."""

    # Metadata format version directory used for all requests.
    VERSION = 'latest'

    @hp.activate
    def test_successful(self):
        # Full happy path: openstack + ec2 metadata both available.
        _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
        f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
        self.assertEquals(VENDOR_DATA, f.get('vendordata'))
        self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
        self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
        self.assertEquals(2, len(f['files']))
        self.assertEquals(USER_DATA, f.get('userdata'))
        self.assertEquals(EC2_META, f.get('ec2-metadata'))
        self.assertEquals(2, f.get('version'))
        metadata = f['metadata']
        self.assertEquals('nova', metadata.get('availability_zone'))
        self.assertEquals('sm-foo-test.novalocal', metadata.get('hostname'))
        self.assertEquals('sm-foo-test.novalocal',
                          metadata.get('local-hostname'))
        self.assertEquals('sm-foo-test', metadata.get('name'))
        self.assertEquals('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
                          metadata.get('uuid'))
        self.assertEquals('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
                          metadata.get('instance-id'))

    @hp.activate
    def test_no_ec2(self):
        # Reading still works when no ec2 metadata is served at all.
        _register_uris(self.VERSION, {}, {}, OS_FILES)
        f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
        self.assertEquals(VENDOR_DATA, f.get('vendordata'))
        self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
        self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
        self.assertEquals(USER_DATA, f.get('userdata'))
        self.assertEquals({}, f.get('ec2-metadata'))
        self.assertEquals(2, f.get('version'))

    @hp.activate
    def test_bad_metadata(self):
        # A missing meta_data.json (a required file) raises NonReadable.
        os_files = copy.deepcopy(OS_FILES)
        for k in list(os_files.keys()):
            if k.endswith('meta_data.json'):
                os_files.pop(k, None)
        _register_uris(self.VERSION, {}, {}, os_files)
        self.assertRaises(openstack.NonReadable, ds.read_metadata_service,
                          BASE_URL, version=self.VERSION)

    @hp.activate
    def test_bad_uuid(self):
        # Metadata missing the mandatory 'uuid' key raises BrokenMetadata.
        os_files = copy.deepcopy(OS_FILES)
        os_meta = copy.deepcopy(OSTACK_META)
        os_meta.pop('uuid')
        for k in list(os_files.keys()):
            if k.endswith('meta_data.json'):
                os_files[k] = json.dumps(os_meta)
        _register_uris(self.VERSION, {}, {}, os_files)
        self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
                          BASE_URL, version=self.VERSION)

    @hp.activate
    def test_userdata_empty(self):
        # Userdata is optional; its absence yields a falsy 'userdata'.
        os_files = copy.deepcopy(OS_FILES)
        for k in list(os_files.keys()):
            if k.endswith('user_data'):
                os_files.pop(k, None)
        _register_uris(self.VERSION, {}, {}, os_files)
        f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
        self.assertEquals(VENDOR_DATA, f.get('vendordata'))
        self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
        self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
        self.assertFalse(f.get('userdata'))

    @hp.activate
    def test_vendordata_empty(self):
        # Vendordata is optional; its absence yields a falsy 'vendordata'.
        os_files = copy.deepcopy(OS_FILES)
        for k in list(os_files.keys()):
            if k.endswith('vendor_data.json'):
                os_files.pop(k, None)
        _register_uris(self.VERSION, {}, {}, os_files)
        f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
        self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
        self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
        self.assertFalse(f.get('vendordata'))

    @hp.activate
    def test_vendordata_invalid(self):
        # Unparseable vendor_data.json raises BrokenMetadata.
        os_files = copy.deepcopy(OS_FILES)
        for k in list(os_files.keys()):
            if k.endswith('vendor_data.json'):
                os_files[k] = '{' # some invalid json
        _register_uris(self.VERSION, {}, {}, os_files)
        self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
                          BASE_URL, version=self.VERSION)

    @hp.activate
    def test_metadata_invalid(self):
        # Unparseable meta_data.json raises BrokenMetadata.
        os_files = copy.deepcopy(OS_FILES)
        for k in list(os_files.keys()):
            if k.endswith('meta_data.json'):
                os_files[k] = '{' # some invalid json
        _register_uris(self.VERSION, {}, {}, os_files)
        self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
                          BASE_URL, version=self.VERSION)

    @hp.activate
    def test_datasource(self):
        # The datasource object populates its attributes from a full read.
        _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
        ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
                                       None,
                                       helpers.Paths({}))
        self.assertIsNone(ds_os.version)
        found = ds_os.get_data()
        self.assertTrue(found)
        self.assertEquals(2, ds_os.version)
        md = dict(ds_os.metadata)
        md.pop('instance-id', None)
        md.pop('local-hostname', None)
        self.assertEquals(OSTACK_META, md)
        self.assertEquals(EC2_META, ds_os.ec2_metadata)
        self.assertEquals(USER_DATA, ds_os.userdata_raw)
        self.assertEquals(2, len(ds_os.files))
        self.assertEquals(VENDOR_DATA, ds_os.vendordata_raw)

    @hp.activate
    def test_bad_datasource_meta(self):
        # get_data() fails cleanly (and leaves version unset) on bad json.
        os_files = copy.deepcopy(OS_FILES)
        for k in list(os_files.keys()):
            if k.endswith('meta_data.json'):
                os_files[k] = '{' # some invalid json
        _register_uris(self.VERSION, {}, {}, os_files)
        ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
                                       None,
                                       helpers.Paths({}))
        self.assertIsNone(ds_os.version)
        found = ds_os.get_data()
        self.assertFalse(found)
        self.assertIsNone(ds_os.version)

    @hp.activate
    def test_no_datasource(self):
        # No metadata available at all; get_data() gives up quickly
        # (max_wait/timeout set to 0 so the test doesn't block).
        os_files = copy.deepcopy(OS_FILES)
        for k in list(os_files.keys()):
            if k.endswith('meta_data.json'):
                os_files.pop(k)
        _register_uris(self.VERSION, {}, {}, os_files)
        ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
                                       None,
                                       helpers.Paths({}))
        ds_os.ds_cfg = {
            'max_wait': 0,
            'timeout': 0,
        }
        self.assertIsNone(ds_os.version)
        found = ds_os.get_data()
        self.assertFalse(found)
        self.assertIsNone(ds_os.version)

    @hp.activate
    def test_disabled_datasource(self):
        # Metadata declaring dsmode 'disabled' makes get_data() bail out.
        os_files = copy.deepcopy(OS_FILES)
        os_meta = copy.deepcopy(OSTACK_META)
        os_meta['meta'] = {
            'dsmode': 'disabled',
        }
        for k in list(os_files.keys()):
            if k.endswith('meta_data.json'):
                os_files[k] = json.dumps(os_meta)
        _register_uris(self.VERSION, {}, {}, os_files)
        ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
                                       None,
                                       helpers.Paths({}))
        ds_os.ds_cfg = {
            'max_wait': 0,
            'timeout': 0,
        }
        self.assertIsNone(ds_os.version)
        found = ds_os.get_data()
        self.assertFalse(found)
        self.assertIsNone(ds_os.version)

View File

@ -1,6 +1,7 @@
from tests.unittests import helpers from tests.unittests import helpers
from cloudinit import ec2_utils as eu from cloudinit import ec2_utils as eu
from cloudinit import url_helper as uh
import httpretty as hp import httpretty as hp
@ -48,11 +49,11 @@ class TestEc2Util(helpers.TestCase):
body="\n".join(['hostname', body="\n".join(['hostname',
'instance-id', 'instance-id',
'ami-launch-index'])) 'ami-launch-index']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'), hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com') status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'), hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
status=200, body='123') status=200, body='123')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'ami-launch-index'), hp.register_uri(hp.GET, uh.combine_url(base_url, 'ami-launch-index'),
status=200, body='1') status=200, body='1')
md = eu.get_instance_metadata(self.VERSION, retries=0) md = eu.get_instance_metadata(self.VERSION, retries=0)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com') self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
@ -66,14 +67,14 @@ class TestEc2Util(helpers.TestCase):
body="\n".join(['hostname', body="\n".join(['hostname',
'instance-id', 'instance-id',
'public-keys/'])) 'public-keys/']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'), hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com') status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'), hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
status=200, body='123') status=200, body='123')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'), hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
status=200, body='0=my-public-key') status=200, body='0=my-public-key')
hp.register_uri(hp.GET, hp.register_uri(hp.GET,
eu.combine_url(base_url, 'public-keys/0/openssh-key'), uh.combine_url(base_url, 'public-keys/0/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-public-key') status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com') self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
@ -87,18 +88,18 @@ class TestEc2Util(helpers.TestCase):
body="\n".join(['hostname', body="\n".join(['hostname',
'instance-id', 'instance-id',
'public-keys/'])) 'public-keys/']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'), hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com') status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'), hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
status=200, body='123') status=200, body='123')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'), hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
status=200, status=200,
body="\n".join(['0=my-public-key', '1=my-other-key'])) body="\n".join(['0=my-public-key', '1=my-other-key']))
hp.register_uri(hp.GET, hp.register_uri(hp.GET,
eu.combine_url(base_url, 'public-keys/0/openssh-key'), uh.combine_url(base_url, 'public-keys/0/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-public-key') status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
hp.register_uri(hp.GET, hp.register_uri(hp.GET,
eu.combine_url(base_url, 'public-keys/1/openssh-key'), uh.combine_url(base_url, 'public-keys/1/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-other-key') status=200, body='ssh-rsa AAAA.....wZEf my-other-key')
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com') self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
@ -112,20 +113,20 @@ class TestEc2Util(helpers.TestCase):
body="\n".join(['hostname', body="\n".join(['hostname',
'instance-id', 'instance-id',
'block-device-mapping/'])) 'block-device-mapping/']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'), hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com') status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'), hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
status=200, body='123') status=200, body='123')
hp.register_uri(hp.GET, hp.register_uri(hp.GET,
eu.combine_url(base_url, 'block-device-mapping/'), uh.combine_url(base_url, 'block-device-mapping/'),
status=200, status=200,
body="\n".join(['ami', 'ephemeral0'])) body="\n".join(['ami', 'ephemeral0']))
hp.register_uri(hp.GET, hp.register_uri(hp.GET,
eu.combine_url(base_url, 'block-device-mapping/ami'), uh.combine_url(base_url, 'block-device-mapping/ami'),
status=200, status=200,
body="sdb") body="sdb")
hp.register_uri(hp.GET, hp.register_uri(hp.GET,
eu.combine_url(base_url, uh.combine_url(base_url,
'block-device-mapping/ephemeral0'), 'block-device-mapping/ephemeral0'),
status=200, status=200,
body="sdc") body="sdc")