merge from trunk

This commit is contained in:
Scott Moser 2014-01-23 15:17:17 -05:00
commit adf5fe0f3f
54 changed files with 1387 additions and 576 deletions

View File

@ -1,5 +1,26 @@
0.7.5:
- open 0.7.5
- Add a debug log message around import failures
- add a 'debug' module for easily printing out some information about
datasource and cloud-init [Shraddha Pandhe]
- support running apt with 'eatmydata' via configuration token
apt_get_wrapper (LP: #1236531).
- convert paths provided in config-drive 'files' to string before writing
(LP: #1260072).
- Azure: minor changes in logging output; ensure filenames are strings (not
unicode).
- config/cloud.cfg.d/05_logging.cfg: provide a default 'output' setting, to
redirect cloud-init stderr and stdout to /var/log/cloud-init-output.log.
- drop support for resizing partitions with parted entirely (LP: #1212492).
It was already broken anyway.
- add support for vendordata.
- drop dependency on boto for crawling ec2 metadata service.
- add 'Requires' on sudo (for OpenNebula datasource) in rpm specs, and
'Recommends' in the debian/control.in [Vlastimil Holer]
- if mount_info reports /dev/root is a device path for /, then convert
that to a device with the help of the kernel cmdline.
- configdrive: consider partitions as possible datasources if they have
the correct filesystem label. [Paul Querna]
0.7.4:
- fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a
partitioned block device with target filesystem on ephemeral0.1.

View File

@ -8,6 +8,8 @@ YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
CHANGELOG_VERSION=$(shell $(CWD)/tools/read-version)
CODE_VERSION=$(shell python -c "from cloudinit import version; print version.version_string()")
PIP_INSTALL := pip install
ifeq ($(distro),)
distro = redhat
endif
@ -23,7 +25,16 @@ pylint:
pyflakes:
pyflakes $(PY_FILES)
test:
pip-requirements:
@echo "Installing cloud-init dependencies..."
$(PIP_INSTALL) -r "$@.txt" -q
pip-test-requirements:
@echo "Installing cloud-init test dependencies..."
$(PIP_INSTALL) -r "$@.txt" -q
test: clean_pyc
@echo "Running tests..."
@nosetests $(noseopts) tests/
check_version:
@ -32,12 +43,14 @@ check_version:
"not equal to code version $(CODE_VERSION)"; exit 2; \
else true; fi
clean_pyc:
@find . -type f -name "*.pyc" -delete
2to3:
2to3 $(PY_FILES)
clean:
rm -rf /var/log/cloud-init.log \
/var/lib/cloud/
clean: clean_pyc
rm -rf /var/log/cloud-init.log /var/lib/cloud/
yaml:
@$(CWD)/tools/validate-yaml.py $(YAML_FILES)
@ -49,4 +62,4 @@ deb:
./packages/bddeb
.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb yaml check_version
.PHONY: pip-test-requirements pip-requirements clean_pyc

View File

@ -261,8 +261,8 @@ def main_init(name, args):
# Attempt to consume the data per instance.
# This may run user-data handlers and/or perform
# url downloads and such as needed.
(ran, _results) = init.cloudify().run('consume_userdata',
init.consume_userdata,
(ran, _results) = init.cloudify().run('consume_data',
init.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
if not ran:
@ -271,7 +271,7 @@ def main_init(name, args):
#
# See: https://bugs.launchpad.net/bugs/819507 for a little
# reason behind this...
init.consume_userdata(PER_ALWAYS)
init.consume_data(PER_ALWAYS)
except Exception:
util.logexc(LOG, "Consuming user data failed!")
return 1

View File

@ -0,0 +1,79 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2013 Yahoo! Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit import type_utils
from cloudinit import util
import copy
from StringIO import StringIO
def _make_header(text):
header = StringIO()
header.write("-" * 80)
header.write("\n")
header.write(text.center(80, ' '))
header.write("\n")
header.write("-" * 80)
header.write("\n")
return header.getvalue()
def handle(name, cfg, cloud, log, args):
verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True)
if args:
# if args are provided (from cmdline) then explicitly set verbose
out_file = args[0]
verbose = True
else:
out_file = util.get_cfg_by_path(cfg, ('debug', 'output'))
if not verbose:
log.debug(("Skipping module named %s,"
" verbose printing disabled"), name)
return
# Clean out some keys that we just don't care about showing...
dump_cfg = copy.deepcopy(cfg)
for k in ['log_cfgs']:
dump_cfg.pop(k, None)
all_keys = list(dump_cfg.keys())
for k in all_keys:
if k.startswith("_"):
dump_cfg.pop(k, None)
# Now dump it...
to_print = StringIO()
to_print.write(_make_header("Config"))
to_print.write(util.yaml_dumps(dump_cfg))
to_print.write("\n")
to_print.write(_make_header("MetaData"))
to_print.write(util.yaml_dumps(cloud.datasource.metadata))
to_print.write("\n")
to_print.write(_make_header("Misc"))
to_print.write("Datasource: %s\n" %
(type_utils.obj_name(cloud.datasource)))
to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro)))
to_print.write("Hostname: %s\n" % (cloud.get_hostname(True)))
to_print.write("Instance ID: %s\n" % (cloud.get_instance_id()))
to_print.write("Locale: %s\n" % (cloud.get_locale()))
to_print.write("Launch IDX: %s\n" % (cloud.launch_index))
contents = to_print.getvalue()
content_to_file = []
for line in contents.splitlines():
line = "ci-info: %s\n" % (line)
content_to_file.append(line)
if out_file:
util.write_file(out_file, "".join(content_to_file), 0644, "w")
else:
util.multi_log("".join(content_to_file), console=True, stderr=False)
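For reference, a minimal sketch of the parsed cloud-config that handle() above consults; the keys match the get_cfg_by_path() lookups in the code, while the output path itself is a hypothetical choice:
# Sketch: config dict as handle() would receive it (path is hypothetical).
cfg = {
    'debug': {
        'verbose': True,
        'output': '/var/log/cloud-init-debug.log',
    },
}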

View File

@ -80,30 +80,6 @@ class ResizeFailedException(Exception):
pass
class ResizeParted(object):
def available(self):
myenv = os.environ.copy()
myenv['LANG'] = 'C'
try:
(out, _err) = util.subp(["parted", "--help"], env=myenv)
if re.search(r"COMMAND.*resizepart\s+", out, re.DOTALL):
return True
except util.ProcessExecutionError:
pass
return False
def resize(self, diskdev, partnum, partdev):
before = get_size(partdev)
try:
util.subp(["parted", diskdev, "resizepart", partnum])
except util.ProcessExecutionError as e:
raise ResizeFailedException(e)
return (before, get_size(partdev))
class ResizeGrowPart(object):
def available(self):
myenv = os.environ.copy()
@ -318,6 +294,4 @@ def handle(_name, cfg, _cloud, log, _args):
else:
log.debug("'%s' %s: %s" % (entry, action, msg))
# LP: 1212444 FIXME re-order and favor ResizeParted
#RESIZERS = (('growpart', ResizeGrowPart),)
RESIZERS = (('growpart', ResizeGrowPart), ('parted', ResizeParted), ('gpart', ResizeGpart))
RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart))

View File

@ -56,6 +56,25 @@ RESIZE_FS_PREFIXES_CMDS = [
NOBLOCK = "noblock"
def rootdev_from_cmdline(cmdline):
found = None
for tok in cmdline.split():
if tok.startswith("root="):
found = tok[5:]
break
if found is None:
return None
if found.startswith("/dev/"):
return found
if found.startswith("LABEL="):
return "/dev/disk/by-label/" + found[len("LABEL="):]
if found.startswith("UUID="):
return "/dev/disk/by-uuid/" + found[len("UUID="):]
return "/dev/" + found
def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
resize_root = args[0]
@ -83,10 +102,20 @@ def handle(name, cfg, _cloud, log, args):
info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
log.debug("resize_info: %s" % info)
container = util.is_container()
if (devpth == "/dev/root" and not os.path.exists(devpth) and
not container):
devpth = rootdev_from_cmdline(util.get_cmdline())
if devpth is None:
log.warn("Unable to find device '/dev/root'")
return
log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)
try:
statret = os.stat(devpth)
except OSError as exc:
if util.is_container() and exc.errno == errno.ENOENT:
if container and exc.errno == errno.ENOENT:
log.debug("Device '%s' did not exist in container. "
"cannot resize: %s" % (devpth, info))
elif exc.errno == errno.ENOENT:
@ -97,7 +126,7 @@ def handle(name, cfg, _cloud, log, args):
return
if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
if util.is_container():
if container:
log.debug("device '%s' not a block device in container."
" cannot resize: %s" % (devpth, info))
else:
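A quick sketch of how rootdev_from_cmdline() above resolves the /dev/root placeholder; the command-line strings are made-up examples:
# Sketch: resolving 'root=' tokens per the function above.
rootdev_from_cmdline("ro root=/dev/xvda1")            # -> '/dev/xvda1'
rootdev_from_cmdline("root=LABEL=cloudimg-rootfs ro") # -> '/dev/disk/by-label/cloudimg-rootfs'
rootdev_from_cmdline("root=UUID=af12-34cd ro")        # -> '/dev/disk/by-uuid/af12-34cd'
rootdev_from_cmdline("quiet splash")                  # -> None (no root= token)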

View File

@ -0,0 +1,43 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2014 Canonical Ltd.
#
# Author: Ben Howard <ben.howard@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
SCRIPT_SUBDIR = 'vendor'
def handle(name, cfg, cloud, log, _args):
# This is written to by the vendor data handlers
# any vendor data shell scripts get placed in runparts_path
runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
SCRIPT_SUBDIR)
prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
try:
util.runparts(runparts_path, exe_prefix=prefix)
except:
log.warn("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
raise

View File

@ -36,6 +36,10 @@ LOG = logging.getLogger(__name__)
APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold',
'--option=Dpkg::options::=--force-unsafe-io',
'--assume-yes', '--quiet')
APT_GET_WRAPPER = {
'command': 'eatmydata',
'enabled': 'auto',
}
class Distro(distros.Distro):
@ -148,7 +152,13 @@ class Distro(distros.Distro):
# See: http://tiny.cc/kg91fw
# Or: http://tiny.cc/mh91fw
e['DEBIAN_FRONTEND'] = 'noninteractive'
cmd = list(self.get_option("apt_get_command", APT_GET_COMMAND))
wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER)
cmd = _get_wrapper_prefix(
wcfg.get('command', APT_GET_WRAPPER['command']),
wcfg.get('enabled', APT_GET_WRAPPER['enabled']))
cmd.extend(list(self.get_option("apt_get_command", APT_GET_COMMAND)))
if args and isinstance(args, str):
cmd.append(args)
@ -166,7 +176,9 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
util.subp(cmd, env=e, capture=False)
util.log_time(logfunc=LOG.debug,
msg="apt-%s [%s]" % (command, ' '.join(cmd)), func=util.subp,
args=(cmd,), kwargs={'env': e, 'capture': False})
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
@ -175,3 +187,15 @@ class Distro(distros.Distro):
def get_primary_arch(self):
(arch, _err) = util.subp(['dpkg', '--print-architecture'])
return str(arch).strip()
def _get_wrapper_prefix(cmd, mode):
if isinstance(cmd, str):
cmd = [str(cmd)]
if (util.is_true(mode) or
(str(mode).lower() == "auto" and cmd[0] and
util.which(cmd[0]))):
return cmd
else:
return []
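In effect, the wrapper prefix is chosen as sketched below; util.which() is assumed to probe PATH in the usual way:
# Sketch of _get_wrapper_prefix() behavior for the eatmydata wrapper.
_get_wrapper_prefix('eatmydata', True)    # ['eatmydata'] unconditionally
_get_wrapper_prefix('eatmydata', 'auto')  # ['eatmydata'] only if on PATH, else []
_get_wrapper_prefix('eatmydata', False)   # [] -- wrapper disabled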

View File

@ -0,0 +1,163 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This is a util function to translate Debian-based distro interface blobs as
# given in /etc/network/interfaces to a *somewhat* agnostic format for
# distributions that use other formats.
#
# TODO(harlowja) remove when we have python-netcf active...
#
# The format is the following:
# {
# <device-name>: {
# # All optional (if not existent in original format)
# "netmask": <ip>,
# "broadcast": <ip>,
# "gateway": <ip>,
# "address": <ip>,
# "bootproto": "static"|"dhcp",
# "dns-search": <hostname>,
# "hwaddress": <mac-address>,
# "auto": True (or non-existent),
# "dns-nameservers": [<ip/hostname>, ...],
# }
# }
#
# Things to note: comments are removed; if a ubuntu/debian interface is
# marked as auto, then only the first segment (?) is retained, i.e.
# 'auto eth0 eth0:1' just marks eth0 as auto (not eth0:1).
#
# Example input:
#
# auto lo
# iface lo inet loopback
#
# auto eth0
# iface eth0 inet static
# address 10.0.0.1
# netmask 255.255.252.0
# broadcast 10.0.0.255
# gateway 10.0.0.2
# dns-nameservers 98.0.0.1 98.0.0.2
#
# Example output:
# {
# "lo": {
# "auto": true
# },
# "eth0": {
# "auto": true,
# "dns-nameservers": [
# "98.0.0.1",
# "98.0.0.2"
# ],
# "broadcast": "10.0.0.255",
# "netmask": "255.255.252.0",
# "bootproto": "static",
# "address": "10.0.0.1",
# "gateway": "10.0.0.2"
# }
# }
def translate_network(settings):
# Get the standard cmd, args from the ubuntu format
entries = []
for line in settings.splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
split_up = line.split(None, 1)
if len(split_up) <= 1:
continue
entries.append(split_up)
# Figure out where each iface section is
ifaces = []
consume = {}
for (cmd, args) in entries:
if cmd == 'iface':
if consume:
ifaces.append(consume)
consume = {}
consume[cmd] = args
else:
consume[cmd] = args
# Check if anything left over to consume
absorb = False
for (cmd, args) in consume.iteritems():
if cmd == 'iface':
absorb = True
if absorb:
ifaces.append(consume)
# Now translate
real_ifaces = {}
for info in ifaces:
if 'iface' not in info:
continue
iface_details = info['iface'].split(None)
dev_name = None
if len(iface_details) >= 1:
dev = iface_details[0].strip().lower()
if dev:
dev_name = dev
if not dev_name:
continue
iface_info = {}
if len(iface_details) >= 3:
proto_type = iface_details[2].strip().lower()
# Seems like this can be 'loopback' which we don't
# really care about
if proto_type in ['dhcp', 'static']:
iface_info['bootproto'] = proto_type
# These can just be copied over
for k in ['netmask', 'address', 'gateway', 'broadcast']:
if k in info:
val = info[k].strip().lower()
if val:
iface_info[k] = val
# Name server info provided??
if 'dns-nameservers' in info:
iface_info['dns-nameservers'] = info['dns-nameservers'].split()
# Name server search info provided??
if 'dns-search' in info:
iface_info['dns-search'] = info['dns-search'].split()
# Is any mac address spoofing going on??
if 'hwaddress' in info:
hw_info = info['hwaddress'].lower().strip()
hw_split = hw_info.split(None, 1)
if len(hw_split) == 2 and hw_split[0].startswith('ether'):
hw_addr = hw_split[1]
if hw_addr:
iface_info['hwaddress'] = hw_addr
real_ifaces[dev_name] = iface_info
# Check for those that should be started on boot via 'auto'
for (cmd, args) in entries:
if cmd == 'auto':
# Seems like auto can be like 'auto eth0 eth0:1' so just get the
# first part out as the device name
args = args.split(None)
if not args:
continue
dev_name = args[0].strip().lower()
if dev_name in real_ifaces:
real_ifaces[dev_name]['auto'] = True
return real_ifaces
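A short usage sketch of translate_network(), reusing the example input from the header comment above:
# Sketch: translating a small /etc/network/interfaces blob.
settings = '\n'.join([
    'auto eth0',
    'iface eth0 inet static',
    '    address 10.0.0.1',
    '    netmask 255.255.252.0',
    '    dns-nameservers 98.0.0.1 98.0.0.2',
])
ifaces = translate_network(settings)
# ifaces['eth0'] -> {'auto': True, 'bootproto': 'static',
#                    'address': '10.0.0.1', 'netmask': '255.255.252.0',
#                    'dns-nameservers': ['98.0.0.1', '98.0.0.2']}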

View File

@ -25,6 +25,7 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
from cloudinit.distros import net_util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@ -63,7 +64,7 @@ class Distro(distros.Distro):
def _write_network(self, settings):
# TODO(harlowja) fix this... since this is the ubuntu format
entries = rhel_util.translate_network(settings)
entries = net_util.translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
# Make the intermediate format as the rhel format...

View File

@ -30,94 +30,6 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
# This is a util function to translate Debian based distro interface blobs as
# given in /etc/network/interfaces to an equivalent format for distributions
# that use ifcfg-* style (Red Hat and SUSE).
# TODO(harlowja) remove when we have python-netcf active...
def translate_network(settings):
# Get the standard cmd, args from the ubuntu format
entries = []
for line in settings.splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
split_up = line.split(None, 1)
if len(split_up) <= 1:
continue
entries.append(split_up)
# Figure out where each iface section is
ifaces = []
consume = {}
for (cmd, args) in entries:
if cmd == 'iface':
if consume:
ifaces.append(consume)
consume = {}
consume[cmd] = args
else:
consume[cmd] = args
# Check if anything left over to consume
absorb = False
for (cmd, args) in consume.iteritems():
if cmd == 'iface':
absorb = True
if absorb:
ifaces.append(consume)
# Now translate
real_ifaces = {}
for info in ifaces:
if 'iface' not in info:
continue
iface_details = info['iface'].split(None)
dev_name = None
if len(iface_details) >= 1:
dev = iface_details[0].strip().lower()
if dev:
dev_name = dev
if not dev_name:
continue
iface_info = {}
if len(iface_details) >= 3:
proto_type = iface_details[2].strip().lower()
# Seems like this can be 'loopback' which we don't
# really care about
if proto_type in ['dhcp', 'static']:
iface_info['bootproto'] = proto_type
# These can just be copied over
for k in ['netmask', 'address', 'gateway', 'broadcast']:
if k in info:
val = info[k].strip().lower()
if val:
iface_info[k] = val
# Name server info provided??
if 'dns-nameservers' in info:
iface_info['dns-nameservers'] = info['dns-nameservers'].split()
# Name server search info provided??
if 'dns-search' in info:
iface_info['dns-search'] = info['dns-search'].split()
# Is any mac address spoofing going on??
if 'hwaddress' in info:
hw_info = info['hwaddress'].lower().strip()
hw_split = hw_info.split(None, 1)
if len(hw_split) == 2 and hw_split[0].startswith('ether'):
hw_addr = hw_split[1]
if hw_addr:
iface_info['hwaddress'] = hw_addr
real_ifaces[dev_name] = iface_info
# Check for those that should be started on boot via 'auto'
for (cmd, args) in entries:
if cmd == 'auto':
# Seems like auto can be like 'auto eth0 eth0:1' so just get the
# first part out as the device name
args = args.split(None)
if not args:
continue
dev_name = args[0].strip().lower()
if dev_name in real_ifaces:
real_ifaces[dev_name]['auto'] = True
return real_ifaces
# Helper function to update a RHEL/SUSE /etc/sysconfig/* file
def update_sysconfig_file(fn, adjustments, allow_empty=False):
if not adjustments:

View File

@ -26,6 +26,7 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
from cloudinit.distros import net_util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@ -54,7 +55,7 @@ class Distro(distros.Distro):
def _write_network(self, settings):
# Convert debian settings to ifcfg format
entries = rhel_util.translate_network(settings)
entries = net_util.translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
# Make the intermediate format as the suse format...

View File

@ -16,48 +16,160 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import boto.utils as boto_utils
from urlparse import (urlparse, urlunparse)
# Versions of boto >= 2.6.0 (and possibly 2.5.2)
# try to lazily load the metadata backing, which
# doesn't work so well in cloud-init especially
# since the metadata is serialized and actions are
# performed where the metadata server may be blocked
# (thus the datasource will start failing) resulting
# in url exceptions when fields that do exist (or
# would have existed) do not exist due to the blocking
# that occurred.
import functools
import json
import urllib
# TODO(harlowja): https://github.com/boto/boto/issues/1401
# When boto finally moves to using requests, we should be able
# to provide it ssl details, it does not yet, so we can't provide them...
from cloudinit import log as logging
from cloudinit import util
LOG = logging.getLogger(__name__)
def _unlazy_dict(mp):
if not isinstance(mp, (dict)):
return mp
# Walk over the keys/values which
# forces boto to unlazy itself and
# has no effect on dictionaries that
# already have their items.
for (_k, v) in mp.items():
_unlazy_dict(v)
return mp
def maybe_json_object(text):
if not text:
return False
text = text.strip()
if text.startswith("{") and text.endswith("}"):
return True
return False
def get_instance_userdata(api_version, metadata_address):
# Note: boto.utils.get_instance_metadata returns '' for empty string
# so the change from non-true to '' is not specifically necessary, but
# this way cloud-init will get consistent behavior even if boto changed
# in the future to return a None on "no user-data provided".
ud = boto_utils.get_instance_userdata(api_version, None, metadata_address)
if not ud:
ud = ''
return ud
def combine_url(base, add_on):
base_parsed = list(urlparse(base))
path = base_parsed[2]
if path and not path.endswith("/"):
path += "/"
path += urllib.quote(str(add_on), safe="/:")
base_parsed[2] = path
return urlunparse(base_parsed)
def get_instance_metadata(api_version, metadata_address):
metadata = boto_utils.get_instance_metadata(api_version, metadata_address)
if not isinstance(metadata, (dict)):
metadata = {}
return _unlazy_dict(metadata)
# See: http://bit.ly/TyoUQs
#
class MetadataMaterializer(object):
def __init__(self, blob, base_url, caller):
self._blob = blob
self._md = None
self._base_url = base_url
self._caller = caller
def _parse(self, blob):
leaves = {}
children = []
if not blob:
return (leaves, children)
def has_children(item):
if item.endswith("/"):
return True
else:
return False
def get_name(item):
if item.endswith("/"):
return item.rstrip("/")
return item
for field in blob.splitlines():
field = field.strip()
field_name = get_name(field)
if not field or not field_name:
continue
if has_children(field):
if field_name not in children:
children.append(field_name)
else:
contents = field.split("=", 1)
resource = field_name
if len(contents) > 1:
# What a PITA...
(ident, sub_contents) = contents
ident = util.safe_int(ident)
if ident is not None:
resource = "%s/openssh-key" % (ident)
field_name = sub_contents
leaves[field_name] = resource
return (leaves, children)
def materialize(self):
if self._md is not None:
return self._md
self._md = self._materialize(self._blob, self._base_url)
return self._md
def _decode_leaf_blob(self, field, blob):
if not blob:
return blob
if maybe_json_object(blob):
try:
# Assume it's json, unless it fails parsing...
return json.loads(blob)
except (ValueError, TypeError) as e:
LOG.warn("Field %s looked like a json object, but it was"
" not: %s", field, e)
if blob.find("\n") != -1:
return blob.splitlines()
return blob
def _materialize(self, blob, base_url):
(leaves, children) = self._parse(blob)
child_contents = {}
for c in children:
child_url = combine_url(base_url, c)
if not child_url.endswith("/"):
child_url += "/"
child_blob = str(self._caller(child_url))
child_contents[c] = self._materialize(child_blob, child_url)
leaf_contents = {}
for (field, resource) in leaves.items():
leaf_url = combine_url(base_url, resource)
leaf_blob = str(self._caller(leaf_url))
leaf_contents[field] = self._decode_leaf_blob(field, leaf_blob)
joined = {}
joined.update(child_contents)
for field in leaf_contents.keys():
if field in joined:
LOG.warn("Duplicate key found in results from %s", base_url)
else:
joined[field] = leaf_contents[field]
return joined
def get_instance_userdata(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5):
ud_url = combine_url(metadata_address, api_version)
ud_url = combine_url(ud_url, 'user-data')
try:
response = util.read_file_or_url(ud_url,
ssl_details=ssl_details,
timeout=timeout,
retries=retries)
return str(response)
except Exception:
util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
return ''
def get_instance_metadata(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5):
md_url = combine_url(metadata_address, api_version)
md_url = combine_url(md_url, 'meta-data')
caller = functools.partial(util.read_file_or_url,
ssl_details=ssl_details, timeout=timeout,
retries=retries)
try:
response = caller(md_url)
materializer = MetadataMaterializer(str(response), md_url, caller)
md = materializer.materialize()
if not isinstance(md, (dict)):
md = {}
return md
except Exception:
util.logexc(LOG, "Failed fetching metadata from url %s", md_url)
return {}
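Taken together, these helpers crawl the metadata service over plain HTTP instead of going through boto. A usage sketch; the address and API version below are the conventional EC2 defaults, not guarantees for every cloud:
# Sketch: fetching metadata and userdata with the new helpers.
from cloudinit import ec2_utils
md = ec2_utils.get_instance_metadata(api_version='2009-04-04')
ud = ec2_utils.get_instance_userdata(api_version='2009-04-04')
# combine_url() joins with a single slash and quotes the added piece:
# combine_url('http://169.254.169.254', 'latest')
#   -> 'http://169.254.169.254/latest'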

View File

@ -187,6 +187,10 @@ def _escape_string(text):
def walker_callback(data, filename, payload, headers):
content_type = headers['Content-Type']
if content_type in data.get('excluded'):
LOG.debug('content_type "%s" is excluded', content_type)
return
if content_type in PART_CONTENT_TYPES:
walker_handle_handler(data, content_type, filename, payload)
return

View File

@ -66,6 +66,8 @@ class CloudConfigPartHandler(handlers.Handler):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.cloud_buf = None
self.cloud_fn = paths.get_ipath("cloud_config")
if 'cloud_config_path' in _kwargs:
self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
self.file_names = []
def list_types(self):

View File

@ -36,6 +36,8 @@ class ShellScriptPartHandler(handlers.Handler):
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
self.script_dir = paths.get_ipath_cur('scripts')
if 'script_path' in _kwargs:
self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
def list_types(self):
return [

View File

@ -200,11 +200,13 @@ class Runners(object):
class ConfigMerger(object):
def __init__(self, paths=None, datasource=None,
additional_fns=None, base_cfg=None):
additional_fns=None, base_cfg=None,
include_vendor=True):
self._paths = paths
self._ds = datasource
self._fns = additional_fns
self._base_cfg = base_cfg
self._include_vendor = include_vendor
# Created on first use
self._cfg = None
@ -237,13 +239,19 @@ class ConfigMerger(object):
# a configuration file to use when running...
if not self._paths:
return i_cfgs
cc_fn = self._paths.get_ipath_cur('cloud_config')
if cc_fn and os.path.isfile(cc_fn):
try:
i_cfgs.append(util.read_conf(cc_fn))
except:
util.logexc(LOG, 'Failed loading of cloud-config from %s',
cc_fn)
cc_paths = ['cloud_config']
if self._include_vendor:
cc_paths.append('vendor_cloud_config')
for cc_p in cc_paths:
cc_fn = self._paths.get_ipath_cur(cc_p)
if cc_fn and os.path.isfile(cc_fn):
try:
i_cfgs.append(util.read_conf(cc_fn))
except:
util.logexc(LOG, 'Failed loading of cloud-config from %s',
cc_fn)
return i_cfgs
def _read_cfg(self):
@ -331,13 +339,17 @@ class Paths(object):
self.lookups = {
"handlers": "handlers",
"scripts": "scripts",
"vendor_scripts": "scripts/vendor",
"sem": "sem",
"boothooks": "boothooks",
"userdata_raw": "user-data.txt",
"userdata": "user-data.txt.i",
"obj_pkl": "obj.pkl",
"cloud_config": "cloud-config.txt",
"vendor_cloud_config": "vendor-cloud-config.txt",
"data": "data",
"vendordata_raw": "vendor-data.txt",
"vendordata": "vendor-data.txt.i",
}
# Set when a datasource becomes active
self.datasource = ds
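Under the typical /var/lib/cloud layout (assumed here), the new lookup keys resolve beneath the current instance directory, e.g.:
# Sketch: where the vendor artifacts land (paths assume the usual layout).
paths.get_ipath_cur('vendor_cloud_config')
#   -> /var/lib/cloud/instance/vendor-cloud-config.txt
paths.get_ipath_cur('vendordata_raw')
#   -> /var/lib/cloud/instance/vendor-data.txt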

View File

@ -36,6 +36,7 @@ def find_module(base_name, search_paths, required_attrs=None):
found_places = []
if not required_attrs:
required_attrs = []
# NOTE(harlowja): translate the search paths to include the base name.
real_paths = []
for path in search_paths:
real_path = []
@ -50,8 +51,9 @@ def find_module(base_name, search_paths, required_attrs=None):
mod = None
try:
mod = import_module(full_path)
except ImportError:
pass
except ImportError as e:
LOG.debug("Failed at attempted import of '%s' due to: %s",
full_path, e)
if not mod:
continue
found_attrs = 0

View File

@ -52,6 +52,7 @@ CFG_BUILTIN = {
},
'distro': 'ubuntu',
},
'vendor_data': {'enabled': True, 'prefix': []},
}
# Valid frequencies of handlers/modules

View File

@ -154,7 +154,7 @@ class DataSourceAzureNet(sources.DataSource):
fp_files = []
for pk in self.cfg.get('_pubkeys', []):
bname = pk['fingerprint'] + ".crt"
bname = str(pk['fingerprint'] + ".crt")
fp_files += [os.path.join(mycfg['data_dir'], bname)]
missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
@ -247,7 +247,7 @@ def pubkeys_from_crt_files(flist):
try:
pubkeys.append(crtfile_to_pubkey(fname))
except util.ProcessExecutionError:
errors.extend(fname)
errors.append(fname)
if errors:
LOG.warn("failed to convert the crt files to pubkey: %s" % errors)

View File

@ -284,8 +284,10 @@ def find_candidate_devs():
# followed by fstype items, but with dupes removed
combined = (by_label + [d for d in by_fstype if d not in by_label])
# We are looking for block device (sda, not sda1), ignore partitions
combined = [d for d in combined if not util.is_partition(d)]
# We are looking for a block device or partition with the necessary label or
# an unpartitioned block device.
combined = [d for d in combined
if d in by_label or not util.is_partition(d)]
return combined

View File

@ -323,7 +323,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
(output, _error) = util.subp(cmd, data=bcmd)
# exclude vars in bash that change on their own or that we used
excluded = ("RANDOM", "LINENO", "_", "__v")
excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v")
preset = {}
ret = {}
target = None

View File

@ -47,6 +47,7 @@ SMARTOS_ATTRIB_MAP = {
'iptables_disable': ('iptables_disable', True),
'motd_sys_info': ('motd_sys_info', True),
'availability_zone': ('datacenter_name', True),
'vendordata': ('sdc:operator-script', False),
}
DS_NAME = 'SmartOS'
@ -154,6 +155,7 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
self.vendordata_raw = md['vendordata']
return True
def device_name_to_device(self, name):

View File

@ -53,6 +53,8 @@ class DataSource(object):
self.userdata = None
self.metadata = None
self.userdata_raw = None
self.vendordata = None
self.vendordata_raw = None
# find the datasource config name.
# remove 'DataSource' from classname on front, and remove 'Net' on end.
@ -77,9 +79,14 @@ class DataSource(object):
if self.userdata is None:
self.userdata = self.ud_proc.process(self.get_userdata_raw())
if apply_filter:
return self._filter_userdata(self.userdata)
return self._filter_xdata(self.userdata)
return self.userdata
def get_vendordata(self):
if self.vendordata is None:
self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
return self.vendordata
@property
def launch_index(self):
if not self.metadata:
@ -88,7 +95,7 @@ class DataSource(object):
return self.metadata['launch-index']
return None
def _filter_userdata(self, processed_ud):
def _filter_xdata(self, processed_ud):
filters = [
launch_index.Filter(util.safe_int(self.launch_index)),
]
@ -104,6 +111,9 @@ class DataSource(object):
def get_userdata_raw(self):
return self.userdata_raw
def get_vendordata_raw(self):
return self.vendordata_raw
# the data sources' config_obj is a cloud-config formated
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere

View File

@ -123,6 +123,7 @@ class Init(object):
os.path.join(c_dir, 'scripts', 'per-instance'),
os.path.join(c_dir, 'scripts', 'per-once'),
os.path.join(c_dir, 'scripts', 'per-boot'),
os.path.join(c_dir, 'scripts', 'vendor'),
os.path.join(c_dir, 'seed'),
os.path.join(c_dir, 'instances'),
os.path.join(c_dir, 'handlers'),
@ -319,6 +320,7 @@ class Init(object):
if not self._write_to_cache():
return
self._store_userdata()
self._store_vendordata()
def _store_userdata(self):
raw_ud = "%s" % (self.datasource.get_userdata_raw())
@ -326,11 +328,20 @@ class Init(object):
processed_ud = "%s" % (self.datasource.get_userdata())
util.write_file(self._get_ipath('userdata'), processed_ud, 0600)
def _default_userdata_handlers(self):
opts = {
def _store_vendordata(self):
raw_vd = "%s" % (self.datasource.get_vendordata_raw())
util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0600)
processed_vd = "%s" % (self.datasource.get_vendordata())
util.write_file(self._get_ipath('vendordata'), processed_vd, 0600)
def _default_handlers(self, opts=None):
if opts is None:
opts = {}
opts.update({
'paths': self.paths,
'datasource': self.datasource,
}
})
# TODO(harlowja) Hmmm, should we dynamically import these??
def_handlers = [
cc_part.CloudConfigPartHandler(**opts),
@ -340,7 +351,23 @@ class Init(object):
]
return def_handlers
def consume_userdata(self, frequency=PER_INSTANCE):
def _default_userdata_handlers(self):
return self._default_handlers()
def _default_vendordata_handlers(self):
return self._default_handlers(
opts={'script_path': 'vendor_scripts',
'cloud_config_path': 'vendor_cloud_config'})
def _do_handlers(self, data_msg, c_handlers_list, frequency,
excluded=None):
"""
Generalized handlers suitable for use with either vendordata
or userdata
"""
if excluded is None:
excluded = []
cdir = self.paths.get_cpath("handlers")
idir = self._get_ipath("handlers")
@ -352,12 +379,6 @@ class Init(object):
if d and d not in sys.path:
sys.path.insert(0, d)
# Ensure datasource fetched before activation (just in case)
user_data_msg = self.datasource.get_userdata(True)
# This keeps track of all the active handlers
c_handlers = helpers.ContentHandlers()
def register_handlers_in_dir(path):
# Attempts to register any handler modules under the given path.
if not path or not os.path.isdir(path):
@ -382,13 +403,16 @@ class Init(object):
util.logexc(LOG, "Failed to register handler from %s",
fname)
# This keeps track of all the active handlers
c_handlers = helpers.ContentHandlers()
# Add any handlers in the cloud-dir
register_handlers_in_dir(cdir)
# Register any other handlers that come from the default set. This
# is done after the cloud-dir handlers so that the cdir modules can
# take over the default user-data handler content-types.
for mod in self._default_userdata_handlers():
for mod in c_handlers_list:
types = c_handlers.register(mod, overwrite=False)
if types:
LOG.debug("Added default handler for %s from %s", types, mod)
@ -406,7 +430,7 @@ class Init(object):
handlers.call_begin(mod, data, frequency)
c_handlers.initialized.append(mod)
def walk_handlers():
def walk_handlers(excluded):
# Walk the user data
part_data = {
'handlers': c_handlers,
@ -419,9 +443,9 @@ class Init(object):
# to help write their contents to files with numbered
# names...
'handlercount': 0,
'excluded': excluded,
}
handlers.walk(user_data_msg, handlers.walker_callback,
data=part_data)
handlers.walk(data_msg, handlers.walker_callback, data=part_data)
def finalize_handlers():
# Give callbacks opportunity to finalize
@ -438,10 +462,16 @@ class Init(object):
try:
init_handlers()
walk_handlers()
walk_handlers(excluded)
finally:
finalize_handlers()
def consume_data(self, frequency=PER_INSTANCE):
# Consume the userdata first, because we want to let the part
# handlers run first (for merging stuff)
self._consume_userdata(frequency)
self._consume_vendordata(frequency)
# Perform post-consumption adjustments so that
# modules that run during the init stage reflect
# this consumed set.
@ -453,6 +483,64 @@ class Init(object):
# objects before the load of the userdata happened,
# this is expected.
def _consume_vendordata(self, frequency=PER_INSTANCE):
"""
Consume the vendordata and run the part handlers on it
"""
# User-data should have been consumed first.
# So we merge the other available cloud-configs (everything except
# vendor provided), and check whether or not we should consume
# vendor data at all. That gives the user or system a chance to override.
if not self.datasource.get_vendordata_raw():
LOG.debug("no vendordata from datasource")
return
_cc_merger = helpers.ConfigMerger(paths=self._paths,
datasource=self.datasource,
additional_fns=[],
base_cfg=self.cfg,
include_vendor=False)
vdcfg = _cc_merger.cfg.get('vendor_data', {})
if not isinstance(vdcfg, dict):
vdcfg = {'enabled': False}
LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)
enabled = vdcfg.get('enabled')
no_handlers = vdcfg.get('disabled_handlers', None)
if not util.is_true(enabled):
LOG.debug("vendordata consumption is disabled.")
return
LOG.debug("vendor data will be consumed. disabled_handlers=%s",
no_handlers)
# Ensure vendordata source fetched before activation (just in case)
vendor_data_msg = self.datasource.get_vendordata()
# This keeps track of all the active handlers, while excluding what the
# user doesn't want run, i.e. boot_hook, cloud_config, shell_script
c_handlers_list = self._default_vendordata_handlers()
# Run the handlers
self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
excluded=no_handlers)
def _consume_userdata(self, frequency=PER_INSTANCE):
"""
Consume the userdata and run the part handlers
"""
# Ensure datasource fetched before activation (just in case)
user_data_msg = self.datasource.get_userdata(True)
# This keeps track of all the active handlers
c_handlers_list = self._default_handlers()
# Run the handlers
self._do_handlers(user_data_msg, c_handlers_list, frequency)
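As _consume_vendordata() above shows, users can veto vendordata entirely or exclude specific part types. A sketch of the relevant cloud-config, expressed as the parsed dict; the content type in disabled_handlers is an illustrative value, not an exhaustive list:
# Sketch: user config that _consume_vendordata() would honor.
cfg = {
    'vendor_data': {
        'enabled': False,  # skip vendordata entirely
        # or keep it enabled and exclude specific handlers:
        # 'disabled_handlers': ['text/x-shellscript'],
    },
}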
class Modules(object):
def __init__(self, init, cfg_files=None):

View File

@ -88,7 +88,11 @@ class UserDataProcessor(object):
def process(self, blob):
accumulating_msg = MIMEMultipart()
self._process_msg(convert_string(blob), accumulating_msg)
if isinstance(blob, list):
for b in blob:
self._process_msg(convert_string(b), accumulating_msg)
else:
self._process_msg(convert_string(blob), accumulating_msg)
return accumulating_msg
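So process() now accepts either a single blob or a list of blobs (vendordata may arrive as a list); a sketch, where processor is an already-constructed UserDataProcessor:
# Sketch: both forms yield one accumulated multipart message.
msg = processor.process("#cloud-config\nruncmd: ['true']")
msg = processor.process(["#!/bin/sh\necho one", "#!/bin/sh\necho two"])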
def _process_msg(self, base_msg, append_msg):

View File

@ -172,6 +172,8 @@ class SeLinuxGuard(object):
def __exit__(self, excp_type, excp_value, excp_traceback):
if self.selinux and self.selinux.is_selinux_enabled():
path = os.path.realpath(os.path.expanduser(self.path))
# path should be a string, not unicode
path = str(path)
do_restore = False
try:
# See if even worth restoring??
@ -608,18 +610,28 @@ def del_dir(path):
shutil.rmtree(path)
def runparts(dirp, skip_no_exist=True):
def runparts(dirp, skip_no_exist=True, exe_prefix=None):
if skip_no_exist and not os.path.isdir(dirp):
return
failed = []
attempted = []
if exe_prefix is None:
prefix = []
elif isinstance(exe_prefix, str):
prefix = [str(exe_prefix)]
elif isinstance(exe_prefix, list):
prefix = exe_prefix
else:
raise TypeError("exe_prefix must be None, str, or list")
for exe_name in sorted(os.listdir(dirp)):
exe_path = os.path.join(dirp, exe_name)
if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
attempted.append(exe_path)
try:
subp([exe_path], capture=False)
subp(prefix + [exe_path], capture=False)
except ProcessExecutionError as e:
logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
failed.append(e)
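With exe_prefix, every executable in the directory is run under the given wrapper, which is how the vendor-scripts module applies its configured prefix. A sketch; the ltrace prefix mirrors the doc example later in this commit, and the directory is the typical instance scripts path (assumed):
# Sketch: run all vendor scripts under a profiling wrapper.
runparts('/var/lib/cloud/instance/scripts/vendor',
         exe_prefix=['/usr/bin/ltrace'])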
@ -865,8 +877,8 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
IP_address canonical_hostname [aliases...]
Fields of the entry are separated by any number of blanks and/or tab
characters. Text from a "#" character until the end of the line is a
comment, and is ignored. Host names may contain only alphanumeric
characters. Text from a "#" character until the end of the line is a
comment, and is ignored. Host names may contain only alphanumeric
characters, minus signs ("-"), and periods ("."). They must begin with
an alphabetic character and end with an alphanumeric character.
Optional aliases provide for name changes, alternate spellings, shorter
@ -1302,10 +1314,10 @@ def mounts():
mounted = {}
try:
# Go through mounts to see what is already mounted
if os.path.exists("/proc/mounts"):
if os.path.exists("/proc/mounts"):
mount_locs = load_file("/proc/mounts").splitlines()
method = 'proc'
else:
else:
(mountoutput, _err) = subp("mount")
mount_locs = mountoutput.splitlines()
method = 'mount'
@ -1313,7 +1325,7 @@ def mounts():
# Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
# FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
try:
if method == 'proc' and len(mpline) == 6:
if method == 'proc' and len(mpline) == 6:
(dev, mp, fstype, opts, _freq, _passno) = mpline.split()
elif method == 'mount':
m = re.search('^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', mpline)
@ -1788,6 +1800,7 @@ def parse_mount(path):
return devpth, fs_type, mount_point
return None
def get_mount_info(path, log=LOG):
# Use /proc/$$/mountinfo to find the device where path is mounted.
# This is done because with a btrfs filesystem using os.stat(path)

View File

@ -64,6 +64,7 @@ cloud_config_modules:
# The modules that run in the 'final' stage
cloud_final_modules:
- rightscale_userdata
- scripts-vendor
- scripts-per-once
- scripts-per-boot
- scripts-per-instance

View File

@ -59,3 +59,8 @@ log_cfgs:
- [ *log_base, *log_file ]
# A file path can also be used
# - /etc/log.conf
# this tells cloud-init to redirect its stdout and stderr to
# 'tee -a /var/log/cloud-init-output.log' so the user can see output
# there without needing to look on the console.
output: {all: '| tee -a /var/log/cloud-init-output.log'}

View File

@ -49,7 +49,6 @@ datasource:
hostname_bounce:
interface: eth0
policy: on # [can be 'on', 'off' or 'force']
}
SmartOS:
# Smart OS datasource works over a serial console interacting with

View File

@ -1,24 +1,24 @@
Cloud-init supports the creation of simple partition tables and file systems
on devices.
# Cloud-init supports the creation of simple partition tables and file systems
# on devices.
Default disk definitions for AWS
--------------------------------
(Not implemented yet, but provided for future documentation)
# Default disk definitions for AWS
# --------------------------------
# (Not implemented yet, but provided for future documentation)
disk_setup:
ephemeral0:
type: 'mbr'
layout: True
overwrite: False
disk_setup:
ephemeral0:
type: 'mbr'
layout: True
overwrite: False
fs_setup:
- label: None,
filesystem: ext3
device: ephemeral0
partition: auto
fs_setup:
- label: None,
filesystem: ext3
device: ephemeral0
partition: auto
Default disk definitions for Windows Azure
------------------------------------------
# Default disk definitions for Windows Azure
# ------------------------------------------
device_aliases: {'ephemeral0': '/dev/sdb'}
disk_setup:
@ -34,8 +34,8 @@ fs_setup:
replace_fs: ntfs
Default disk definitions for SmartOS
------------------------------------
# Default disk definitions for SmartOS
# ------------------------------------
device_aliases: {'ephemeral0': '/dev/sdb'}
disk_setup:
@ -49,203 +49,203 @@ fs_setup:
filesystem: ext3
device: ephemeral0.0
Caveat for SmartOS: if ephemeral disk is not defined, then the disk will
not be automatically added to the mounts.
# Caveat for SmartOS: if ephemeral disk is not defined, then the disk will
# not be automatically added to the mounts.
The default definition is used to make sure that the ephemeral storage is
setup properly.
# The default definition is used to make sure that the ephemeral storage is
# setup properly.
"disk_setup": disk partitioning
--------------------------------
# "disk_setup": disk partitioning
# --------------------------------
The disk_setup directive instructs Cloud-init to partition a disk. The format is:
# The disk_setup directive instructs Cloud-init to partition a disk. The format is:
disk_setup:
ephemeral0:
type: 'mbr'
layout: 'auto'
/dev/xvdh:
type: 'mbr'
layout:
- 33
- [33, 82]
- 33
overwrite: True
disk_setup:
ephemeral0:
type: 'mbr'
layout: 'auto'
/dev/xvdh:
type: 'mbr'
layout:
- 33
- [33, 82]
- 33
overwrite: True
The format is a dict of dicts. The top-level key is the name of the
device and the nested values define how to create and lay out the partition.
# The format is a dict of dicts. The top-level key is the name of the
# device and the nested values define how to create and lay out the
# partition.
# The general format is:
# disk_setup:
# <DEVICE>:
# type: 'mbr'
# layout: <LAYOUT|BOOL>
# overwrite: <BOOL>
#
# Where:
# <DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
# values which are specific to the cloud. For these devices
# Cloud-init will look up what the real device is and then
# use it.
#
# For other devices, the kernel device name is used. At this
# time only simple kernel devices are supported, meaning
# that device mapper and other targets may not work.
#
# Note: At this time, there is no handling or setup of
# device mapper targets.
#
# type=<TYPE>: Currently the following are supported:
# 'mbr': the default; sets up an MS-DOS partition table
#
# Note: At this time only 'mbr' partition tables are allowed.
# It is anticipated that we'll have GPT as an option in the
# future, or even "RAID" to create an mdadm RAID.
#
# layout={...}: The device layout. This is a list of values, with the
# percentage of the disk that each partition will take.
# Valid options are:
# [<SIZE>, [<SIZE>, <PART_TYPE>]]
#
# Where <SIZE> is the _percentage_ of the disk to use, while
# <PART_TYPE> is the numerical value of the partition type.
#
# The following sets up two partitions, with the first
# partition having a swap label, taking 1/3 of the disk space
# and the remainder being used as the second partition.
# /dev/xvdh':
# type: 'mbr'
# layout:
# - [33,82]
# - 66
# overwrite: True
#
# When layout is "true" it means single partition the entire
# device.
#
# When layout is "false" it means don't partition or ignore
# existing partitioning.
#
# If layout is set to "true" and overwrite is set to "false",
# it will skip partitioning the device without a failure.
#
# overwrite=<BOOL>: This describes whether to ride with safeties on and
# everything holstered.
#
# 'false' is the default, which means that:
# 1. The device will be checked for a partition table
# 2. The device will be checked for a file system
# 3. If either a partition or file system is found, then
# the operation will be _skipped_.
#
# 'true' is cowboy mode. There are no checks and things are
# done blindly. USE with caution, you can do things you
# really, really don't want to do.
#
#
# fs_setup: Setup the file system
# -------------------------------
#
# fs_setup describes how the file systems are supposed to look.
The general format is:
disk_setup:
<DEVICE>:
type: 'mbr'
layout: <LAYOUT|BOOL>
overwrite: <BOOL>
fs_setup:
- label: ephemeral0
filesystem: 'ext3'
device: 'ephemeral0'
partition: 'auto'
- label: mylabl2
filesystem: 'ext4'
device: '/dev/xvda1'
- special:
cmd: mkfs -t %(FILESYSTEM)s -L %(LABEL)s %(DEVICE)s
filesystem: 'btrfs'
device: '/dev/xvdh'
Where:
<DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
values which are specific to the cloud. For these devices
Cloud-init will look up what the real device is and then
use it.
For other devices, the kernel device name is used. At this
time only simple kernel devices are supported, meaning
that device mapper and other targets may not work.
Note: At this time, there is no handling or setup of
device mapper targets.
type=<TYPE>: Currently the following are supported:
'mbr': the default; sets up an MS-DOS partition table
Note: At this time only 'mbr' partition tables are allowed.
It is anticipated that we'll have GPT as an option in the
future, or even "RAID" to create an mdadm RAID.
layout={...}: The device layout. This is a list of values, with the
percentage of the disk that each partition will take.
Valid options are:
[<SIZE>, [<SIZE>, <PART_TYPE>]]
Where <SIZE> is the _percentage_ of the disk to use, while
<PART_TYPE> is the numerical value of the partition type.
The following sets up two partitions, with the first
partition having a swap label, taking 1/3 of the disk space
and the remainder being used as the second partition.
/dev/xvdh':
type: 'mbr'
layout:
- [33,82]
- 66
overwrite: True
When layout is "true" it means single partition the entire
device.
When layout is "false" it means don't partition or ignore
existing partitioning.
If layout is set to "true" and overwrite is set to "false",
it will skip partitioning the device without a failure.
overwrite=<BOOL>: This describes whether to ride with safeties on and
everything holstered.
'false' is the default, which means that:
1. The device will be checked for a partition table
2. The device will be checked for a file system
3. If either a partition or file system is found, then
the operation will be _skipped_.
'true' is cowboy mode. There are no checks and things are
done blindly. USE with caution, you can do things you
really, really don't want to do.
fs_setup: Setup the file system
-------------------------------
fs_setup describes how the file systems are supposed to look.
fs_setup:
- label: ephemeral0
filesystem: 'ext3'
device: 'ephemeral0'
partition: 'auto'
- label: mylabl2
filesystem: 'ext4'
device: '/dev/xvda1'
- special:
cmd: mkfs -t %(FILESYSTEM)s -L %(LABEL)s %(DEVICE)s
filesystem: 'btrfs'
device: '/dev/xvdh'
The general format is:
fs_setup:
- label: <LABEL>
filesystem: <FS_TYPE>
device: <DEVICE>
partition: <PART_VALUE>
overwrite: <OVERWRITE>
replace_fs: <FS_TYPE>
Where:
<LABEL>: The file system label to be used. If set to None, no label is
used.
<FS_TYPE>: The file system type. It is assumed that there
will be a "mkfs.<FS_TYPE>" that behaves like "mkfs". On a standard
Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
and vfat by default.
<DEVICE>: The device name. Special names of 'ephemeralX' or 'swap'
are allowed and the actual device is acquired from the cloud datasource.
When using 'ephemeralX' (i.e. ephemeral0), make sure to leave the
label as 'ephemeralX' otherwise there may be issues with the mounting
of the ephemeral storage layer.
If you define the device as 'ephemeralX.Y' then Y will be interpreted
as a partition value. However, ephemeralX.0 is the _same_ as ephemeralX.
<PART_VALUE>:
Partition definitions are overwritten if you use the '<DEVICE>.Y' notation.
The valid options are:
"auto|any": tell cloud-init not to care whether there is a partition
or not. Auto will use the first partition that does not contain a
file system already. In the absence of a partition table, it will
put it directly on the disk.
"auto": If a file system that matches the specification in terms of
label, type and device, then cloud-init will skip the creation of
the file system.
"any": If a file system that matches the file system type and device,
then cloud-init will skip the creation of the file system.
Devices are selected based on first-detected, starting with partitions
and then the raw disk. Consider the following:
NAME FSTYPE LABEL
xvdb
|-xvdb1 ext4
|-xvdb2
|-xvdb3 btrfs test
\-xvdb4 ext4 test
If you ask for 'auto', a label of 'test', and a file system of 'ext4'
then cloud-init will select the 2nd partition, even though there
is a partition match at the 4th partition.
If you ask for 'any' and a label of 'test', then cloud-init will
select the 1st partition.
If you ask for 'auto' and don't define label, then cloud-init will
select the 1st partition.
In general, if you have a specific partition configuration in mind,
you should define either the device or the partition number. 'auto'
and 'any' are specifically intended for formatting ephemeral storage or
for simple schemes.
"none": Put the file system directly on the device.
<NUM>: where NUM is the actual partition number.
<OVERWRITE>: Defines whether or not to overwrite any existing
filesystem.
"true": Indiscriminately destroy any pre-existing file system. Use at
your own peril.
"false": If an existing file system exists, skip the creation.
<REPLACE_FS>: This is a special directive, used for Windows Azure that
instructs cloud-init to replace a file system of <FS_TYPE>. NOTE:
unless you define a label, this requires the use of the 'any' partition
directive.
Behavior Caveat: The default behavior is to _check_ if the file system exists.
If a file system matches the specification, then the operation is a no-op.
# The general format is:
# fs_setup:
# - label: <LABEL>
# filesystem: <FS_TYPE>
# device: <DEVICE>
# partition: <PART_VALUE>
# overwrite: <OVERWRITE>
# replace_fs: <FS_TYPE>
#
# Where:
# <LABEL>: The file system label to be used. If set to None, no label is
# used.
#
# <FS_TYPE>: The file system type. It is assumed that there
# will be a "mkfs.<FS_TYPE>" that behaves like "mkfs". On a standard
# Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
# and vfat by default.
#
# <DEVICE>: The device name. Special names of 'ephemeralX' or 'swap'
# are allowed and the actual device is acquired from the cloud datasource.
# When using 'ephemeralX' (i.e. ephemeral0), make sure to leave the
# label as 'ephemeralX' otherwise there may be issues with the mounting
# of the ephemeral storage layer.
#
# If you define the device as 'ephemeralX.Y' then Y will be interpreted
# as a partition value. However, ephemeralX.0 is the _same_ as ephemeralX.
#
# <PART_VALUE>:
# Partition definitions are overwritten if you use the '<DEVICE>.Y' notation.
#
# The valid options are:
# "auto|any": tell cloud-init not to care whether there is a partition
# or not. Auto will use the first partition that does not contain a
# file system already. In the absence of a partition table, it will
# put it directly on the disk.
#
# "auto": If a file system that matches the specification in terms of
# label, type and device, then cloud-init will skip the creation of
# the file system.
#
# "any": If a file system that matches the file system type and device,
# then cloud-init will skip the creation of the file system.
#
# Devices are selected based on first-detected, starting with partitions
# and then the raw disk. Consider the following:
# NAME FSTYPE LABEL
# xvdb
# |-xvdb1 ext4
# |-xvdb2
# |-xvdb3 btrfs test
# \-xvdb4 ext4 test
#
# If you ask for 'auto', a label of 'test', and a file system of 'ext4'
# then cloud-init will select the 2nd partition, even though there
# is a partition match at the 4th partition.
#
# If you ask for 'any' and a label of 'test', then cloud-init will
# select the 1st partition.
#
# If you ask for 'auto' and don't define label, then cloud-init will
# select the 1st partition.
#
# In general, if you have a specific partition configuration in mind,
# you should define either the device or the partition number. 'auto'
# and 'any' are specifically intended for formatting ephemeral storage or
# for simple schemes.
#
# "none": Put the file system directly on the device.
#
# <NUM>: where NUM is the actual partition number.
#
# <OVERWRITE>: Defines whether or not to overwrite any existing
# filesystem.
#
# "true": Indiscriminately destroy any pre-existing file system. Use at
# your own peril.
#
# "false": If an existing file system exists, skip the creation.
#
# <REPLACE_FS>: This is a special directive, used for Windows Azure that
# instructs cloud-init to replace a file system of <FS_TYPE>. NOTE:
# unless you define a label, this requires the use of the 'any' partition
# directive.
#
# Behavior Caveat: The default behavior is to _check_ if the file system exists.
# If a file system matches the specification, then the operation is a no-op.

View File

@ -5,12 +5,10 @@
#
# mode:
# values:
# * auto: use any option possible (growpart or parted)
# * auto: use any option possible (any available)
# if none are available, do not warn, but debug.
# * growpart: use growpart to grow partitions
# if growpart is not available, this is an error.
# * parted: use parted (parted resizepart) to resize partitions
# if parted is not available, this is an error.
# * off, false
#
# devices:

View File

@ -6,6 +6,9 @@
#
# Note: 'tags' should be specified as a comma delimited string
# rather than a list.
#
# You can get example key/values by running 'landscape-config',
# answer the questions, then look at /etc/landscape/client.config
landscape:
client:
url: "https://landscape.canonical.com/message-system"
@ -13,3 +16,7 @@ landscape:
data_path: "/var/lib/landscape/client"
http_proxy: "http://my.proxy.com/foobar"
tags: "server,cloud"
computer_title: footitle
https_proxy: fooproxy
registration_key: fookey
account_name: fooaccount

View File

@ -0,0 +1,16 @@
#cloud-config
#
# This explains how to control vendordata via a cloud-config
#
# On select datasources, vendors have a channel for the consumption
# of all supported user-data types, called vendordata. Users of the
# end system are given ultimate control.
#
vendor_data:
enabled: True
prefix: /usr/bin/ltrace
# enabled: whether it is enabled or not
# prefix: the command to run before any vendor scripts.
# Note: this is a fairly weak method of containment. It should
# be used to profile a script, not to prevent it from running
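A sketch of what 'prefix' amounts to at run time; the function name and
script path below are illustrative, not cloud-init's actual code:

import subprocess

def run_vendor_script(script_path, prefix=None):
    # prefix='/usr/bin/ltrace' turns 'part-001' into
    # '/usr/bin/ltrace part-001' -- a wrapper, not a sandbox.
    cmd = ([prefix] if prefix else []) + [script_path]
    subprocess.check_call(cmd)

# e.g. run_vendor_script('/var/lib/cloud/instance/scripts/vendor/part-001',
#                        prefix='/usr/bin/ltrace')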

View File

@ -74,7 +74,7 @@ apt_preserve_sources_list: true
# 'source' entries in apt-sources that match this python regex
# expression will be passed to add-apt-repository
add_apt_repo_match = "^[\w-]+:\w"
add_apt_repo_match: '^[\w-]+:\w'
apt_sources:
- source: "deb http://ppa.launchpad.net/byobu/ppa/ubuntu karmic main"
@ -147,8 +147,13 @@ apt_sources:
# '--option=Dpkg::options::=--force-unsafe-io', '--assume-yes', '--quiet']
#
# apt_get_upgrade_subcommand:
# Specify a different subcommand for 'upgrade'. The default is 'dist-upgrade'.
# This is the subcommand that is invoked if package_upgrade is set to true above.
# Specify a different subcommand for 'upgrade'. The default is 'dist-upgrade'.
# This is the subcommand that is invoked if package_upgrade is set to true above.
#
# apt_get_wrapper:
# command: eatmydata
# enabled: [True, False, "auto"]
#
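As a sketch, the wrapper simply fronts the apt-get command line; the
function below is illustrative (not the cloud-init implementation),
with 'auto' modeled as "prepend the wrapper only when it is installed":

import shutil

def wrap_apt_command(base_cmd, wrapper='eatmydata', enabled='auto'):
    # enabled=True: always prepend; 'auto': only when the wrapper is
    # actually present on the system; anything else: run apt-get as-is.
    if enabled is True or (enabled == 'auto' and shutil.which(wrapper)):
        return [wrapper] + base_cmd
    return base_cmd

# wrap_apt_command(['apt-get', '--assume-yes', 'install', 'pkg'])
# -> ['eatmydata', 'apt-get', '--assume-yes', 'install', 'pkg']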
# Install additional packages on first boot
#

View File

@ -130,10 +130,6 @@ To see which versions are supported from your cloud provider use the following U
...
latest
**Note:** internally cloud-init used the `boto`_ library to fetch the instance
userdata and instance metadata; feel free to check that library out, as it
provides much other useful EC2 functionality.
---------------------------
Config Drive
---------------------------

doc/vendordata.txt Normal file
View File

@ -0,0 +1,53 @@
=== Overview ===
Vendordata is data provided by the entity that launches an instance
(for example, the cloud provider). This data can be used to
customize the image to fit into the particular environment it is
being run in.
Vendordata follows the same rules as user-data, with the following
caveats:
1. Users have ultimate control over vendordata. They can disable its
execution or disable handling of specific parts of multipart input.
2. By default it only runs on first boot.
3. Vendordata can be disabled by the user. If the use of vendordata is
required for the instance to run, then vendordata should not be
used.
4. User-supplied cloud-config is merged over cloud-config from
vendordata.
Users providing cloud-config data can use the '#cloud-config-jsonp' method
to more finely control their modifications to the vendor supplied
cloud-config. For example, if both vendor and user have provided
'runcmd' then the default merge handler will cause the user's runcmd to
override the one provided by the vendor. To append to 'runcmd', the user
could better provide multipart input with a cloud-config-jsonp part like:
#cloud-config-jsonp
[{ "op": "add", "path": "/runcmd", "value": ["my", "command", "here"]}]
Further, we strongly advise vendors to not 'be evil'. By evil, we
mean any action that could compromise a system. Since users trust
you, please take care to make sure that any vendordata is safe,
atomic, idempotent and does not put your users at risk.
=== Input Formats ===
cloud-init will download and cache to the filesystem any vendordata that
it finds. Vendordata is handled exactly like user-data. That means that
the vendor can supply multipart input and have those parts acted on
in the same way as user-data.
The only differences are:
* vendor-scripts are stored in a different location than user-scripts (to
avoid namespace collision)
* the user can disable individual part handlers via cloud-config settings.
For example, to disable handling of 'part-handlers' in vendor-data,
the user could provide user-data like this:
#cloud-config
vendordata: {excluded: 'text/part-handler'}
=== Examples ===
There are examples in the examples subdirectory.
Additionally, the 'tools' directory contains 'write-mime-multipart',
which can be used to easily generate mime-multi-part files from a list
of input files. That data can then be given to an instance.
See 'write-mime-multipart --help' for usage.
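The same multipart structure can also be built with nothing but the
Python standard library; a minimal sketch follows (the part bodies are
placeholders):

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

combined = MIMEMultipart()
for body, subtype in [("#cloud-config\nruncmd: ['true']", 'cloud-config'),
                      ('#!/bin/sh\necho hello', 'x-shellscript')]:
    # The text/<subtype> content type is what tells cloud-init which
    # handler should act on each part.
    combined.attach(MIMEText(body, subtype))
print(combined.as_string())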

View File

@ -29,10 +29,9 @@ import argparse
# file pypi package name to a debian/ubuntu package name.
PKG_MP = {
'argparse': 'python-argparse',
'boto': 'python-boto',
'cheetah': 'python-cheetah',
'configobj': 'python-configobj',
'jsonpatch': 'python-json-patch',
'jsonpatch': 'python-jsonpatch | python-json-patch',
'oauth': 'python-oauth',
'prettytable': 'python-prettytable',
'pyserial': 'python-serial',

View File

@ -36,7 +36,6 @@ from cloudinit import util
PKG_MP = {
'redhat': {
'argparse': 'python-argparse',
'boto': 'python-boto',
'cheetah': 'python-cheetah',
'configobj': 'python-configobj',
'jsonpatch': 'python-jsonpatch',
@ -48,7 +47,6 @@ PKG_MP = {
},
'suse': {
'argparse': 'python-argparse',
'boto': 'python-boto',
'cheetah': 'python-cheetah',
'configobj': 'python-configobj',
'jsonpatch': 'python-jsonpatch',

View File

@ -25,7 +25,7 @@ Depends: procps,
#end for
python-software-properties | software-properties-common,
\${misc:Depends},
\${python:Depends}
Recommends: sudo
XB-Python-Version: \${python:Versions}
Description: Init scripts for cloud instances
Cloud instances need special scripts to run during initialisation

View File

@ -27,25 +27,3 @@ License: GPL-3
The complete text of the GPL version 3 can be seen in
/usr/share/common-licenses/GPL-3.
Files: cloudinit/boto_utils.py
Copyright: 2006,2007, Mitch Garnaat http://garnaat.org/
License: MIT
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, dis-
tribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the fol-
lowing conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.

View File

@ -34,6 +34,7 @@ Requires: e2fsprogs
Requires: net-tools
Requires: procps
Requires: shadow-utils
Requires: sudo
# Install pypi 'dynamic' requirements
#for $r in $requires

View File

@ -43,6 +43,7 @@ Requires: iproute2
Requires: e2fsprogs
Requires: net-tools
Requires: procps
Requires: sudo
# Install pypi 'dynamic' requirements
#for $r in $requires

View File

@ -29,8 +29,5 @@ argparse
# Requests handles ssl correctly!
requests
# Boto for ec2
boto
# For patching pieces of cloud-config together
jsonpatch

test-requirements.txt Normal file
View File

@ -0,0 +1,6 @@
httpretty>=0.7.1
mocker
nose
pep8
pyflakes
pylint

View File

@ -13,6 +13,7 @@ from email.mime.multipart import MIMEMultipart
from cloudinit import handlers
from cloudinit import helpers as c_helpers
from cloudinit import log
from cloudinit.settings import (PER_INSTANCE)
from cloudinit import sources
from cloudinit import stages
from cloudinit import util
@ -24,10 +25,11 @@ from tests.unittests import helpers
class FakeDataSource(sources.DataSource):
def __init__(self, userdata):
def __init__(self, userdata=None, vendordata=None):
sources.DataSource.__init__(self, {}, None, None)
self.metadata = {'instance-id': INSTANCE_ID}
self.userdata_raw = userdata
self.vendordata_raw = vendordata
# FIXME: these tests shouldn't be checking log output??
@ -45,6 +47,11 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
if self._log_handler and self._log:
self._log.removeHandler(self._log_handler)
def _patchIn(self, root):
self.restore()
self.patchOS(root)
self.patchUtils(root)
def capture_log(self, lvl=logging.DEBUG):
log_file = StringIO.StringIO()
self._log_handler = logging.StreamHandler(log_file)
@ -68,13 +75,89 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEquals(2, len(cc))
self.assertEquals('qux', cc['baz'])
self.assertEquals('qux2', cc['bar'])
def test_simple_jsonp_vendor_and_user(self):
# test that user-data wins over vendor
user_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" }
]
'''
vendor_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
{ "op": "add", "path": "/foo", "value": "quxC" }
]
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
cfg = mods.cfg
self.assertIn('vendor_data', cfg)
self.assertEquals('qux', cfg['baz'])
self.assertEquals('qux2', cfg['bar'])
self.assertEquals('quxC', cfg['foo'])
def test_simple_jsonp_no_vendor_consumed(self):
# make sure that vendor data is not consumed
user_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" },
{ "op": "add", "path": "/vendor_data", "value": {"enabled": "false"}}
]
'''
vendor_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
{ "op": "add", "path": "/foo", "value": "quxC" }
]
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
cfg = mods.cfg
self.assertEquals('qux', cfg['baz'])
self.assertEquals('qux2', cfg['bar'])
self.assertNotIn('foo', cfg)
def test_mixed_cloud_config(self):
blob_cc = '''
#cloud-config
@ -105,12 +188,87 @@ c: d
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEquals(1, len(cc))
self.assertEquals('c', cc['a'])
def test_vendor_user_yaml_cloud_config(self):
vendor_blob = '''
#cloud-config
a: b
name: vendor
run:
- x
- y
'''
user_blob = '''
#cloud-config
a: c
vendor_data:
enabled: True
prefix: /bin/true
name: user
run:
- z
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
cfg = mods.cfg
self.assertIn('vendor_data', cfg)
self.assertEquals('c', cfg['a'])
self.assertEquals('user', cfg['name'])
self.assertNotIn('x', cfg['run'])
self.assertNotIn('y', cfg['run'])
self.assertIn('z', cfg['run'])
def test_vendordata_script(self):
vendor_blob = '''
#!/bin/bash
echo "test"
'''
user_blob = '''
#cloud-config
vendor_data:
enabled: True
prefix: /bin/true
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
_cfg = mods.cfg
vendor_script = initer.paths.get_ipath_cur('vendor_scripts')
vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script)
self.assertTrue(os.path.exists(vendor_script_fns))
def test_merging_cloud_config(self):
blob = '''
#cloud-config
@ -185,7 +343,7 @@ p: 1
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
self.assertIn(
"Unhandled non-multipart (text/x-not-multipart) userdata:",
log_file.getvalue())
@ -221,7 +379,7 @@ c: 4
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
contents = util.load_file(ci.paths.get_ipath("cloud_config"))
contents = util.load_yaml(contents)
self.assertTrue(isinstance(contents, dict))
@ -244,7 +402,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
self.assertIn(
"Unhandled unknown content-type (text/plain)",
log_file.getvalue())
@ -264,7 +422,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
def test_mime_text_x_shellscript(self):
@ -284,7 +442,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
def test_mime_text_plain_shell(self):
@ -304,5 +462,5 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
self.assertEqual("", log_file.getvalue())

View File

@ -285,10 +285,11 @@ class TestConfigDriveDataSource(MockerTestCase):
self.assertEqual(["/dev/vdb", "/dev/zdd"],
ds.find_candidate_devs())
# verify that partitions are not considered
# verify that partitions are considered, but only if they have the correct filesystem label.
devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
"TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]}
self.assertEqual([], ds.find_candidate_devs())
self.assertEqual(["/dev/vdb3"],
ds.find_candidate_devs())
finally:
util.find_devs_with = orig_find_devs_with

View File

@ -258,6 +258,14 @@ iface eth0 inet static
''')
class TestParseShellConfig(MockerTestCase):
def test_no_seconds(self):
cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"])
# we could test 'sleep 2', but that would make the test run slower.
ret = ds.parse_shell_config(cfg)
self.assertEqual(ret, {"foo": "bar", "xx": "foo"})
def populate_context_dir(path, variables):
data = "# Context variables generated by OpenNebula\n"
for (k, v) in variables.iteritems():

View File

@ -0,0 +1,130 @@
from tests.unittests import helpers
from cloudinit import ec2_utils as eu
import httpretty as hp
class TestEc2Util(helpers.TestCase):
VERSION = 'latest'
@hp.activate
def test_userdata_fetch(self):
hp.register_uri(hp.GET,
'http://169.254.169.254/%s/user-data' % (self.VERSION),
body='stuff',
status=200)
userdata = eu.get_instance_userdata(self.VERSION)
self.assertEquals('stuff', userdata)
@hp.activate
def test_userdata_fetch_fail_not_found(self):
hp.register_uri(hp.GET,
'http://169.254.169.254/%s/user-data' % (self.VERSION),
status=404)
userdata = eu.get_instance_userdata(self.VERSION, retries=0)
self.assertEquals('', userdata)
@hp.activate
def test_userdata_fetch_fail_server_dead(self):
hp.register_uri(hp.GET,
'http://169.254.169.254/%s/user-data' % (self.VERSION),
status=500)
userdata = eu.get_instance_userdata(self.VERSION, retries=0)
self.assertEquals('', userdata)
@hp.activate
def test_metadata_fetch_no_keys(self):
base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
body="\n".join(['hostname',
'instance-id',
'ami-launch-index']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
status=200, body='123')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'ami-launch-index'),
status=200, body='1')
md = eu.get_instance_metadata(self.VERSION, retries=0)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
self.assertEquals(md['instance-id'], '123')
self.assertEquals(md['ami-launch-index'], '1')
@hp.activate
def test_metadata_fetch_key(self):
base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
body="\n".join(['hostname',
'instance-id',
'public-keys/']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
status=200, body='123')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
status=200, body='0=my-public-key')
hp.register_uri(hp.GET,
eu.combine_url(base_url, 'public-keys/0/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
self.assertEquals(md['instance-id'], '123')
self.assertEquals(1, len(md['public-keys']))
@hp.activate
def test_metadata_fetch_with_2_keys(self):
base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
body="\n".join(['hostname',
'instance-id',
'public-keys/']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
status=200, body='123')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
status=200,
body="\n".join(['0=my-public-key', '1=my-other-key']))
hp.register_uri(hp.GET,
eu.combine_url(base_url, 'public-keys/0/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
hp.register_uri(hp.GET,
eu.combine_url(base_url, 'public-keys/1/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-other-key')
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
self.assertEquals(md['instance-id'], '123')
self.assertEquals(2, len(md['public-keys']))
@hp.activate
def test_metadata_fetch_bdm(self):
base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
body="\n".join(['hostname',
'instance-id',
'block-device-mapping/']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
status=200, body='123')
hp.register_uri(hp.GET,
eu.combine_url(base_url, 'block-device-mapping/'),
status=200,
body="\n".join(['ami', 'ephemeral0']))
hp.register_uri(hp.GET,
eu.combine_url(base_url, 'block-device-mapping/ami'),
status=200,
body="sdb")
hp.register_uri(hp.GET,
eu.combine_url(base_url,
'block-device-mapping/ephemeral0'),
status=200,
body="sdc")
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
self.assertEquals(md['instance-id'], '123')
bdm = md['block-device-mapping']
self.assertEquals(2, len(bdm))
self.assertEquals(bdm['ami'], 'sdb')
self.assertEquals(bdm['ephemeral0'], 'sdc')

View File

@ -12,50 +12,9 @@ import re
import unittest
# growpart:
# mode: auto # off, on, auto, 'growpart', 'parted'
# mode: auto # off, on, auto, 'growpart'
# devices: ['root']
HELP_PARTED_NO_RESIZE = """
Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...]
Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in
interactive mode.
OPTIONs:
<SNIP>
COMMANDs:
<SNIP>
quit exit program
rescue START END rescue a lost partition near START
and END
resize NUMBER START END resize partition NUMBER and its file
system
rm NUMBER delete partition NUMBER
<SNIP>
Report bugs to bug-parted@gnu.org
"""
HELP_PARTED_RESIZE = """
Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...]
Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in
interactive mode.
OPTIONs:
<SNIP>
COMMANDs:
<SNIP>
quit exit program
rescue START END rescue a lost partition near START
and END
resize NUMBER START END resize partition NUMBER and its file
system
resizepart NUMBER END resize partition NUMBER
rm NUMBER delete partition NUMBER
<SNIP>
Report bugs to bug-parted@gnu.org
"""
HELP_GROWPART_RESIZE = """
growpart disk partition
rewrite partition table so that partition takes up all the space it can
@ -122,11 +81,8 @@ class TestConfig(MockerTestCase):
# Order must be correct
self.mocker.order()
@unittest.skip("until LP: #1212444 fixed")
def test_no_resizers_auto_is_fine(self):
subp = self.mocker.replace(util.subp, passthrough=False)
subp(['parted', '--help'], env={'LANG': 'C'})
self.mocker.result((HELP_PARTED_NO_RESIZE, ""))
subp(['growpart', '--help'], env={'LANG': 'C'})
self.mocker.result((HELP_GROWPART_NO_RESIZE, ""))
self.mocker.replay()
@ -144,15 +100,14 @@ class TestConfig(MockerTestCase):
self.assertRaises(ValueError, self.handle, self.name, config,
self.cloud_init, self.log, self.args)
@unittest.skip("until LP: #1212444 fixed")
def test_mode_auto_prefers_parted(self):
def test_mode_auto_prefers_growpart(self):
subp = self.mocker.replace(util.subp, passthrough=False)
subp(['parted', '--help'], env={'LANG': 'C'})
self.mocker.result((HELP_PARTED_RESIZE, ""))
subp(['growpart', '--help'], env={'LANG': 'C'})
self.mocker.result((HELP_GROWPART_RESIZE, ""))
self.mocker.replay()
ret = cc_growpart.resizer_factory(mode="auto")
self.assertTrue(isinstance(ret, cc_growpart.ResizeParted))
self.assertTrue(isinstance(ret, cc_growpart.ResizeGrowPart))
def test_handle_with_no_growpart_entry(self):
#if no 'growpart' entry in config, then mode=auto should be used

View File

@ -35,8 +35,8 @@ class TestMergeRun(helpers.FilesystemMockingTestCase):
initer.datasource.userdata_raw = ud
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_userdata',
initer.consume_userdata,
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mirrors = initer.distro.get_option('package_mirrors')

View File

@ -66,8 +66,8 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.update()
self.assertTrue(os.path.islink("var/lib/cloud/instance"))
initer.cloudify().run('consume_userdata',
initer.consume_userdata,
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)

View File

@ -1,32 +1,23 @@
#!/bin/sh
#!/usr/bin/env python
set -e
import os
import sys
find_root() {
local topd
if [ -z "${CLOUD_INIT_TOP_D}" ]; then
topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
else
topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
fi
[ $? -eq 0 -a -f "${topd}/setup.py" ] || return
ROOT_DIR="$topd"
}
fail() { echo "$0:" "$@" 1>&2; exit 1; }
if 'CLOUD_INIT_TOP_D' in os.environ:
topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
else:
topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if ! find_root; then
fail "Unable to locate 'setup.py' file that should " \
"exist in the cloud-init root directory."
fi
for fname in ("setup.py", "requirements.txt"):
if not os.path.isfile(os.path.join(topd, fname)):
sys.stderr.write("Unable to locate '%s' file that should "
"exist in cloud-init root directory." % fname)
sys.exit(1)
REQUIRES="$ROOT_DIR/Requires"
with open(os.path.join(topd, "requirements.txt"), "r") as fp:
for line in fp:
if not line.strip() or line.startswith("#"):
continue
sys.stdout.write(line)
if [ ! -e "$REQUIRES" ]; then
fail "Unable to find 'Requires' file located at '$REQUIRES'"
fi
# Filter out comments and empty lines
DEPS=$(sed -n -e 's,#.*,,' -e '/./p' "$REQUIRES") &&
[ -n "$DEPS" ] ||
fail "failed to read deps from '${REQUIRES}'"
echo "$DEPS" | sort -d -f
sys.exit(0)

View File

@ -1,32 +1,26 @@
#!/bin/sh
#!/usr/bin/env python
set -e
import os
import re
import sys
find_root() {
local topd
if [ -z "${CLOUD_INIT_TOP_D}" ]; then
topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
else
topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
fi
[ $? -eq 0 -a -f "${topd}/setup.py" ] || return
ROOT_DIR="$topd"
}
fail() { echo "$0:" "$@" 1>&2; exit 1; }
if 'CLOUD_INIT_TOP_D' in os.environ:
topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
else:
topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if ! find_root; then
fail "Unable to locate 'setup.py' file that should " \
"exist in the cloud-init root directory."
fi
for fname in ("setup.py", "ChangeLog"):
if not os.path.isfile(os.path.join(topd, fname)):
sys.stderr.write("Unable to locate '%s' file that should "
"exist in cloud-init root directory." % fname)
sys.exit(1)
CHNG_LOG="$ROOT_DIR/ChangeLog"
vermatch = re.compile(r"^[0-9]+[.][0-9]+[.][0-9]+:$")
if [ ! -e "$CHNG_LOG" ]; then
fail "Unable to find 'ChangeLog' file located at '$CHNG_LOG'"
fi
with open(os.path.join(topd, "ChangeLog"), "r") as fp:
for line in fp:
if vermatch.match(line):
sys.stdout.write(line.strip()[:-1] + "\n")
break
VERSION=$(grep -m1 -o -E '^[0-9]+(\.[0-9]+)+' \
"$CHNG_LOG") &&
[ -n "$VERSION" ] ||
fail "failed to get version from '$CHNG_LOG'"
echo "$VERSION"
sys.exit(0)