Rebase on upstream

Ben Howard 2014-02-07 10:10:02 +02:00
commit b56c388430
68 changed files with 2166 additions and 504 deletions

View File

@ -11,6 +11,18 @@
unicode).
- config/cloud.cfg.d/05_logging.cfg: provide a default 'output' setting, to
redirect cloud-init stderr and stdout to /var/log/cloud-init-output.log.
- drop support for resizing partitions with parted entirely (LP: #1212492).
This functionality was already broken anyway.
- add support for vendordata in SmartOS and NoCloud datasources.
- drop dependency on boto for crawling ec2 metadata service.
- add 'Requires' on sudo (for OpenNebula datasource) in rpm specs, and
'Recommends' in the debian/control.in [Vlastimil Holer]
- if mount_info reports /dev/root is a device path for /, then convert
that to a real device with the help of the kernel cmdline.
- configdrive: consider partitions as possible datasources if they have
the correct filesystem label. [Paul Querna]
- initial freebsd support [Harm Weites]
- fix is_ipv4 to accept IP addresses with a '0' in them.
0.7.4:
- fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a
partitioned block device with target filesystem on ephemeral0.1.

View File

@ -8,6 +8,8 @@ YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
CHANGELOG_VERSION=$(shell $(CWD)/tools/read-version)
CODE_VERSION=$(shell python -c "from cloudinit import version; print version.version_string()")
PIP_INSTALL := pip install
ifeq ($(distro),)
distro = redhat
endif
@ -23,7 +25,16 @@ pylint:
pyflakes:
pyflakes $(PY_FILES)
test:
pip-requirements:
@echo "Installing cloud-init dependencies..."
$(PIP_INSTALL) -r "$@.txt" -q
pip-test-requirements:
@echo "Installing cloud-init test dependencies..."
$(PIP_INSTALL) -r "$@.txt" -q
test: clean_pyc
@echo "Running tests..."
@nosetests $(noseopts) tests/
check_version:
@ -32,12 +43,14 @@ check_version:
"not equal to code version $(CODE_VERSION)"; exit 2; \
else true; fi
clean_pyc:
@find . -type f -name "*.pyc" -delete
2to3:
2to3 $(PY_FILES)
clean:
rm -rf /var/log/cloud-init.log \
/var/lib/cloud/
clean: clean_pyc
rm -rf /var/log/cloud-init.log /var/lib/cloud/
yaml:
@$(CWD)/tools/validate-yaml.py $(YAML_FILES)
@ -49,4 +62,4 @@ deb:
./packages/bddeb
.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb yaml check_version
.PHONY: pip-test-requirements pip-requirements clean_pyc

View File

@ -261,8 +261,8 @@ def main_init(name, args):
# Attempt to consume the data per instance.
# This may run user-data handlers and/or perform
# url downloads and such as needed.
(ran, _results) = init.cloudify().run('consume_userdata',
init.consume_userdata,
(ran, _results) = init.cloudify().run('consume_data',
init.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
if not ran:
@ -271,7 +271,7 @@ def main_init(name, args):
#
# See: https://bugs.launchpad.net/bugs/819507 for a little
# reason behind this...
init.consume_userdata(PER_ALWAYS)
init.consume_data(PER_ALWAYS)
except Exception:
util.logexc(LOG, "Consuming user data failed!")
return 1

View File

@ -14,10 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from StringIO import StringIO
from cloudinit import util
from cloudinit import type_utils
from cloudinit import util
import copy
from StringIO import StringIO
def _make_header(text):

View File

@ -80,30 +80,6 @@ class ResizeFailedException(Exception):
pass
class ResizeParted(object):
def available(self):
myenv = os.environ.copy()
myenv['LANG'] = 'C'
try:
(out, _err) = util.subp(["parted", "--help"], env=myenv)
if re.search(r"COMMAND.*resizepart\s+", out, re.DOTALL):
return True
except util.ProcessExecutionError:
pass
return False
def resize(self, diskdev, partnum, partdev):
before = get_size(partdev)
try:
util.subp(["parted", diskdev, "resizepart", partnum])
except util.ProcessExecutionError as e:
raise ResizeFailedException(e)
return (before, get_size(partdev))
class ResizeGrowPart(object):
def available(self):
myenv = os.environ.copy()
@ -138,6 +114,41 @@ class ResizeGrowPart(object):
return (before, get_size(partdev))
class ResizeGpart(object):
def available(self):
if not util.which('gpart'):
return False
return True
def resize(self, diskdev, partnum, partdev):
"""
GPT disks store metadata at the beginning (primary) and at the
end (secondary) of the disk. When launching an image with a
larger disk compared to the original image, the secondary copy
is lost. Thus, the metadata will be marked CORRUPT, and needs to
be recovered.
"""
try:
util.subp(["gpart", "recover", diskdev])
except util.ProcessExecutionError as e:
if e.exit_code != 0:
util.logexc(LOG, "Failed: gpart recover %s", diskdev)
raise ResizeFailedException(e)
before = get_size(partdev)
try:
util.subp(["gpart", "resize", "-i", partnum, diskdev])
except util.ProcessExecutionError as e:
util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
raise ResizeFailedException(e)
# Since growing the FS requires a reboot, flag that one is
# needed once this module has finished.
open('/var/run/reboot-required', 'a').close()
return (before, get_size(partdev))
def get_size(filename):
fd = os.open(filename, os.O_RDONLY)
try:
@ -156,6 +167,12 @@ def device_part_info(devpath):
bname = os.path.basename(rpath)
syspath = "/sys/class/block/%s" % bname
# FreeBSD doesn't know of sysfs so just get everything we need from
# the device, like /dev/vtbd0p2.
if util.system_info()["platform"].startswith('FreeBSD'):
m = re.search('^(/dev/.+)p([0-9])$', devpath)
return (m.group(1), m.group(2))
if not os.path.exists(syspath):
raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
@ -206,7 +223,8 @@ def resize_devices(resizer, devices):
"stat of '%s' failed: %s" % (blockdev, e),))
continue
if not stat.S_ISBLK(statret.st_mode):
if (not stat.S_ISBLK(statret.st_mode) and
not stat.S_ISCHR(statret.st_mode)):
info.append((devent, RESIZE.SKIPPED,
"device '%s' not a block device" % blockdev,))
continue
@ -279,6 +297,4 @@ def handle(_name, cfg, _cloud, log, _args):
else:
log.debug("'%s' %s: %s" % (entry, action, msg))
# LP: 1212444 FIXME re-order and favor ResizeParted
#RESIZERS = (('growpart', ResizeGrowPart),)
RESIZERS = (('growpart', ResizeGrowPart), ('parted', ResizeParted))
RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart))
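
For illustration, the FreeBSD branch of device_part_info() above can be
exercised in isolation. A minimal sketch, using the same regex and a
hypothetical device path:

import re

# FreeBSD partition device paths look like /dev/vtbd0p2: disk /dev/vtbd0,
# partition 2. This mirrors the regex used in device_part_info() above.
def freebsd_part_info(devpath):
    m = re.search('^(/dev/.+)p([0-9])$', devpath)
    if not m:
        raise ValueError("%s does not look like a partition" % devpath)
    return (m.group(1), m.group(2))

print(freebsd_part_info('/dev/vtbd0p2'))  # ('/dev/vtbd0', '2')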

View File

@ -22,6 +22,7 @@ from cloudinit import util
import errno
import os
import re
import signal
import subprocess
import time
@ -30,6 +31,24 @@ frequency = PER_INSTANCE
EXIT_FAIL = 254
def givecmdline(pid):
# Returns the cmdline for the given process id. On Linux we can use
# procfs for this, but on BSD there is /usr/bin/procstat.
try:
# Example output from procstat -c 1
# PID COMM ARGS
# 1 init /bin/init --
if util.system_info()["platform"].startswith('FreeBSD'):
(output, _err) = util.subp(['procstat', '-c', str(pid)])
line = output.splitlines()[1]
m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
return m.group(2)
else:
return util.load_file("/proc/%s/cmdline" % pid)
except IOError:
return None
def handle(_name, cfg, _cloud, log, _args):
try:
@ -42,8 +61,8 @@ def handle(_name, cfg, _cloud, log, _args):
return
mypid = os.getpid()
cmdline = util.load_file("/proc/%s/cmdline" % mypid)
cmdline = givecmdline(mypid)
if not cmdline:
log.warn("power_state: failed to get cmdline of current process")
return
@ -119,8 +138,6 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
msg = None
end_time = time.time() + timeout
cmdline_f = "/proc/%s/cmdline" % pid
def fatal(msg):
if log:
log.warn(msg)
@ -134,16 +151,14 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
break
try:
cmdline = ""
with open(cmdline_f) as fp:
cmdline = fp.read()
cmdline = givecmdline(pid)
if cmdline != pidcmdline:
msg = "cmdline changed for %s [now: %s]" % (pid, cmdline)
break
except IOError as ioerr:
if ioerr.errno in known_errnos:
msg = "pidfile '%s' gone [%d]" % (cmdline_f, ioerr.errno)
msg = "pidfile gone [%d]" % ioerr.errno
else:
fatal("IOError during wait: %s" % ioerr)
break
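
To see what givecmdline() extracts on FreeBSD, here is a minimal sketch
that runs the same regex over the sample procstat output quoted in the
comment above:

import re

# Example output of `procstat -c 1`, as quoted above:
output = ("  PID COMM             ARGS\n"
          "    1 init             /bin/init --\n")

line = output.splitlines()[1]
m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
print(m.group(2))  # /bin/init --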

View File

@ -39,6 +39,10 @@ def _resize_ext(mount_point, devpth): # pylint: disable=W0613
def _resize_xfs(mount_point, devpth): # pylint: disable=W0613
return ('xfs_growfs', devpth)
def _resize_ufs(mount_point, devpth): # pylint: disable=W0613
return ('growfs', devpth)
# Do not use a dictionary: matching is done on filesystem-type
# prefixes, so one command can serve multiple filesystem types,
# e.g. a single entry for ext2, ext3 and ext4.
@ -46,11 +50,31 @@ RESIZE_FS_PREFIXES_CMDS = [
('btrfs', _resize_btrfs),
('ext', _resize_ext),
('xfs', _resize_xfs),
('ufs', _resize_ufs),
]
NOBLOCK = "noblock"
def rootdev_from_cmdline(cmdline):
found = None
for tok in cmdline.split():
if tok.startswith("root="):
found = tok[5:]
break
if found is None:
return None
if found.startswith("/dev/"):
return found
if found.startswith("LABEL="):
return "/dev/disk/by-label/" + found[len("LABEL="):]
if found.startswith("UUID="):
return "/dev/disk/by-uuid/" + found[len("UUID="):]
return "/dev/" + found
def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
resize_root = args[0]
@ -78,10 +102,20 @@ def handle(name, cfg, _cloud, log, args):
info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
log.debug("resize_info: %s" % info)
container = util.is_container()
if (devpth == "/dev/root" and not os.path.exists(devpth) and
not container):
devpth = rootdev_from_cmdline(util.get_cmdline())
if devpth is None:
log.warn("Unable to find device '/dev/root'")
return
log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)
try:
statret = os.stat(devpth)
except OSError as exc:
if util.is_container() and exc.errno == errno.ENOENT:
if container and exc.errno == errno.ENOENT:
log.debug("Device '%s' did not exist in container. "
"cannot resize: %s" % (devpth, info))
elif exc.errno == errno.ENOENT:
@ -91,8 +125,8 @@ def handle(name, cfg, _cloud, log, args):
raise exc
return
if not stat.S_ISBLK(statret.st_mode):
if util.is_container():
if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
if container:
log.debug("device '%s' not a block device in container."
" cannot resize: %s" % (devpth, info))
else:
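
To make the /dev/root conversion concrete, here is a self-contained copy
of rootdev_from_cmdline() from the hunk above, exercised on a few
hypothetical kernel command lines:

def rootdev_from_cmdline(cmdline):
    found = None
    for tok in cmdline.split():
        if tok.startswith("root="):
            found = tok[5:]
            break
    if found is None:
        return None
    if found.startswith("/dev/"):
        return found
    if found.startswith("LABEL="):
        return "/dev/disk/by-label/" + found[len("LABEL="):]
    if found.startswith("UUID="):
        return "/dev/disk/by-uuid/" + found[len("UUID="):]
    return "/dev/" + found

assert rootdev_from_cmdline("ro quiet") is None
assert rootdev_from_cmdline("root=/dev/vda1 ro") == "/dev/vda1"
assert rootdev_from_cmdline("root=LABEL=cloudimg-rootfs") == \
    "/dev/disk/by-label/cloudimg-rootfs"
assert rootdev_from_cmdline("root=vda1") == "/dev/vda1"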

View File

@ -0,0 +1,43 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2014 Canonical Ltd.
#
# Author: Ben Howard <ben.howard@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
SCRIPT_SUBDIR = 'vendor'
def handle(name, cfg, cloud, log, _args):
# This is written to by the vendor data handlers;
# any vendor data shell scripts get placed in runparts_path
runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
SCRIPT_SUBDIR)
prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
try:
util.runparts(runparts_path, exe_prefix=prefix)
except:
log.warn("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
raise

View File

@ -39,6 +39,7 @@ from cloudinit.distros.parsers import hosts
OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
'redhat': ['fedora', 'rhel'],
'freebsd': ['freebsd'],
'suse': ['sles']
}

View File

@ -0,0 +1,259 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2014 Harm Weites
#
# Author: Harm Weites <harm@weites.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from StringIO import StringIO
import re
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import ssh_util
from cloudinit import util
LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
rc_conf_fn = "/etc/rc.conf"
login_conf_fn = '/etc/login.conf'
login_conf_fn_bak = '/etc/login.conf.orig'
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
# This will be used to restrict certain
# calls from repeatedly happening (when they
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
self.osfamily = 'freebsd'
# Updates a key in /etc/rc.conf.
def updatercconf(self, key, value):
LOG.debug("updatercconf: %s => %s", key, value)
conf = self.loadrcconf()
config_changed = False
for item in conf:
if item == key and conf[item] != value:
conf[item] = value
LOG.debug("[rc.conf]: Value %s for key %s needs to be changed",
value, key)
config_changed = True
if config_changed:
LOG.debug("Writing new %s file", self.rc_conf_fn)
buf = StringIO()
for keyval in conf.items():
buf.write("%s=%s\n" % keyval)
util.write_file(self.rc_conf_fn, buf.getvalue())
# Load the contents of /etc/rc.conf and store all keys in a dict.
def loadrcconf(self):
conf = {}
lines = util.load_file(self.rc_conf_fn).splitlines()
for line in lines:
tok = line.split('=', 1)
conf[tok[0]] = tok[1].rstrip()
return conf
def readrcconf(self, key):
conf = self.loadrcconf()
try:
val = conf[key]
except KeyError:
val = None
return val
def _read_system_hostname(self):
sys_hostname = self._read_hostname()
return ('rc.conf', sys_hostname)
def _read_hostname(self, filename, default=None):
hostname = None
try:
hostname = self.readrcconf('hostname')
except IOError:
pass
if not hostname:
return default
return hostname
def _select_hostname(self, hostname, fqdn):
if not hostname:
return fqdn
return hostname
def _write_hostname(self, hostname, filename):
self.updatercconf('hostname', hostname)
def create_group(self, name, members):
group_add_cmd = ['pw', '-n', name]
if util.is_group(name):
LOG.warn("Skipping creation of existing group '%s'", name)
else:
try:
util.subp(group_add_cmd)
LOG.info("Created new group %s", name)
except Exception as e:
util.logexc(LOG, "Failed to create group %s", name)
raise e
if len(members) > 0:
for member in members:
if not util.is_user(member):
LOG.warn("Unable to add group member '%s' to group '%s'"
"; user does not exist.", member, name)
continue
try:
util.subp(['pw', 'usermod', '-n', name, '-G', member])
LOG.info("Added user '%s' to group '%s'", member, name)
except Exception:
util.logexc(LOG, "Failed to add user '%s' to group '%s'",
member, name)
def add_user(self, name, **kwargs):
if util.is_user(name):
LOG.info("User %s already exists, skipping.", name)
return False
adduser_cmd = ['pw', 'useradd', '-n', name]
log_adduser_cmd = ['pw', 'useradd', '-n', name]
adduser_opts = {
"homedir": '-d',
"gecos": '-c',
"primary_group": '-g',
"groups": '-G',
"passwd": '-h',
"shell": '-s',
"inactive": '-E',
}
adduser_flags = {
"no_user_group": '--no-user-group',
"system": '--system',
"no_log_init": '--no-log-init',
}
redact_opts = ['passwd']
for key, val in kwargs.iteritems():
if key in adduser_opts and val and isinstance(val, basestring):
adduser_cmd.extend([adduser_opts[key], val])
# Redact certain fields from the logs
if key in redact_opts:
log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
else:
log_adduser_cmd.extend([adduser_opts[key], val])
elif key in adduser_flags and val:
adduser_cmd.append(adduser_flags[key])
log_adduser_cmd.append(adduser_flags[key])
if 'no_create_home' in kwargs or 'system' in kwargs:
adduser_cmd.append('-d/nonexistent')
log_adduser_cmd.append('-d/nonexistent')
else:
adduser_cmd.append('-d/usr/home/%s' % name)
adduser_cmd.append('-m')
log_adduser_cmd.append('-d/usr/home/%s' % name)
log_adduser_cmd.append('-m')
# Run the command
LOG.info("Adding user %s", name)
try:
util.subp(adduser_cmd, logstring=log_adduser_cmd)
except Exception as e:
util.logexc(LOG, "Failed to create user %s", name)
raise e
# TODO:
def set_passwd(self, user, passwd, hashed=False):
return False
def lock_passwd(self, name):
try:
util.subp(['pw', 'usermod', name, '-h', '-'])
except Exception as e:
util.logexc(LOG, "Failed to lock user %s", name)
raise e
# TODO:
def write_sudo_rules(self, name, rules, sudo_file=None):
LOG.debug("[write_sudo_rules] Name: %s", name)
def create_user(self, name, **kwargs):
self.add_user(name, **kwargs)
# Set password if plain-text password provided and non-empty
if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
self.set_passwd(name, kwargs['plain_text_passwd'])
# Default locking down the account. 'lock_passwd' defaults to True.
# lock the account unless lock_passwd is False.
if kwargs.get('lock_passwd', True):
self.lock_passwd(name)
# Configure sudo access
if 'sudo' in kwargs:
self.write_sudo_rules(name, kwargs['sudo'])
# Import SSH keys
if 'ssh_authorized_keys' in kwargs:
keys = set(kwargs['ssh_authorized_keys']) or []
ssh_util.setup_user_keys(keys, name, options=None)
def _write_network(self, settings):
return
def apply_locale(self, locale, out_fn=None):
# Adjust the locale value to the new value
newconf = StringIO()
for line in util.load_file(self.login_conf_fn).splitlines():
newconf.write(re.sub(r'^default:',
r'default:lang=%s:' % locale, line))
newconf.write("\n")
# Make a backup of login.conf.
util.copy(self.login_conf_fn, self.login_conf_fn_bak)
# And write the new login.conf.
util.write_file(self.login_conf_fn, newconf.getvalue())
try:
LOG.debug("Running cap_mkdb for %s", locale)
util.subp(['cap_mkdb', self.login_conf_fn])
except util.ProcessExecutionError:
# cap_mkdb failed, so restore the backup.
util.logexc(LOG, "Failed to apply locale %s", locale)
try:
util.copy(self.login_conf_fn_bak, self.login_conf_fn)
except IOError:
util.logexc(LOG, "Failed to restore %s backup",
self.login_conf_fn)
def install_packages(self, pkglist):
return
def package_command(self, cmd, args=None, pkgs=None):
return
def set_timezone(self, tz):
return
def update_package_sources(self):
return

View File

@ -0,0 +1,163 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This is a util function to translate debian based distro interface blobs as
# given in /etc/network/interfaces to an *somewhat* agnostic format for
# distributions that use other formats.
#
# TODO(harlowja) remove when we have python-netcf active...
#
# The format is the following:
# {
# <device-name>: {
# # All optional (if not existent in original format)
# "netmask": <ip>,
# "broadcast": <ip>,
# "gateway": <ip>,
# "address": <ip>,
# "bootproto": "static"|"dhcp",
# "dns-search": <hostname>,
# "hwaddress": <mac-address>,
# "auto": True (or non-existent),
# "dns-nameservers": [<ip/hostname>, ...],
# }
# }
#
# Things to note: comments are removed; if an ubuntu/debian interface is
# marked as auto, then only the first segment is retained, ie
# 'auto eth0 eth0:1' just marks eth0 as auto (not eth0:1).
#
# Example input:
#
# auto lo
# iface lo inet loopback
#
# auto eth0
# iface eth0 inet static
# address 10.0.0.1
# netmask 255.255.252.0
# broadcast 10.0.0.255
# gateway 10.0.0.2
# dns-nameservers 98.0.0.1 98.0.0.2
#
# Example output:
# {
# "lo": {
# "auto": true
# },
# "eth0": {
# "auto": true,
# "dns-nameservers": [
# "98.0.0.1",
# "98.0.0.2"
# ],
# "broadcast": "10.0.0.255",
# "netmask": "255.255.252.0",
# "bootproto": "static",
# "address": "10.0.0.1",
# "gateway": "10.0.0.2"
# }
# }
def translate_network(settings):
# Get the standard cmd, args from the ubuntu format
entries = []
for line in settings.splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
split_up = line.split(None, 1)
if len(split_up) <= 1:
continue
entries.append(split_up)
# Figure out where each iface section is
ifaces = []
consume = {}
for (cmd, args) in entries:
if cmd == 'iface':
if consume:
ifaces.append(consume)
consume = {}
consume[cmd] = args
else:
consume[cmd] = args
# Check if anything left over to consume
absorb = False
for (cmd, args) in consume.iteritems():
if cmd == 'iface':
absorb = True
if absorb:
ifaces.append(consume)
# Now translate
real_ifaces = {}
for info in ifaces:
if 'iface' not in info:
continue
iface_details = info['iface'].split(None)
dev_name = None
if len(iface_details) >= 1:
dev = iface_details[0].strip().lower()
if dev:
dev_name = dev
if not dev_name:
continue
iface_info = {}
if len(iface_details) >= 3:
proto_type = iface_details[2].strip().lower()
# Seems like this can be 'loopback' which we don't
# really care about
if proto_type in ['dhcp', 'static']:
iface_info['bootproto'] = proto_type
# These can just be copied over
for k in ['netmask', 'address', 'gateway', 'broadcast']:
if k in info:
val = info[k].strip().lower()
if val:
iface_info[k] = val
# Name server info provided??
if 'dns-nameservers' in info:
iface_info['dns-nameservers'] = info['dns-nameservers'].split()
# Name server search info provided??
if 'dns-search' in info:
iface_info['dns-search'] = info['dns-search'].split()
# Is any mac address spoofing going on??
if 'hwaddress' in info:
hw_info = info['hwaddress'].lower().strip()
hw_split = hw_info.split(None, 1)
if len(hw_split) == 2 and hw_split[0].startswith('ether'):
hw_addr = hw_split[1]
if hw_addr:
iface_info['hwaddress'] = hw_addr
real_ifaces[dev_name] = iface_info
# Check for those that should be started on boot via 'auto'
for (cmd, args) in entries:
if cmd == 'auto':
# Seems like auto can be like 'auto eth0 eth0:1' so just get the
# first part out as the device name
args = args.split(None)
if not args:
continue
dev_name = args[0].strip().lower()
if dev_name in real_ifaces:
real_ifaces[dev_name]['auto'] = True
return real_ifaces

View File

@ -25,6 +25,7 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
from cloudinit.distros import net_util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@ -63,7 +64,7 @@ class Distro(distros.Distro):
def _write_network(self, settings):
# TODO(harlowja) fix this... since this is the ubuntu format
entries = rhel_util.translate_network(settings)
entries = net_util.translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
# Make the intermediate format as the rhel format...

View File

@ -30,94 +30,6 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
# This is a util function to translate Debian based distro interface blobs as
# given in /etc/network/interfaces to an equivalent format for distributions
# that use ifcfg-* style (Red Hat and SUSE).
# TODO(harlowja) remove when we have python-netcf active...
def translate_network(settings):
# Get the standard cmd, args from the ubuntu format
entries = []
for line in settings.splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
split_up = line.split(None, 1)
if len(split_up) <= 1:
continue
entries.append(split_up)
# Figure out where each iface section is
ifaces = []
consume = {}
for (cmd, args) in entries:
if cmd == 'iface':
if consume:
ifaces.append(consume)
consume = {}
consume[cmd] = args
else:
consume[cmd] = args
# Check if anything left over to consume
absorb = False
for (cmd, args) in consume.iteritems():
if cmd == 'iface':
absorb = True
if absorb:
ifaces.append(consume)
# Now translate
real_ifaces = {}
for info in ifaces:
if 'iface' not in info:
continue
iface_details = info['iface'].split(None)
dev_name = None
if len(iface_details) >= 1:
dev = iface_details[0].strip().lower()
if dev:
dev_name = dev
if not dev_name:
continue
iface_info = {}
if len(iface_details) >= 3:
proto_type = iface_details[2].strip().lower()
# Seems like this can be 'loopback' which we don't
# really care about
if proto_type in ['dhcp', 'static']:
iface_info['bootproto'] = proto_type
# These can just be copied over
for k in ['netmask', 'address', 'gateway', 'broadcast']:
if k in info:
val = info[k].strip().lower()
if val:
iface_info[k] = val
# Name server info provided??
if 'dns-nameservers' in info:
iface_info['dns-nameservers'] = info['dns-nameservers'].split()
# Name server search info provided??
if 'dns-search' in info:
iface_info['dns-search'] = info['dns-search'].split()
# Is any mac address spoofing going on??
if 'hwaddress' in info:
hw_info = info['hwaddress'].lower().strip()
hw_split = hw_info.split(None, 1)
if len(hw_split) == 2 and hw_split[0].startswith('ether'):
hw_addr = hw_split[1]
if hw_addr:
iface_info['hwaddress'] = hw_addr
real_ifaces[dev_name] = iface_info
# Check for those that should be started on boot via 'auto'
for (cmd, args) in entries:
if cmd == 'auto':
# Seems like auto can be like 'auto eth0 eth0:1' so just get the
# first part out as the device name
args = args.split(None)
if not args:
continue
dev_name = args[0].strip().lower()
if dev_name in real_ifaces:
real_ifaces[dev_name]['auto'] = True
return real_ifaces
# Helper function to update a RHEL/SUSE /etc/sysconfig/* file
def update_sysconfig_file(fn, adjustments, allow_empty=False):
if not adjustments:

View File

@ -26,6 +26,7 @@ from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
from cloudinit.distros import net_util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@ -54,7 +55,7 @@ class Distro(distros.Distro):
def _write_network(self, settings):
# Convert debian settings to ifcfg format
entries = rhel_util.translate_network(settings)
entries = net_util.translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
settings, entries)
# Make the intermediate format as the suse format...

View File

@ -16,48 +16,181 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import boto.utils as boto_utils
import httplib
from urlparse import (urlparse, urlunparse)
# Versions of boto >= 2.6.0 (and possibly 2.5.2)
# try to lazily load the metadata backing, which
# doesn't work so well in cloud-init especially
# since the metadata is serialized and actions are
# performed where the metadata server may be blocked
# (thus the datasource will start failing) resulting
# in url exceptions when fields that do exist (or
# would have existed) do not exist due to the blocking
# that occurred.
import functools
import json
import urllib
# TODO(harlowja): https://github.com/boto/boto/issues/1401
# When boto finally moves to using requests, we should be able
# to provide it ssl details, it does not yet, so we can't provide them...
from cloudinit import log as logging
from cloudinit import url_helper
from cloudinit import util
LOG = logging.getLogger(__name__)
SKIP_USERDATA_CODES = frozenset([httplib.NOT_FOUND])
def _unlazy_dict(mp):
if not isinstance(mp, (dict)):
return mp
# Walk over the keys/values which
# forces boto to unlazy itself and
# has no effect on dictionaries that
# already have their items.
for (_k, v) in mp.items():
_unlazy_dict(v)
return mp
def maybe_json_object(text):
if not text:
return False
text = text.strip()
if text.startswith("{") and text.endswith("}"):
return True
return False
def get_instance_userdata(api_version, metadata_address):
# Note: boto.utils.get_instance_userdata returns '' for empty string
# so the change from non-true to '' is not specifically necessary, but
# this way cloud-init will get consistent behavior even if boto changed
# in the future to return a None on "no user-data provided".
ud = boto_utils.get_instance_userdata(api_version, None, metadata_address)
if not ud:
ud = ''
return ud
def combine_url(base, add_on):
base_parsed = list(urlparse(base))
path = base_parsed[2]
if path and not path.endswith("/"):
path += "/"
path += urllib.quote(str(add_on), safe="/:")
base_parsed[2] = path
return urlunparse(base_parsed)
def get_instance_metadata(api_version, metadata_address):
metadata = boto_utils.get_instance_metadata(api_version, metadata_address)
if not isinstance(metadata, (dict)):
metadata = {}
return _unlazy_dict(metadata)
# See: http://bit.ly/TyoUQs
#
class MetadataMaterializer(object):
def __init__(self, blob, base_url, caller):
self._blob = blob
self._md = None
self._base_url = base_url
self._caller = caller
def _parse(self, blob):
leaves = {}
children = []
if not blob:
return (leaves, children)
def has_children(item):
if item.endswith("/"):
return True
else:
return False
def get_name(item):
if item.endswith("/"):
return item.rstrip("/")
return item
for field in blob.splitlines():
field = field.strip()
field_name = get_name(field)
if not field or not field_name:
continue
if has_children(field):
if field_name not in children:
children.append(field_name)
else:
contents = field.split("=", 1)
resource = field_name
if len(contents) > 1:
# What a PITA...
(ident, sub_contents) = contents
ident = util.safe_int(ident)
if ident is not None:
resource = "%s/openssh-key" % (ident)
field_name = sub_contents
leaves[field_name] = resource
return (leaves, children)
def materialize(self):
if self._md is not None:
return self._md
self._md = self._materialize(self._blob, self._base_url)
return self._md
def _decode_leaf_blob(self, field, blob):
if not blob:
return blob
if maybe_json_object(blob):
try:
# Assume it's json, unless it fails parsing...
return json.loads(blob)
except (ValueError, TypeError) as e:
LOG.warn("Field %s looked like a json object, but it was"
" not: %s", field, e)
if blob.find("\n") != -1:
return blob.splitlines()
return blob
def _materialize(self, blob, base_url):
(leaves, children) = self._parse(blob)
child_contents = {}
for c in children:
child_url = combine_url(base_url, c)
if not child_url.endswith("/"):
child_url += "/"
child_blob = str(self._caller(child_url))
child_contents[c] = self._materialize(child_blob, child_url)
leaf_contents = {}
for (field, resource) in leaves.items():
leaf_url = combine_url(base_url, resource)
leaf_blob = str(self._caller(leaf_url))
leaf_contents[field] = self._decode_leaf_blob(field, leaf_blob)
joined = {}
joined.update(child_contents)
for field in leaf_contents.keys():
if field in joined:
LOG.warn("Duplicate key found in results from %s", base_url)
else:
joined[field] = leaf_contents[field]
return joined
def _skip_retry_on_codes(status_codes, _request_args, cause):
"""Returns if a request should retry based on a given set of codes that
case retrying to be stopped/skipped.
"""
if cause.code in status_codes:
return False
return True
def get_instance_userdata(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5):
ud_url = combine_url(metadata_address, api_version)
ud_url = combine_url(ud_url, 'user-data')
user_data = ''
try:
# It is ok for userdata not to exist (that's why we stop retrying when
# NOT_FOUND occurs) and in that case we return an empty string.
exception_cb = functools.partial(_skip_retry_on_codes,
SKIP_USERDATA_CODES)
response = util.read_file_or_url(ud_url,
ssl_details=ssl_details,
timeout=timeout,
retries=retries,
exception_cb=exception_cb)
user_data = str(response)
except url_helper.UrlError as e:
if e.code not in SKIP_USERDATA_CODES:
util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
except Exception:
util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
return user_data
def get_instance_metadata(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5):
md_url = combine_url(metadata_address, api_version)
md_url = combine_url(md_url, 'meta-data')
caller = functools.partial(util.read_file_or_url,
ssl_details=ssl_details, timeout=timeout,
retries=retries)
try:
response = caller(md_url)
materializer = MetadataMaterializer(str(response), md_url, caller)
md = materializer.materialize()
if not isinstance(md, (dict)):
md = {}
return md
except Exception:
util.logexc(LOG, "Failed fetching metadata from url %s", md_url)
return {}
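
A minimal usage sketch of the new boto-free helpers; the address and
api_version shown are the defaults from the signatures above, and this
assumes a reachable metadata service:

from cloudinit import ec2_utils

# Crawls the metadata tree into a nested dict; on failure this logs
# and returns {} rather than raising.
md = ec2_utils.get_instance_metadata(
    api_version='latest', metadata_address='http://169.254.169.254')

# User-data is fetched separately; a 404 is treated as "no user-data
# provided" and yields an empty string instead of retrying.
ud = ec2_utils.get_instance_userdata()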

View File

@ -187,6 +187,10 @@ def _escape_string(text):
def walker_callback(data, filename, payload, headers):
content_type = headers['Content-Type']
if content_type in data.get('excluded'):
LOG.debug('content_type "%s" is excluded', content_type)
return
if content_type in PART_CONTENT_TYPES:
walker_handle_handler(data, content_type, filename, payload)
return

View File

@ -66,6 +66,8 @@ class CloudConfigPartHandler(handlers.Handler):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.cloud_buf = None
self.cloud_fn = paths.get_ipath("cloud_config")
if 'cloud_config_path' in _kwargs:
self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
self.file_names = []
def list_types(self):

View File

@ -36,6 +36,8 @@ class ShellScriptPartHandler(handlers.Handler):
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
self.script_dir = paths.get_ipath_cur('scripts')
if 'script_path' in _kwargs:
self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
def list_types(self):
return [

View File

@ -200,11 +200,13 @@ class Runners(object):
class ConfigMerger(object):
def __init__(self, paths=None, datasource=None,
additional_fns=None, base_cfg=None):
additional_fns=None, base_cfg=None,
include_vendor=True):
self._paths = paths
self._ds = datasource
self._fns = additional_fns
self._base_cfg = base_cfg
self._include_vendor = include_vendor
# Created on first use
self._cfg = None
@ -237,7 +239,13 @@ class ConfigMerger(object):
# a configuration file to use when running...
if not self._paths:
return i_cfgs
cc_fn = self._paths.get_ipath_cur('cloud_config')
cc_paths = ['cloud_config']
if self._include_vendor:
cc_paths.append('vendor_cloud_config')
for cc_p in cc_paths:
cc_fn = self._paths.get_ipath_cur(cc_p)
if cc_fn and os.path.isfile(cc_fn):
try:
i_cfgs.append(util.read_conf(cc_fn))
@ -331,13 +339,17 @@ class Paths(object):
self.lookups = {
"handlers": "handlers",
"scripts": "scripts",
"vendor_scripts": "scripts/vendor",
"sem": "sem",
"boothooks": "boothooks",
"userdata_raw": "user-data.txt",
"userdata": "user-data.txt.i",
"obj_pkl": "obj.pkl",
"cloud_config": "cloud-config.txt",
"vendor_cloud_config": "vendor-cloud-config.txt",
"data": "data",
"vendordata_raw": "vendor-data.txt",
"vendordata": "vendor-data.txt.i",
}
# Set when a datasource becomes active
self.datasource = ds

View File

@ -21,6 +21,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cloudinit.util as util
import re
from prettytable import PrettyTable
@ -40,27 +41,40 @@ def netdev_info(empty=""):
toks = line.lower().strip().split()
if toks[0] == "up":
devs[curdev]['up'] = True
# If the output of ifconfig doesn't contain the required info in the
# obvious place, use a regex filter to be sure.
elif len(toks) > 1:
if re.search(r"flags=\d+<up,", toks[1]):
devs[curdev]['up'] = True
fieldpost = ""
if toks[0] == "inet6":
fieldpost = "6"
for i in range(len(toks)):
if toks[i] == "hwaddr":
if toks[i] == "hwaddr" or toks[i] == "ether":
try:
devs[curdev]["hwaddr"] = toks[i + 1]
except IndexError:
pass
for field in ("addr", "bcast", "mask"):
# Couple the different items we're interested in with the correct
# field since FreeBSD/CentOS/Fedora differ in the output.
ifconfigfields = {
"addr:": "addr", "inet": "addr",
"bcast:": "bcast", "broadcast": "bcast",
"mask:": "mask", "netmask": "mask"
}
for origfield, field in ifconfigfields.items():
target = "%s%s" % (field, fieldpost)
if devs[curdev].get(target, ""):
continue
if toks[i] == "%s:" % field:
if toks[i] == "%s" % origfield:
try:
devs[curdev][target] = toks[i + 1]
except IndexError:
pass
elif toks[i].startswith("%s:" % field):
elif toks[i].startswith("%s" % origfield):
devs[curdev][target] = toks[i][len(field) + 1:]
if empty != "":
@ -73,15 +87,32 @@ def netdev_info(empty=""):
def route_info():
(route_out, _err) = util.subp(["route", "-n"])
(route_out, _err) = util.subp(["netstat", "-rn"])
routes = []
entries = route_out.splitlines()[1:]
for line in entries:
if not line:
continue
toks = line.split()
if len(toks) < 8 or toks[0] == "Kernel" or toks[0] == "Destination":
# FreeBSD shows 6 items in the routing table:
# Destination Gateway Flags Refs Use Netif Expire
# default 10.65.0.1 UGS 0 34920 vtnet0
#
# Linux netstat shows 2 more:
# Destination Gateway Genmask Flags MSS Window irtt Iface
# 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
if (len(toks) < 6 or toks[0] == "Kernel" or
toks[0] == "Destination" or toks[0] == "Internet" or
toks[0] == "Internet6" or toks[0] == "Routing"):
continue
if len(toks) < 8:
toks.append("-")
toks.append("-")
toks[7] = toks[5]
toks[5] = "-"
entry = {
'destination': toks[0],
'gateway': toks[1],
@ -92,6 +123,7 @@ def route_info():
'use': toks[6],
'iface': toks[7],
}
routes.append(entry)
return routes
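
To illustrate the padding above, here is what happens to the FreeBSD
netstat row quoted in the comment before it becomes an entry dict:

# FreeBSD row: 6 columns, no Genmask/MSS/Window/irtt.
toks = "default 10.65.0.1 UGS 0 34920 vtnet0".split()
if len(toks) < 8:
    toks.append("-")
    toks.append("-")
    toks[7] = toks[5]  # move the interface name into the Linux slot
    toks[5] = "-"
print(toks)
# ['default', '10.65.0.1', 'UGS', '0', '34920', '-', '-', 'vtnet0']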

View File

@ -52,6 +52,7 @@ CFG_BUILTIN = {
},
'distro': 'ubuntu',
},
'vendor_data': {'enabled': True, 'prefix': []},
}
# Valid frequencies of handlers/modules

View File

@ -284,8 +284,10 @@ def find_candidate_devs():
# followed by fstype items, but with dupes removed
combined = (by_label + [d for d in by_fstype if d not in by_label])
# We are looking for block device (sda, not sda1), ignore partitions
combined = [d for d in combined if not util.is_partition(d)]
# We are looking for a block device or partition with necessary label or
# an unpartitioned block device.
combined = [d for d in combined
if d in by_label or not util.is_partition(d)]
return combined

View File

@ -50,41 +50,48 @@ class DataSourceNoCloud(sources.DataSource):
}
found = []
md = {}
ud = ""
mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}
try:
# Parse the kernel command line, getting data passed in
md = {}
if parse_cmdline_data(self.cmdline_id, md):
found.append("cmdline")
mydata.update(md)
except:
util.logexc(LOG, "Unable to parse command line data")
return False
# Check to see if the seed dir has data.
seedret = {}
if util.read_optional_seed(seedret, base=self.seed_dir + "/"):
md = util.mergemanydict([md, seedret['meta-data']])
ud = seedret['user-data']
pp2d_kwargs = {'required': ['user-data', 'meta-data'],
'optional': ['vendor-data']}
try:
seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs)
found.append(self.seed_dir)
LOG.debug("Using seeded cache data from %s", self.seed_dir)
LOG.debug("Using seeded data from %s", self.seed_dir)
except ValueError as e:
pass
if self.seed_dir in found:
mydata = _merge_new_seed(mydata, seeded)
# If the datasource config had a 'seedfrom' entry, then that takes
# precedence over a 'seedfrom' that was found in a filesystem
# but not over external media
if 'seedfrom' in self.ds_cfg and self.ds_cfg['seedfrom']:
found.append("ds_config")
md["seedfrom"] = self.ds_cfg['seedfrom']
if self.ds_cfg.get('seedfrom'):
found.append("ds_config_seedfrom")
mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']
# if ds_cfg has 'user-data' and 'meta-data'
# fields appropriately named can also just come from the datasource
# config (ie, 'user-data', 'meta-data', 'vendor-data' there)
if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
if self.ds_cfg['user-data']:
ud = self.ds_cfg['user-data']
if self.ds_cfg['meta-data'] is not False:
md = util.mergemanydict([md, self.ds_cfg['meta-data']])
if 'ds_config' not in found:
mydata = _merge_new_seed(mydata, self.ds_cfg)
found.append("ds_config")
def _pp2d_callback(mp, data):
return util.pathprefix2dict(mp, **data)
label = self.ds_cfg.get('fs_label', "cidata")
if label is not None:
# Query optical drive to get it in blkid cache for 2.6 kernels
@ -102,15 +109,21 @@ class DataSourceNoCloud(sources.DataSource):
try:
LOG.debug("Attempting to use data from %s", dev)
(newmd, newud) = util.mount_cb(dev, util.read_seeded)
md = util.mergemanydict([newmd, md])
ud = newud
try:
seeded = util.mount_cb(dev, _pp2d_callback, pp2d_kwargs)
except ValueError as e:
if dev in label_list:
LOG.warn("device %s with label=%s not a"
"valid seed.", dev, label)
continue
mydata = _merge_new_seed(mydata, seeded)
# For seed from a device, the default mode is 'net'.
# That is more likely to be what is desired. If they want
# dsmode of local, then they must specify that.
if 'dsmode' not in md:
md['dsmode'] = "net"
if 'dsmode' not in mydata['meta-data']:
mydata['meta-data']['dsmode'] = "net"
LOG.debug("Using data from %s", dev)
found.append(dev)
@ -133,8 +146,8 @@ class DataSourceNoCloud(sources.DataSource):
# attempt to seed the userdata / metadata from its value
# its primary value is in allowing the user to type less
# on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
if "seedfrom" in md:
seedfrom = md["seedfrom"]
if "seedfrom" in mydata['meta-data']:
seedfrom = mydata['meta-data']["seedfrom"]
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
@ -144,7 +157,7 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
if 'network-interfaces' in md:
if 'network-interfaces' in mydata['meta-data']:
seeded_interfaces = self.dsmode
# This could throw errors, but the user told us to do it
@ -153,25 +166,30 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Using seeded cache data from %s", seedfrom)
# Values in the command line override those from the seed
md = util.mergemanydict([md, md_seed])
mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
md_seed])
mydata['user-data'] = ud
found.append(seedfrom)
# Now that we have exhausted any other places merge in the defaults
md = util.mergemanydict([md, defaults])
mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
defaults])
# Update the network-interfaces if metadata had 'network-interfaces'
# entry and this is the local datasource, or 'seedfrom' was used
# and the source of the seed was self.dsmode
# ('local' for NoCloud, 'net' for NoCloudNet')
if ('network-interfaces' in md and
if ('network-interfaces' in mydata['meta-data'] and
(self.dsmode in ("local", seeded_interfaces))):
LOG.debug("Updating network interfaces from %s", self)
self.distro.apply_network(md['network-interfaces'])
self.distro.apply_network(
mydata['meta-data']['network-interfaces'])
if md['dsmode'] == self.dsmode:
if mydata['meta-data']['dsmode'] == self.dsmode:
self.seed = ",".join(found)
self.metadata = md
self.userdata_raw = ud
self.metadata = mydata['meta-data']
self.userdata_raw = mydata['user-data']
self.vendordata = mydata['vendor-data']
return True
LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
@ -222,6 +240,16 @@ def parse_cmdline_data(ds_id, fill, cmdline=None):
return True
def _merge_new_seed(cur, seeded):
ret = cur.copy()
ret['meta-data'] = util.mergemanydict([cur['meta-data'],
util.load_yaml(seeded['meta-data'])])
ret['user-data'] = seeded['user-data']
if 'vendor-data' in seeded:
ret['vendor-data'] = seeded['vendor-data']
return ret
class DataSourceNoCloudNet(DataSourceNoCloud):
def __init__(self, sys_cfg, distro, paths):
DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)

View File

@ -323,7 +323,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
(output, _error) = util.subp(cmd, data=bcmd)
# exclude vars in bash that change on their own or that we used
excluded = ("RANDOM", "LINENO", "_", "__v")
excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v")
preset = {}
ret = {}
target = None

View File

@ -25,7 +25,9 @@
# requests on the console. For example, to get the hostname, you
# would send "GET hostname" on /dev/ttyS1.
#
# Certain behavior is defined by the DataDictionary
# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
# Comments with "@datadictionary" are snippets of the definition
import base64
from cloudinit import log as logging
@ -43,10 +45,12 @@ SMARTOS_ATTRIB_MAP = {
'local-hostname': ('hostname', True),
'public-keys': ('root_authorized_keys', True),
'user-script': ('user-script', False),
'user-data': ('user-data', False),
'legacy-user-data': ('user-data', False),
'user-data': ('cloud-init:user-data', False),
'iptables_disable': ('iptables_disable', True),
'motd_sys_info': ('motd_sys_info', True),
'availability_zone': ('datacenter_name', True),
'availability_zone': ('sdc:datacenter_name', True),
'vendordata': ('sdc:operator-script', False),
}
DS_NAME = 'SmartOS'
@ -70,7 +74,11 @@ BUILTIN_DS_CONFIG = {
'seed_timeout': 60,
'no_base64_decode': ['root_authorized_keys',
'motd_sys_info',
'iptables_disable'],
'iptables_disable',
'user-data',
'user-script',
'sdc:datacenter_name',
],
'base64_keys': [],
'base64_all': False,
'disk_aliases': {'ephemeral0': '/dev/vdb'},
@ -87,6 +95,11 @@ BUILTIN_CLOUD_CONFIG = {
'device': 'ephemeral0'}],
}
# @datadictionary: this is the legacy path for placing files from
# metadata per the SmartOS location. It is not preferred, but is
# kept for legacy reasons.
LEGACY_USER_D = "/var/db"
class DataSourceSmartOS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
@ -106,6 +119,9 @@ class DataSourceSmartOS(sources.DataSource):
self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
self.b64_keys = self.ds_cfg.get('base64_keys')
self.b64_all = self.ds_cfg.get('base64_all')
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
self.user_script_d = os.path.join(self.paths.get_cpath("scripts"),
'per-boot')
def __str__(self):
root = sources.DataSource.__str__(self)
@ -143,17 +159,36 @@ class DataSourceSmartOS(sources.DataSource):
smartos_noun, strip = attribute
md[ci_noun] = self.query(smartos_noun, strip=strip)
# @datadictionary: This key may contain a program that is written
# to a file in the filesystem of the guest on each boot and then
# executed. It may be of any format that would be considered
# executable in the guest instance.
u_script = md.get('user-script')
u_script_f = "%s/99_user_script" % self.user_script_d
u_script_l = "%s/user-script" % LEGACY_USER_D
write_boot_content(u_script, u_script_f, link=u_script_l, shebang=True,
mode=0700)
# @datadictionary: This key has no defined format, but its value
# is written to the file /var/db/mdata-user-data on each boot prior
# to the phase that runs user-script. This file is not to be executed.
# This allows a configuration file of some kind to be injected into
# the machine to be consumed by the user-script when it runs.
u_data = md.get('legacy-user-data')
u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
write_boot_content(u_data, u_data_f)
# Handle the cloud-init regular meta
if not md['local-hostname']:
md['local-hostname'] = system_uuid
ud = None
if md['user-data']:
ud = md['user-data']
elif md['user-script']:
ud = md['user-script']
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
self.vendordata_raw = md['vendordata']
return True
def device_name_to_device(self, name):
@ -277,6 +312,62 @@ def dmi_data():
return (sys_uuid.lower().strip(), sys_type.strip())
def write_boot_content(content, content_f, link=None, shebang=False,
mode=0400):
"""
Write the content to content_f, under the following rules:
1. If there is no content, remove the file
2. Write the content
3. If executable and no file magic, add it
4. If there is a link, create it
@param content: what to write
@param content_f: the file name
@param link: if defined, location to create a symlink to
@param shebang: if no file magic, set shebang
@param mode: file mode
Because of the way that Cloud-init executes scripts (no shell), a
script will fail to execute if it does not have a magic bit (shebang)
set in the file. If shebang=True, the script is checked for a magic
bit and, if missing, one is added using the SmartOS default of bash.
"""
if not content and os.path.exists(content_f):
os.unlink(content_f)
if link and os.path.islink(link):
os.unlink(link)
if not content:
return
util.write_file(content_f, content, mode=mode)
if shebang and not content.startswith("#!"):
try:
cmd = ["file", "--brief", "--mime-type", content_f]
(f_type, _err) = util.subp(cmd)
LOG.debug("script %s mime type is %s", content_f, f_type)
if f_type.strip() == "text/plain":
new_content = "\n".join(["#!/bin/bash", content])
util.write_file(content_f, new_content, mode=mode)
LOG.debug("added shebang to file %s", content_f)
except Exception as e:
util.logexc(LOG, ("Failed to identify script type for %s" %
content_f, e))
if link:
try:
if os.path.islink(link):
os.unlink(link)
if content and os.path.exists(content_f):
util.ensure_dir(os.path.dirname(link))
os.symlink(content_f, link)
except IOError as e:
util.logexc(LOG, "failed establishing content link", e)
# Used to match classes to dependencies
datasources = [
(DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
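
A small sketch of the shebang rule described in write_boot_content()
above: a plain-text script with no magic bit gets the SmartOS default
of bash prepended (the content here is hypothetical):

content = "echo hello"

# file(1) would report text/plain for this, so add the shebang,
# mirroring the new_content logic in write_boot_content() above.
if not content.startswith("#!"):
    content = "\n".join(["#!/bin/bash", content])
print(content)
# #!/bin/bash
# echo hello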

View File

@ -53,6 +53,8 @@ class DataSource(object):
self.userdata = None
self.metadata = None
self.userdata_raw = None
self.vendordata = None
self.vendordata_raw = None
# find the datasource config name.
# remove 'DataSource' from classname on front, and remove 'Net' on end.
@ -77,9 +79,14 @@ class DataSource(object):
if self.userdata is None:
self.userdata = self.ud_proc.process(self.get_userdata_raw())
if apply_filter:
return self._filter_userdata(self.userdata)
return self._filter_xdata(self.userdata)
return self.userdata
def get_vendordata(self):
if self.vendordata is None:
self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
return self.vendordata
@property
def launch_index(self):
if not self.metadata:
@ -88,7 +95,7 @@ class DataSource(object):
return self.metadata['launch-index']
return None
def _filter_userdata(self, processed_ud):
def _filter_xdata(self, processed_ud):
filters = [
launch_index.Filter(util.safe_int(self.launch_index)),
]
@ -104,6 +111,9 @@ class DataSource(object):
def get_userdata_raw(self):
return self.userdata_raw
def get_vendordata_raw(self):
return self.vendordata_raw
# the data sources' config_obj is a cloud-config formated
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
@ -119,7 +129,7 @@ class DataSource(object):
# when the kernel named them 'vda' or 'xvda'
# we want to return the correct value for what will actually
# exist in this instance
mappings = {"sd": ("vd", "xvd")}
mappings = {"sd": ("vd", "xvd", "vtb")}
for (nfrom, tlist) in mappings.iteritems():
if not short_name.startswith(nfrom):
continue

View File

@ -123,6 +123,7 @@ class Init(object):
os.path.join(c_dir, 'scripts', 'per-instance'),
os.path.join(c_dir, 'scripts', 'per-once'),
os.path.join(c_dir, 'scripts', 'per-boot'),
os.path.join(c_dir, 'scripts', 'vendor'),
os.path.join(c_dir, 'seed'),
os.path.join(c_dir, 'instances'),
os.path.join(c_dir, 'handlers'),
@ -319,6 +320,7 @@ class Init(object):
if not self._write_to_cache():
return
self._store_userdata()
self._store_vendordata()
def _store_userdata(self):
raw_ud = "%s" % (self.datasource.get_userdata_raw())
@ -326,11 +328,20 @@ class Init(object):
processed_ud = "%s" % (self.datasource.get_userdata())
util.write_file(self._get_ipath('userdata'), processed_ud, 0600)
def _default_userdata_handlers(self):
opts = {
def _store_vendordata(self):
raw_vd = "%s" % (self.datasource.get_vendordata_raw())
util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0600)
processed_vd = "%s" % (self.datasource.get_vendordata())
util.write_file(self._get_ipath('vendordata'), processed_vd, 0600)
def _default_handlers(self, opts=None):
if opts is None:
opts = {}
opts.update({
'paths': self.paths,
'datasource': self.datasource,
}
})
# TODO(harlowja) Hmmm, should we dynamically import these??
def_handlers = [
cc_part.CloudConfigPartHandler(**opts),
@ -340,7 +351,23 @@ class Init(object):
]
return def_handlers
def consume_userdata(self, frequency=PER_INSTANCE):
def _default_userdata_handlers(self):
return self._default_handlers()
def _default_vendordata_handlers(self):
return self._default_handlers(
opts={'script_path': 'vendor_scripts',
'cloud_config_path': 'vendor_cloud_config'})
def _do_handlers(self, data_msg, c_handlers_list, frequency,
excluded=None):
"""
Generalized handlers suitable for use with either vendordata
or userdata
"""
if excluded is None:
excluded = []
cdir = self.paths.get_cpath("handlers")
idir = self._get_ipath("handlers")
@ -352,12 +379,6 @@ class Init(object):
if d and d not in sys.path:
sys.path.insert(0, d)
# Ensure datasource fetched before activation (just in case)
user_data_msg = self.datasource.get_userdata(True)
# This keeps track of all the active handlers
c_handlers = helpers.ContentHandlers()
def register_handlers_in_dir(path):
# Attempts to register any handler modules under the given path.
if not path or not os.path.isdir(path):
@ -382,13 +403,16 @@ class Init(object):
util.logexc(LOG, "Failed to register handler from %s",
fname)
# This keeps track of all the active handlers
c_handlers = helpers.ContentHandlers()
# Add any handlers in the cloud-dir
register_handlers_in_dir(cdir)
# Register any other handlers that come from the default set. This
# is done after the cloud-dir handlers so that the cdir modules can
# take over the default user-data handler content-types.
for mod in self._default_userdata_handlers():
for mod in c_handlers_list:
types = c_handlers.register(mod, overwrite=False)
if types:
LOG.debug("Added default handler for %s from %s", types, mod)
@ -406,7 +430,7 @@ class Init(object):
handlers.call_begin(mod, data, frequency)
c_handlers.initialized.append(mod)
def walk_handlers():
def walk_handlers(excluded):
# Walk the user data
part_data = {
'handlers': c_handlers,
@ -419,9 +443,9 @@ class Init(object):
# to help write their contents to files with numbered
# names...
'handlercount': 0,
'excluded': excluded,
}
handlers.walk(user_data_msg, handlers.walker_callback,
data=part_data)
handlers.walk(data_msg, handlers.walker_callback, data=part_data)
def finalize_handlers():
# Give callbacks opportunity to finalize
@ -438,10 +462,16 @@ class Init(object):
try:
init_handlers()
walk_handlers()
walk_handlers(excluded)
finally:
finalize_handlers()
def consume_data(self, frequency=PER_INSTANCE):
# Consume the userdata first, because we want to let the part
# handlers run first (for merging stuff)
self._consume_userdata(frequency)
self._consume_vendordata(frequency)
# Perform post-consumption adjustments so that
# modules that run during the init stage reflect
# this consumed set.
@ -453,6 +483,64 @@ class Init(object):
# objects before the load of the userdata happened,
# this is expected.
def _consume_vendordata(self, frequency=PER_INSTANCE):
"""
Consume the vendordata and run the part handlers on it
"""
# User-data should have been consumed first.
# So we merge the other available cloud-configs (everything except
# vendor provided), and check whether or not we should consume
# vendor data at all. That gives the user or system a chance to override.
if not self.datasource.get_vendordata_raw():
LOG.debug("no vendordata from datasource")
return
_cc_merger = helpers.ConfigMerger(paths=self._paths,
datasource=self.datasource,
additional_fns=[],
base_cfg=self.cfg,
include_vendor=False)
vdcfg = _cc_merger.cfg.get('vendor_data', {})
if not isinstance(vdcfg, dict):
vdcfg = {'enabled': False}
LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)
enabled = vdcfg.get('enabled')
no_handlers = vdcfg.get('disabled_handlers', None)
if not util.is_true(enabled):
LOG.debug("vendordata consumption is disabled.")
return
LOG.debug("vendor data will be consumed. disabled_handlers=%s",
no_handlers)
# Ensure vendordata source fetched before activation (just in case)
vendor_data_msg = self.datasource.get_vendordata()
# This keeps track of all the active handlers, while excluding what the
# user doesn't want run, i.e. boot_hook, cloud_config, shell_script
c_handlers_list = self._default_vendordata_handlers()
# Run the handlers
self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
excluded=no_handlers)
def _consume_userdata(self, frequency=PER_INSTANCE):
"""
Consume the userdata and run the part handlers
"""
# Ensure datasource fetched before activation (just in case)
user_data_msg = self.datasource.get_userdata(True)
# This keeps track of all the active handlers
c_handlers_list = self._default_handlers()
# Run the handlers
self._do_handlers(user_data_msg, c_handlers_list, frequency)
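A minimal sketch of the user-supplied cloud-config that drives the override
logic in _consume_vendordata above (the 'vendor_data', 'enabled' and
'disabled_handlers' keys are read directly by that method; the handler name
listed is the illustrative one from its comments):

#cloud-config
vendor_data:
  enabled: True
  disabled_handlers: ['shell_script']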
class Modules(object):
def __init__(self, init, cfg_files=None):

View File

@ -20,6 +20,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import httplib
import time
import requests
@ -32,6 +33,8 @@ from cloudinit import version
LOG = logging.getLogger(__name__)
NOT_FOUND = httplib.NOT_FOUND
# Check if requests has ssl support (added in requests >= 0.8.8)
SSL_ENABLED = False
CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
@ -58,6 +61,31 @@ def _cleanurl(url):
return urlunparse(parsed_url)
# Made to have same accessors as UrlResponse so that the
# read_file_or_url can return this or that object and the
# 'user' of those objects will not need to know the difference.
class StringResponse(object):
def __init__(self, contents, code=200):
self.code = code
self.headers = {}
self.contents = contents
self.url = None
def ok(self, *args, **kwargs): # pylint: disable=W0613
if self.code != 200:
return False
return True
def __str__(self):
return self.contents
class FileResponse(StringResponse):
def __init__(self, path, contents, code=200):
StringResponse.__init__(self, contents, code=code)
self.url = path
class UrlResponse(object):
def __init__(self, response):
self._response = response
@ -103,7 +131,7 @@ class UrlError(IOError):
def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
headers=None, headers_cb=None, ssl_details=None,
check_status=True, allow_redirects=True):
check_status=True, allow_redirects=True, exception_cb=None):
url = _cleanurl(url)
req_args = {
'url': url,
@ -163,14 +191,13 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# Handle retrying ourselves since the built-in support
# doesn't handle sleeping between tries...
for i in range(0, manual_tries):
try:
req_args['headers'] = headers_cb(url)
filtered_req_args = {}
for (k, v) in req_args.items():
if k == 'data':
continue
filtered_req_args[k] = v
try:
LOG.debug("[%s/%s] open '%s' with %s configuration", i,
manual_tries, url, filtered_req_args)
@ -196,6 +223,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# ssl exceptions are not going to get fixed by waiting a
# few seconds
break
if exception_cb and not exception_cb(filtered_req_args, excps[-1]):
break
if i + 1 < manual_tries and sec_between > 0:
LOG.debug("Please wait %s seconds while we wait to try again",
sec_between)
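From the loop above, exception_cb receives the filtered request arguments
and the most recent exception; returning a falsy value aborts the retry
loop. A minimal sketch of such a callback (the .code attribute is assumed
from UrlError, which this commit raises elsewhere for file:// misses):

def retry_unless_not_found(req_args, exc):
    # A hard 404 will not improve with retries; anything else might.
    return getattr(exc, 'code', None) != NOT_FOUND

resp = readurl("http://169.254.169.254/latest/meta-data/",
               retries=5, exception_cb=retry_unless_not_found)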

View File

@ -88,6 +88,10 @@ class UserDataProcessor(object):
def process(self, blob):
accumulating_msg = MIMEMultipart()
if isinstance(blob, list):
for b in blob:
self._process_msg(convert_string(b), accumulating_msg)
else:
self._process_msg(convert_string(blob), accumulating_msg)
return accumulating_msg
@ -307,7 +311,8 @@ class UserDataProcessor(object):
def _attach_part(self, outer_msg, part):
"""
Attach a message to an outer message. outermsg must be a MIMEMultipart.
Modifies a header in the outer message to keep track of number of attachments.
Modifies a header in the outer message to keep track of number of
attachments.
"""
part_count = self._multi_part_count(outer_msg)
self._process_before_attach(part, part_count + 1)

View File

@ -26,6 +26,7 @@ from StringIO import StringIO
import contextlib
import copy as obj_copy
import ctypes
import errno
import glob
import grp
@ -36,6 +37,7 @@ import os.path
import platform
import pwd
import random
import re
import shutil
import socket
import stat
@ -72,31 +74,6 @@ FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
# Made to have same accessors as UrlResponse so that the
# read_file_or_url can return this or that object and the
# 'user' of those objects will not need to know the difference.
class StringResponse(object):
def __init__(self, contents, code=200):
self.code = code
self.headers = {}
self.contents = contents
self.url = None
def ok(self, *args, **kwargs): # pylint: disable=W0613
if self.code != 200:
return False
return True
def __str__(self):
return self.contents
class FileResponse(StringResponse):
def __init__(self, path, contents, code=200):
StringResponse.__init__(self, contents, code=code)
self.url = path
class ProcessExecutionError(IOError):
MESSAGE_TMPL = ('%(description)s\n'
@ -392,11 +369,11 @@ def is_ipv4(instr):
return False
try:
toks = [x for x in toks if (int(x) < 256 and int(x) > 0)]
toks = [x for x in toks if int(x) < 256 and int(x) >= 0]
except:
return False
return (len(toks) == 4)
return len(toks) == 4
def get_cfg_option_bool(yobj, key, default=False):
@ -608,18 +585,28 @@ def del_dir(path):
shutil.rmtree(path)
def runparts(dirp, skip_no_exist=True):
def runparts(dirp, skip_no_exist=True, exe_prefix=None):
if skip_no_exist and not os.path.isdir(dirp):
return
failed = []
attempted = []
if exe_prefix is None:
prefix = []
elif isinstance(exe_prefix, str):
prefix = [str(exe_prefix)]
elif isinstance(exe_prefix, list):
prefix = exe_prefix
else:
raise TypeError("exe_prefix must be None, str, or list")
for exe_name in sorted(os.listdir(dirp)):
exe_path = os.path.join(dirp, exe_name)
if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
attempted.append(exe_path)
try:
subp([exe_path], capture=False)
subp(prefix + [exe_path], capture=False)
except ProcessExecutionError as e:
logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
failed.append(e)
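A hedged usage sketch of the new exe_prefix parameter (the directory and the
ltrace wrapper are illustrative, borrowed from the vendor_data 'prefix'
example elsewhere in this commit):

runparts('/var/lib/cloud/scripts/per-boot', exe_prefix=['/usr/bin/ltrace'])

Each executable found is then invoked as ['/usr/bin/ltrace', exe_path].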
@ -639,8 +626,8 @@ def read_optional_seed(fill, base="", ext="", timeout=5):
fill['user-data'] = ud
fill['meta-data'] = md
return True
except IOError as e:
if e.errno == errno.ENOENT:
except url_helper.UrlError as e:
if e.code == url_helper.NOT_FOUND:
return False
raise
@ -679,7 +666,7 @@ def fetch_ssl_details(paths=None):
def read_file_or_url(url, timeout=5, retries=10,
headers=None, data=None, sec_between=1, ssl_details=None,
headers_cb=None):
headers_cb=None, exception_cb=None):
url = url.lstrip()
if url.startswith("/"):
url = "file://%s" % url
@ -687,7 +674,14 @@ def read_file_or_url(url, timeout=5, retries=10,
if data:
LOG.warn("Unable to post data to file resource %s", url)
file_path = url[len("file://"):]
return FileResponse(file_path, contents=load_file(file_path))
try:
contents = load_file(file_path)
except IOError as e:
code = e.errno
if e.errno == errno.ENOENT:
code = url_helper.NOT_FOUND
raise url_helper.UrlError(cause=e, code=code, headers=None)
return url_helper.FileResponse(file_path, contents=contents)
else:
return url_helper.readurl(url,
timeout=timeout,
@ -696,7 +690,8 @@ def read_file_or_url(url, timeout=5, retries=10,
headers_cb=headers_cb,
data=data,
sec_between=sec_between,
ssl_details=ssl_details)
ssl_details=ssl_details,
exception_cb=exception_cb)
def load_yaml(blob, default=None, allowed=(dict,)):
@ -950,7 +945,7 @@ def is_resolvable(name):
pass
_DNS_REDIRECT_IP = badips
if badresults:
LOG.debug("detected dns redirection: %s" % badresults)
LOG.debug("detected dns redirection: %s", badresults)
try:
result = socket.getaddrinfo(name, None)
@ -977,7 +972,7 @@ def gethostbyaddr(ip):
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
return (is_resolvable(urlparse.urlparse(url).hostname))
return is_resolvable(urlparse.urlparse(url).hostname)
def search_for_mirror(candidates):
@ -1302,11 +1297,26 @@ def mounts():
mounted = {}
try:
# Go through mounts to see what is already mounted
if os.path.exists("/proc/mounts"):
mount_locs = load_file("/proc/mounts").splitlines()
method = 'proc'
else:
(mountoutput, _err) = subp("mount")
mount_locs = mountoutput.splitlines()
method = 'mount'
mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
for mpline in mount_locs:
# Format at: man fstab
# Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
# FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
try:
if method == 'proc':
(dev, mp, fstype, opts, _freq, _passno) = mpline.split()
else:
m = re.search(mountre, mpline)
dev = m.group(1)
mp = m.group(2)
fstype = m.group(3)
opts = m.group(4)
except:
continue
# If the name of the mount point contains spaces these
@ -1317,9 +1327,9 @@ def mounts():
'mountpoint': mp,
'opts': opts,
}
LOG.debug("Fetched %s mounts from %s", mounted, "/proc/mounts")
LOG.debug("Fetched %s mounts from %s", mounted, method)
except (IOError, OSError):
logexc(LOG, "Failed fetching mount points from /proc/mounts")
logexc(LOG, "Failed fetching mount points")
return mounted
@ -1376,7 +1386,7 @@ def get_builtin_cfg():
def sym_link(source, link):
LOG.debug("Creating symbolic link from %r => %r" % (link, source))
LOG.debug("Creating symbolic link from %r => %r", link, source)
os.symlink(source, link)
@ -1404,12 +1414,27 @@ def time_rfc2822():
def uptime():
uptime_str = '??'
method = 'unknown'
try:
if os.path.exists("/proc/uptime"):
method = '/proc/uptime'
contents = load_file("/proc/uptime").strip()
if contents:
uptime_str = contents.split()[0]
else:
method = 'ctypes'
libc = ctypes.CDLL('/lib/libc.so.7')
size = ctypes.c_size_t()
buf = ctypes.c_int()
size.value = ctypes.sizeof(buf)
libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
ctypes.byref(size), None, 0)
now = time.time()
bootup = buf.value
uptime_str = str(now - bootup)
except:
logexc(LOG, "Unable to read uptime from /proc/uptime")
logexc(LOG, "Unable to read uptime using method: %s" % method)
return uptime_str
@ -1748,6 +1773,19 @@ def parse_mtab(path):
return None
def parse_mount(path):
(mountoutput, _err) = subp("mount")
mount_locs = mountoutput.splitlines()
for line in mount_locs:
m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
if not m:
continue
devpth = m.group(1)
mount_point = m.group(2)
fs_type = m.group(3)
if mount_point == path:
return devpth, fs_type, mount_point
return None
def get_mount_info(path, log=LOG):
# Use /proc/$$/mountinfo to find the device where path is mounted.
# This is done because with a btrfs filesystem using os.stat(path)
@ -1781,8 +1819,10 @@ def get_mount_info(path, log=LOG):
if os.path.exists(mountinfo_path):
lines = load_file(mountinfo_path).splitlines()
return parse_mount_info(path, lines, log)
else:
elif os.path.exists("/etc/mtab"):
return parse_mtab(path)
else:
return parse_mount(path)
def which(program):
@ -1795,7 +1835,7 @@ def which(program):
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
for path in os.environ.get("PATH", "").split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
@ -1849,3 +1889,28 @@ def expand_dotted_devname(dotted):
return toks
else:
return (dotted, None)
def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
# return a dictionary populated with keys in 'required' and 'optional'
# by reading files at base + delim + entry
if required is None:
required = []
if optional is None:
optional = []
missing = []
ret = {}
for f in required + optional:
try:
ret[f] = load_file(base + delim + f, quiet=False)
except IOError as e:
if e.errno != errno.ENOENT:
raise
if f in required:
missing.append(f)
if len(missing):
raise ValueError("Missing required files: %s", ','.join(missing))
return ret
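For example (a sketch, assuming 'meta-data' and 'user-data' files exist
under /var/db):

pathprefix2dict('/var/db', required=['meta-data'], optional=['user-data'])
# => {'meta-data': <contents>, 'user-data': <contents>}
# a missing file named in 'required' raises ValueError instead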

View File

@ -64,6 +64,7 @@ cloud_config_modules:
# The modules that run in the 'final' stage
cloud_final_modules:
- rightscale_userdata
- scripts-vendor
- scripts-per-once
- scripts-per-boot
- scripts-per-instance

View File

@ -5,12 +5,10 @@
#
# mode:
# values:
# * auto: use any option possible (growpart or parted)
# * auto: use any available option
# if none are available, do not warn, but log at debug level.
# * growpart: use growpart to grow partitions
# if growpart is not available, this is an error.
# * parted: use parted (parted resizepart) to resize partitions
# if parted is not available, this is an error.
# * off, false
#
# devices:
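A minimal sketch of a cloud-config using these settings (values are
illustrative; 'root' is the default device list shown in the growpart tests
in this commit):

#cloud-config
growpart:
  mode: auto
  devices: ['root']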

View File

@ -6,6 +6,9 @@
#
# Note: 'tags' should be specified as a comma delimited string
# rather than a list.
#
# You can get example key/values by running 'landscape-config',
# answer the questions, then look at /etc/landscape/client.config
landscape:
client:
url: "https://landscape.canonical.com/message-system"
@ -13,3 +16,7 @@ landscape:
data_path: "/var/lib/landscape/client"
http_proxy: "http://my.proxy.com/foobar"
tags: "server,cloud"
computer_title: footitle
https_proxy: fooproxy
registration_key: fookey
account_name: fooaccount

View File

@ -0,0 +1,16 @@
#cloud-config
#
# This explains how to control vendordata via a cloud-config
#
# On select datasources, vendors have a channel for the consumption
# of all supported user-data types via a special channel called
# vendordata. Users of the end system are given ultimate control.
#
vendor_data:
enabled: True
prefix: /usr/bin/ltrace
# enabled: whether it is enabled or not
# prefix: the command to run before any vendor scripts.
# Note: this is a fairly weak method of containment. It should
# be used to profile a script, not to prevent it from running

View File

@ -1,4 +1,5 @@
import sys, os
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the

View File

@ -130,10 +130,6 @@ To see which versions are supported from your cloud provider use the following U
...
latest
**Note:** internally in cloudinit the `boto`_ library used to fetch the instance
userdata and instance metadata, feel free to check that library out, it provides
many other useful EC2 functionality.
---------------------------
Config Drive
---------------------------

View File

@ -16,11 +16,35 @@ responds with the status and if "SUCCESS" returns until a single ".\n".
New versions of the SmartOS tooling will include support for base64 encoded data.
Userdata
--------
Meta-data channels
------------------
In SmartOS parlance, user-data is actually meta-data. This user-data can be
provided as key-value pairs.
Cloud-init supports three modes of delivering user/meta-data via the flexible
channels of SmartOS.
* user-data is written to /var/db/user-data
- per the spec, user-data is for consumption by the end-user, not provisioning
tools
- cloud-init entirely ignores this channel other than writing it to disk
- removal of the meta-data key means that /var/db/user-data gets removed
- a backup of previous meta-data is maintained as /var/db/user-data.<timestamp>
- <timestamp> is the epoch time when cloud-init ran
* user-script is written to /var/lib/cloud/scripts/per-boot/99_user_data
- this is executed each boot
- a link is created to /var/db/user-script
- previous versions of the user-script are written to
/var/lib/cloud/scripts/per-boot.backup/99_user_script.<timestamp>.
- <timestamp> is the epoch time when cloud-init ran.
- when the 'user-script' meta-data key goes missing, the user-script is
removed from the file system, although a backup is maintained.
- if the script does not start with a shebang (i.e. #!<executable>),
cloud-init will add a shebang of "#!/bin/bash"
* cloud-init:user-data is treated like on other Clouds.
- this channel is used for delivering _all_ cloud-init instructions
- scripts delivered over this channel must be well formed (i.e. must have
a shebang)
Cloud-init supports reading the traditional meta-data fields supported by the
SmartOS tools. These are:
@ -32,19 +56,49 @@ SmartOS tools. These are:
Note: At this time iptables_disable and enable_motd_sys_info are read but
are not actioned.
user-script
-----------
disabling user-script
---------------------
SmartOS traditionally supports sending over a user-script for execution at the
rc.local level. Cloud-init supports running user-scripts as if they were
cloud-init user-data. In this sense, anything with a shell interpreter
directive will run.
Cloud-init uses the per-boot script functionality to handle the execution
of the user-script. If you want to prevent this, use a cloud-config of:
#cloud-config
cloud_final_modules:
- scripts-per-once
- scripts-per-instance
- scripts-user
- ssh-authkey-fingerprints
- keys-to-console
- phone-home
- final-message
- power-state-change
Alternatively, you can use the JSON patch method:
#cloud-config-jsonp
[
{ "op": "replace",
"path": "/cloud_final_modules",
"value": ["scripts-per-once",
"scripts-per-instance",
"scripts-user",
"ssh-authkey-fingerprints",
"keys-to-console",
"phone-home",
"final-message",
"power-state-change"]
}
]
The default cloud-config includes "scripts-per-boot". When you disable the
per-boot script handling, cloud-init will still ingest and write the
user-data, but will not execute it.
Note: Unless you have an explicit use-case, it is recommended that you not
disable the per-boot script execution, especially if you are using
any of the life-cycle management features of SmartOS.
The cloud-config needs to be delivered over the cloud-init:user-data channel
in order for cloud-init to ingest it.

user-data and user-script
-------------------------

In the event that a user defines the meta-data key of "user-data", it will
always supersede any user-script data. This is for consistency.
base64
------
@ -54,6 +108,8 @@ are provided by SmartOS:
* root_authorized_keys
* enable_motd_sys_info
* iptables_disable
* user-data
* user-script
This list can be changed through system config of variable 'no_base64_decode'.
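As a sketch, assuming the SmartOS datasource reads its settings from the
standard 'datasource' section of system config (as the unit tests in this
commit do), excluding a key from decoding might look like:

datasource:
  SmartOS:
    no_base64_decode: ['user-script']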

53
doc/vendordata.txt Normal file
View File

@ -0,0 +1,53 @@
=== Overview ===
Vendordata is data provided by the entity that launches an instance
(for example, the cloud provider). This data can be used to
customize the image to fit into the particular environment it is
being run in.
Vendordata follows the same rules as user-data, with the following
caveats:
1. Users have ultimate control over vendordata. They can disable its
execution or disable handling of specific parts of multipart input.
2. By default it only runs on first boot
3. Vendordata can be disabled by the user. If the use of vendordata is
required for the instance to run, then vendordata should not be
used.
4. user supplied cloud-config is merged over cloud-config from
vendordata.
Users providing cloud-config data can use the '#cloud-config-jsonp' method
to more finely control their modifications to the vendor supplied
cloud-config. For example, if both vendor and user have provided
'runcmd' then the default merge handler will cause the user's runcmd to
override the one provided by the vendor. To append to 'runcmd', the user
could better provide multipart input with a cloud-config-jsonp part like:
#cloud-config-jsonp
[{ "op": "add", "path": "/runcmd", "value": ["my", "command", "here"]}]
Further, we strongly advise vendors to not 'be evil'. By evil, we
mean any action that could compromise a system. Since users trust
you, please take care to make sure that any vendordata is safe,
atomic, idempotent and does not put your users at risk.
=== Input Formats ===
cloud-init will download and cache to the filesystem any vendor-data that
it finds. Vendordata is handled exactly like user-data. That means that
the vendor can supply multipart input and have those parts acted on
in the same way as user-data.
The only differences are:
* vendor-supplied scripts are stored in a different location than
user-scripts (to avoid namespace collision)
* user can disable part handlers by cloud-config settings.
For example, to disable handling of 'part-handlers' in vendor-data,
the user could provide user-data like this:
#cloud-config
vendordata: {excluded: 'text/part-handler'}
=== Examples ===
There are examples in the examples subdirectory.
Additionally, the 'tools' directory contains 'write-mime-multipart',
which can be used to easily generate mime-multi-part files from a list
of input files. That data can then be given to an instance.
See 'write-mime-multipart --help' for usage.
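For a rough illustration of what such multipart input looks like,
hand-rolled here with the standard email modules rather than the
write-mime-multipart tool (the payload is an illustrative no-op):

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()
# a text/cloud-config part; '{}' is an empty but valid cloud-config
part = MIMEText("#cloud-config\n{}\n", _subtype="cloud-config")
msg.attach(part)
print msg.as_string()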

View File

@ -29,7 +29,6 @@ import argparse
# file pypi package name to a debian/ubuntu package name.
PKG_MP = {
'argparse': 'python-argparse',
'boto': 'python-boto',
'cheetah': 'python-cheetah',
'configobj': 'python-configobj',
'jsonpatch': 'python-jsonpatch | python-json-patch',

View File

@ -36,7 +36,6 @@ from cloudinit import util
PKG_MP = {
'redhat': {
'argparse': 'python-argparse',
'boto': 'python-boto',
'cheetah': 'python-cheetah',
'configobj': 'python-configobj',
'jsonpatch': 'python-jsonpatch',
@ -48,7 +47,6 @@ PKG_MP = {
},
'suse': {
'argparse': 'python-argparse',
'boto': 'python-boto',
'cheetah': 'python-cheetah',
'configobj': 'python-configobj',
'jsonpatch': 'python-jsonpatch',

View File

@ -25,6 +25,7 @@ Depends: procps,
#end for
python-software-properties | software-properties-common,
\${misc:Depends},
Recommends: sudo
XB-Python-Version: \${python:Versions}
Description: Init scripts for cloud instances
Cloud instances need special scripts to run during initialisation

View File

@ -27,25 +27,3 @@ License: GPL-3
The complete text of the GPL version 3 can be seen in
/usr/share/common-licenses/GPL-3.
Files: cloudinit/boto_utils.py
Copyright: 2006,2007, Mitch Garnaat http://garnaat.org/
License: MIT
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, dis-
tribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the fol-
lowing conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.

View File

@ -34,6 +34,7 @@ Requires: e2fsprogs
Requires: net-tools
Requires: procps
Requires: shadow-utils
Requires: sudo
# Install pypi 'dynamic' requirements
#for $r in $requires

View File

@ -43,6 +43,7 @@ Requires: iproute2
Requires: e2fsprogs
Requires: net-tools
Requires: procps
Requires: sudo
# Install pypi 'dynamic' requirements
#for $r in $requires

View File

@ -29,8 +29,5 @@ argparse
# Requests handles ssl correctly!
requests
# Boto for ec2
boto
# For patching pieces of cloud-config together
jsonpatch

34
sysvinit/freebsd/cloudconfig Executable file
View File

@ -0,0 +1,34 @@
#!/bin/sh
# PROVIDE: cloudconfig
# REQUIRE: cloudinit cloudinitlocal
# BEFORE: cloudfinal
. /etc/rc.subr
name="cloudconfig"
command="/usr/bin/cloud-init"
start_cmd="cloudconfig_start"
stop_cmd=":"
rcvar="cloudinit_enable"
start_precmd="cloudinit_override"
start_cmd="cloudconfig_start"
: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
cloudinit_override()
{
# If a sysconfig/default variable override file exists, use it...
if [ -f /etc/default/cloud-init ]; then
. /etc/default/cloud-init
fi
}
cloudconfig_start()
{
echo "${command} starting"
${command} ${cloudinit_config} modules --mode config
}
load_rc_config $name
run_rc_command "$1"

34
sysvinit/freebsd/cloudfinal Executable file
View File

@ -0,0 +1,34 @@
#!/bin/sh
# PROVIDE: cloudfinal
# REQUIRE: LOGIN cloudinit cloudconfig cloudinitlocal
# REQUIRE: cron mail sshd swaplate
. /etc/rc.subr
name="cloudfinal"
command="/usr/bin/cloud_init"
start_cmd="cloudfinal_start"
stop_cmd=":"
rcvar="cloudinit_enable"
start_precmd="cloudinit_override"
start_cmd="cloudfinal_start"
: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
cloudinit_override()
{
# If a sysconfig/default variable override file exists, use it...
if [ -f /etc/default/cloud-init ]; then
. /etc/default/cloud-init
fi
}
cloudfinal_start()
{
echo -n "${command} starting"
${command} ${cloudinit_config} modules --mode final
}
load_rc_config $name
run_rc_command "$1"

34
sysvinit/freebsd/cloudinit Executable file
View File

@ -0,0 +1,34 @@
#!/bin/sh
# PROVIDE: cloudinit
# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal
# BEFORE: cloudconfig cloudfinal
. /etc/rc.subr
name="cloudinit"
command="/usr/bin/cloud_init"
start_cmd="cloudinit_start"
stop_cmd=":"
rcvar="cloudinit_enable"
start_precmd="cloudinit_override"
start_cmd="cloudinit_start"
: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
cloudinit_override()
{
# If a sysconfig/default variable override file exists, use it...
if [ -f /etc/default/cloud-init ]; then
. /etc/default/cloud-init
fi
}
cloudinit_start()
{
echo -n "${command} starting"
${command} ${cloudinit_config} init
}
load_rc_config $name
run_rc_command "$1"

34
sysvinit/freebsd/cloudinitlocal Executable file
View File

@ -0,0 +1,34 @@
#!/bin/sh
# PROVIDE: cloudinitlocal
# REQUIRE: mountcritlocal
# BEFORE: NETWORKING FILESYSTEMS cloudinit cloudconfig cloudfinal
. /etc/rc.subr
name="cloudinitlocal"
command="/usr/bin/cloud-init"
start_cmd="cloudlocal_start"
stop_cmd=":"
rcvar="cloudinit_enable"
start_precmd="cloudinit_override"
start_cmd="cloudlocal_start"
: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
cloudinit_override()
{
# If a sysconfig/default variable override file exists, use it...
if [ -f /etc/default/cloud-init ]; then
. /etc/default/cloud-init
fi
}
cloudlocal_start()
{
echo -n "${command} starting"
${command} ${cloudinit_config} init --local
}
load_rc_config $name
run_rc_command "$1"

6
test-requirements.txt Normal file
View File

@ -0,0 +1,6 @@
httpretty>=0.7.1
mocker
nose
pep8
pyflakes
pylint

View File

@ -187,6 +187,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
def populate_dir(path, files):
if not os.path.exists(path):
os.makedirs(path)
for (name, content) in files.iteritems():
with open(os.path.join(path, name), "w") as fp:

View File

@ -196,7 +196,7 @@ class TestCmdlineUrl(MockerTestCase):
mock_readurl = self.mocker.replace(url_helper.readurl,
passthrough=False)
mock_readurl(url, ARGS, KWARGS)
self.mocker.result(util.StringResponse(payload))
self.mocker.result(url_helper.StringResponse(payload))
self.mocker.replay()
self.assertEqual((key, url, None),
@ -212,7 +212,7 @@ class TestCmdlineUrl(MockerTestCase):
mock_readurl = self.mocker.replace(url_helper.readurl,
passthrough=False)
mock_readurl(url, ARGS, KWARGS)
self.mocker.result(util.StringResponse(payload))
self.mocker.result(url_helper.StringResponse(payload))
self.mocker.replay()
self.assertEqual((key, url, payload),
@ -225,7 +225,7 @@ class TestCmdlineUrl(MockerTestCase):
cmdline = "ro %s=%s bar=1" % (key, url)
self.mocker.replace(url_helper.readurl, passthrough=False)
self.mocker.result(util.StringResponse(""))
self.mocker.result(url_helper.StringResponse(""))
self.mocker.replay()
self.assertEqual((None, None, None),

View File

@ -13,6 +13,7 @@ from email.mime.multipart import MIMEMultipart
from cloudinit import handlers
from cloudinit import helpers as c_helpers
from cloudinit import log
from cloudinit.settings import (PER_INSTANCE)
from cloudinit import sources
from cloudinit import stages
from cloudinit import util
@ -24,10 +25,11 @@ from tests.unittests import helpers
class FakeDataSource(sources.DataSource):
def __init__(self, userdata):
def __init__(self, userdata=None, vendordata=None):
sources.DataSource.__init__(self, {}, None, None)
self.metadata = {'instance-id': INSTANCE_ID}
self.userdata_raw = userdata
self.vendordata_raw = vendordata
# FIXME: these tests shouldn't be checking log output??
@ -45,6 +47,11 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
if self._log_handler and self._log:
self._log.removeHandler(self._log_handler)
def _patchIn(self, root):
self.restore()
self.patchOS(root)
self.patchUtils(root)
def capture_log(self, lvl=logging.DEBUG):
log_file = StringIO.StringIO()
self._log_handler = logging.StreamHandler(log_file)
@ -68,13 +75,89 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEquals(2, len(cc))
self.assertEquals('qux', cc['baz'])
self.assertEquals('qux2', cc['bar'])
def test_simple_jsonp_vendor_and_user(self):
# test that user-data wins over vendor
user_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" }
]
'''
vendor_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
{ "op": "add", "path": "/foo", "value": "quxC" }
]
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
cfg = mods.cfg
self.assertIn('vendor_data', cfg)
self.assertEquals('qux', cfg['baz'])
self.assertEquals('qux2', cfg['bar'])
self.assertEquals('quxC', cfg['foo'])
def test_simple_jsonp_no_vendor_consumed(self):
# make sure that vendor data is not consumed
user_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" },
{ "op": "add", "path": "/vendor_data", "value": {"enabled": "false"}}
]
'''
vendor_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
{ "op": "add", "path": "/foo", "value": "quxC" }
]
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
cfg = mods.cfg
self.assertEquals('qux', cfg['baz'])
self.assertEquals('qux2', cfg['bar'])
self.assertNotIn('foo', cfg)
def test_mixed_cloud_config(self):
blob_cc = '''
#cloud-config
@ -105,12 +188,87 @@ c: d
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEquals(1, len(cc))
self.assertEquals('c', cc['a'])
def test_vendor_user_yaml_cloud_config(self):
vendor_blob = '''
#cloud-config
a: b
name: vendor
run:
- x
- y
'''
user_blob = '''
#cloud-config
a: c
vendor_data:
enabled: True
prefix: /bin/true
name: user
run:
- z
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
cfg = mods.cfg
self.assertIn('vendor_data', cfg)
self.assertEquals('c', cfg['a'])
self.assertEquals('user', cfg['name'])
self.assertNotIn('x', cfg['run'])
self.assertNotIn('y', cfg['run'])
self.assertIn('z', cfg['run'])
def test_vendordata_script(self):
vendor_blob = '''
#!/bin/bash
echo "test"
'''
user_blob = '''
#cloud-config
vendor_data:
enabled: True
prefix: /bin/true
'''
new_root = self.makeDir()
self._patchIn(new_root)
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mods = stages.Modules(initer)
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
_cfg = mods.cfg
vendor_script = initer.paths.get_ipath_cur('vendor_scripts')
vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script)
self.assertTrue(os.path.exists(vendor_script_fns))
def test_merging_cloud_config(self):
blob = '''
#cloud-config
@ -185,7 +343,7 @@ p: 1
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
self.assertIn(
"Unhandled non-multipart (text/x-not-multipart) userdata:",
log_file.getvalue())
@ -221,7 +379,7 @@ c: 4
self.patchUtils(new_root)
self.patchOS(new_root)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
contents = util.load_file(ci.paths.get_ipath("cloud_config"))
contents = util.load_yaml(contents)
self.assertTrue(isinstance(contents, dict))
@ -244,7 +402,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
self.assertIn(
"Unhandled unknown content-type (text/plain)",
log_file.getvalue())
@ -264,7 +422,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
def test_mime_text_x_shellscript(self):
@ -284,7 +442,7 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
def test_mime_text_plain_shell(self):
@ -304,5 +462,5 @@ c: 4
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_userdata()
ci.consume_data()
self.assertEqual("", log_file.getvalue())

View File

@ -285,10 +285,11 @@ class TestConfigDriveDataSource(MockerTestCase):
self.assertEqual(["/dev/vdb", "/dev/zdd"],
ds.find_candidate_devs())
# verify that partitions are not considered
# verify that partitions that have the correct label are considered.
devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
"TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]}
self.assertEqual([], ds.find_candidate_devs())
self.assertEqual(["/dev/vdb3"],
ds.find_candidate_devs())
finally:
util.find_devs_with = orig_find_devs_with

View File

@ -119,9 +119,10 @@ class TestMAASDataSource(mocker.MockerTestCase):
mock_request(url, headers=None, timeout=mocker.ANY,
data=mocker.ANY, sec_between=mocker.ANY,
ssl_details=mocker.ANY, retries=mocker.ANY,
headers_cb=my_headers_cb)
headers_cb=my_headers_cb,
exception_cb=mocker.ANY)
resp = valid.get(key)
self.mocker.result(util.StringResponse(resp))
self.mocker.result(url_helper.StringResponse(resp))
self.mocker.replay()
(userdata, metadata) = DataSourceMAAS.read_maas_seed_url(my_seed,

View File

@ -97,6 +97,41 @@ class TestNoCloudDataSource(MockerTestCase):
self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
self.assertTrue(ret)
def test_nocloud_seed_with_vendordata(self):
md = {'instance-id': 'IID', 'dsmode': 'local'}
ud = "USER_DATA_HERE"
vd = "THIS IS MY VENDOR_DATA"
populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
{'user-data': ud, 'meta-data': yaml.safe_dump(md),
'vendor-data': vd})
sys_cfg = {
'datasource': {'NoCloud': {'fs_label': None}}
}
ds = DataSourceNoCloud.DataSourceNoCloud
dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertEqual(dsrc.userdata_raw, ud)
self.assertEqual(dsrc.metadata, md)
self.assertEqual(dsrc.vendordata, vd)
self.assertTrue(ret)
def test_nocloud_no_vendordata(self):
populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
{'user-data': "ud", 'meta-data': "instance-id: IID\n"})
sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
ds = DataSourceNoCloud.DataSourceNoCloud
dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertEqual(dsrc.userdata_raw, "ud")
self.assertFalse(dsrc.vendordata)
self.assertTrue(ret)
class TestParseCommandLineData(MockerTestCase):

View File

@ -258,6 +258,14 @@ iface eth0 inet static
''')
class TestParseShellConfig(MockerTestCase):
def test_no_seconds(self):
cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"])
# we could test 'sleep 2', but that would make the test run slower.
ret = ds.parse_shell_config(cfg)
self.assertEqual(ret, {"foo": "bar", "xx": "foo"})
def populate_context_dir(path, variables):
data = "# Context variables generated by OpenNebula\n"
for (k, v) in variables.iteritems():

View File

@ -27,6 +27,10 @@ from cloudinit import helpers
from cloudinit.sources import DataSourceSmartOS
from mocker import MockerTestCase
import os
import os.path
import re
import stat
import uuid
MOCK_RETURNS = {
@ -35,7 +39,11 @@ MOCK_RETURNS = {
'disable_iptables_flag': None,
'enable_motd_sys_info': None,
'test-var1': 'some data',
'user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
'sdc:datacenter_name': 'somewhere2',
'sdc:operator-script': '\n'.join(['bin/true', '']),
'user-data': '\n'.join(['something', '']),
'user-script': '\n'.join(['/bin/true', '']),
}
DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc')
@ -101,6 +109,7 @@ class TestSmartOSDataSource(MockerTestCase):
def setUp(self):
# makeDir comes from MockerTestCase
self.tmp = self.makeDir()
self.legacy_user_d = self.makeDir()
# patch cloud_dir, so our 'seed_dir' is guaranteed empty
self.paths = helpers.Paths({'cloud_dir': self.tmp})
@ -138,6 +147,7 @@ class TestSmartOSDataSource(MockerTestCase):
sys_cfg['datasource'] = sys_cfg.get('datasource', {})
sys_cfg['datasource']['SmartOS'] = ds_cfg
self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)])
self.apply_patches([(mod, 'get_serial', _get_serial)])
self.apply_patches([(mod, 'dmi_data', _dmi_data)])
dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None,
@ -194,7 +204,7 @@ class TestSmartOSDataSource(MockerTestCase):
# metadata provided base64_all of true
my_returns = MOCK_RETURNS.copy()
my_returns['base64_all'] = "true"
for k in ('hostname', 'user-data'):
for k in ('hostname', 'cloud-init:user-data'):
my_returns[k] = base64.b64encode(my_returns[k])
dsrc = self._get_ds(mockdata=my_returns)
@ -202,7 +212,7 @@ class TestSmartOSDataSource(MockerTestCase):
self.assertTrue(ret)
self.assertEquals(MOCK_RETURNS['hostname'],
dsrc.metadata['local-hostname'])
self.assertEquals(MOCK_RETURNS['user-data'],
self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
dsrc.userdata_raw)
self.assertEquals(MOCK_RETURNS['root_authorized_keys'],
dsrc.metadata['public-keys'])
@ -213,9 +223,9 @@ class TestSmartOSDataSource(MockerTestCase):
def test_b64_userdata(self):
my_returns = MOCK_RETURNS.copy()
my_returns['b64-user-data'] = "true"
my_returns['b64-cloud-init:user-data'] = "true"
my_returns['b64-hostname'] = "true"
for k in ('hostname', 'user-data'):
for k in ('hostname', 'cloud-init:user-data'):
my_returns[k] = base64.b64encode(my_returns[k])
dsrc = self._get_ds(mockdata=my_returns)
@ -223,7 +233,8 @@ class TestSmartOSDataSource(MockerTestCase):
self.assertTrue(ret)
self.assertEquals(MOCK_RETURNS['hostname'],
dsrc.metadata['local-hostname'])
self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
dsrc.userdata_raw)
self.assertEquals(MOCK_RETURNS['root_authorized_keys'],
dsrc.metadata['public-keys'])
@ -238,13 +249,131 @@ class TestSmartOSDataSource(MockerTestCase):
self.assertTrue(ret)
self.assertEquals(MOCK_RETURNS['hostname'],
dsrc.metadata['local-hostname'])
self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
dsrc.userdata_raw)
def test_userdata(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
self.assertEquals(MOCK_RETURNS['user-data'],
dsrc.metadata['legacy-user-data'])
self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
dsrc.userdata_raw)
def test_sdc_scripts(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEquals(MOCK_RETURNS['user-script'],
dsrc.metadata['user-script'])
legacy_script_f = "%s/user-script" % self.legacy_user_d
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
self.assertEquals(user_script_perm, '700')
def test_scripts_shebanged(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEquals(MOCK_RETURNS['user-script'],
dsrc.metadata['user-script'])
legacy_script_f = "%s/user-script" % self.legacy_user_d
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
shebang = None
with open(legacy_script_f, 'r') as f:
shebang = f.readlines()[0].strip()
self.assertEquals(shebang, "#!/bin/bash")
user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
self.assertEquals(user_script_perm, '700')
def test_scripts_shebang_not_added(self):
"""
Test that a user-script which already begins with a shebang keeps it.
This test makes sure that plain-text scripts that already carry an
interpreter line are not rewritten to "#!/bin/bash" by cloud-init.
"""
my_returns = MOCK_RETURNS.copy()
my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
'print("hi")', ''])
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEquals(my_returns['user-script'],
dsrc.metadata['user-script'])
legacy_script_f = "%s/user-script" % self.legacy_user_d
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
shebang = None
with open(legacy_script_f, 'r') as f:
shebang = f.readlines()[0].strip()
self.assertEquals(shebang, "#!/usr/bin/perl")
def test_scripts_removed(self):
"""
Since SmartOS requires that the user script is fetched
each boot, we want to make sure that the information
is backed up for user review later.
This tests the behavior of when a script is removed. It makes
sure that a) the previous script is backed up; and b) that
there is no script remaining.
"""
script_d = os.path.join(self.tmp, "scripts", "per-boot")
os.makedirs(script_d)
test_script_f = "%s/99_user_script" % script_d
with open(test_script_f, 'w') as f:
f.write("TEST DATA")
my_returns = MOCK_RETURNS.copy()
del my_returns['user-script']
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertFalse(dsrc.metadata['user-script'])
self.assertFalse(os.path.exists(test_script_f))
def test_userdata_removed(self):
"""
User-data in the SmartOS world is supposed to be written to a file
each and every boot. This tests to make sure that in the event the
legacy user-data is removed, the existing user-data is backed up and
there is no /var/db/user-data left.
"""
user_data_f = "%s/mdata-user-data" % self.legacy_user_d
with open(user_data_f, 'w') as f:
f.write("PREVIOUS")
my_returns = MOCK_RETURNS.copy()
del my_returns['user-data']
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertFalse(dsrc.metadata.get('legacy-user-data'))
found_new = False
for root, _dirs, files in os.walk(self.legacy_user_d):
for name in files:
name_f = os.path.join(root, name)
permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:]
if re.match(r'.*\/mdata-user-data$', name_f):
found_new = True
self.assertEquals(permissions, '400')
self.assertFalse(found_new)
def test_disable_iptables_flag(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)

View File

@ -0,0 +1,138 @@
from tests.unittests import helpers
from cloudinit import ec2_utils as eu
import httpretty as hp
class TestEc2Util(helpers.TestCase):
VERSION = 'latest'
@hp.activate
def test_userdata_fetch(self):
hp.register_uri(hp.GET,
'http://169.254.169.254/%s/user-data' % (self.VERSION),
body='stuff',
status=200)
userdata = eu.get_instance_userdata(self.VERSION)
self.assertEquals('stuff', userdata)
@hp.activate
def test_userdata_fetch_fail_not_found(self):
hp.register_uri(hp.GET,
'http://169.254.169.254/%s/user-data' % (self.VERSION),
status=404)
userdata = eu.get_instance_userdata(self.VERSION, retries=0)
self.assertEquals('', userdata)
@hp.activate
def test_userdata_fetch_fail_server_dead(self):
hp.register_uri(hp.GET,
'http://169.254.169.254/%s/user-data' % (self.VERSION),
status=500)
userdata = eu.get_instance_userdata(self.VERSION, retries=0)
self.assertEquals('', userdata)
@hp.activate
def test_userdata_fetch_fail_server_not_found(self):
hp.register_uri(hp.GET,
'http://169.254.169.254/%s/user-data' % (self.VERSION),
status=404)
userdata = eu.get_instance_userdata(self.VERSION)
self.assertEquals('', userdata)
@hp.activate
def test_metadata_fetch_no_keys(self):
base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
body="\n".join(['hostname',
'instance-id',
'ami-launch-index']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
status=200, body='123')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'ami-launch-index'),
status=200, body='1')
md = eu.get_instance_metadata(self.VERSION, retries=0)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
self.assertEquals(md['instance-id'], '123')
self.assertEquals(md['ami-launch-index'], '1')
@hp.activate
def test_metadata_fetch_key(self):
base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
body="\n".join(['hostname',
'instance-id',
'public-keys/']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
status=200, body='123')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
status=200, body='0=my-public-key')
hp.register_uri(hp.GET,
eu.combine_url(base_url, 'public-keys/0/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
self.assertEquals(md['instance-id'], '123')
self.assertEquals(1, len(md['public-keys']))
@hp.activate
def test_metadata_fetch_with_2_keys(self):
base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
body="\n".join(['hostname',
'instance-id',
'public-keys/']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
status=200, body='123')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'public-keys/'),
status=200,
body="\n".join(['0=my-public-key', '1=my-other-key']))
hp.register_uri(hp.GET,
eu.combine_url(base_url, 'public-keys/0/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
hp.register_uri(hp.GET,
eu.combine_url(base_url, 'public-keys/1/openssh-key'),
status=200, body='ssh-rsa AAAA.....wZEf my-other-key')
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
self.assertEquals(md['instance-id'], '123')
self.assertEquals(2, len(md['public-keys']))
@hp.activate
def test_metadata_fetch_bdm(self):
base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
hp.register_uri(hp.GET, base_url, status=200,
body="\n".join(['hostname',
'instance-id',
'block-device-mapping/']))
hp.register_uri(hp.GET, eu.combine_url(base_url, 'hostname'),
status=200, body='ec2.fake.host.name.com')
hp.register_uri(hp.GET, eu.combine_url(base_url, 'instance-id'),
status=200, body='123')
hp.register_uri(hp.GET,
eu.combine_url(base_url, 'block-device-mapping/'),
status=200,
body="\n".join(['ami', 'ephemeral0']))
hp.register_uri(hp.GET,
eu.combine_url(base_url, 'block-device-mapping/ami'),
status=200,
body="sdb")
hp.register_uri(hp.GET,
eu.combine_url(base_url,
'block-device-mapping/ephemeral0'),
status=200,
body="sdc")
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEquals(md['hostname'], 'ec2.fake.host.name.com')
self.assertEquals(md['instance-id'], '123')
bdm = md['block-device-mapping']
self.assertEquals(2, len(bdm))
self.assertEquals(bdm['ami'], 'sdb')
self.assertEquals(bdm['ephemeral0'], 'sdc')

View File

@ -12,50 +12,9 @@ import re
import unittest
# growpart:
# mode: auto # off, on, auto, 'growpart', 'parted'
# mode: auto # off, on, auto, 'growpart'
# devices: ['root']
HELP_PARTED_NO_RESIZE = """
Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...]
Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in
interactive mode.
OPTIONs:
<SNIP>
COMMANDs:
<SNIP>
quit exit program
rescue START END rescue a lost partition near START
and END
resize NUMBER START END resize partition NUMBER and its file
system
rm NUMBER delete partition NUMBER
<SNIP>
Report bugs to bug-parted@gnu.org
"""
HELP_PARTED_RESIZE = """
Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...]
Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in
interactive mode.
OPTIONs:
<SNIP>
COMMANDs:
<SNIP>
quit exit program
rescue START END rescue a lost partition near START
and END
resize NUMBER START END resize partition NUMBER and its file
system
resizepart NUMBER END resize partition NUMBER
rm NUMBER delete partition NUMBER
<SNIP>
Report bugs to bug-parted@gnu.org
"""
HELP_GROWPART_RESIZE = """
growpart disk partition
rewrite partition table so that partition takes up all the space it can
@ -122,11 +81,8 @@ class TestConfig(MockerTestCase):
# Order must be correct
self.mocker.order()
@unittest.skip("until LP: #1212444 fixed")
def test_no_resizers_auto_is_fine(self):
subp = self.mocker.replace(util.subp, passthrough=False)
subp(['parted', '--help'], env={'LANG': 'C'})
self.mocker.result((HELP_PARTED_NO_RESIZE, ""))
subp(['growpart', '--help'], env={'LANG': 'C'})
self.mocker.result((HELP_GROWPART_NO_RESIZE, ""))
self.mocker.replay()
@ -144,15 +100,14 @@ class TestConfig(MockerTestCase):
self.assertRaises(ValueError, self.handle, self.name, config,
self.cloud_init, self.log, self.args)
@unittest.skip("until LP: #1212444 fixed")
def test_mode_auto_prefers_parted(self):
def test_mode_auto_prefers_growpart(self):
subp = self.mocker.replace(util.subp, passthrough=False)
subp(['parted', '--help'], env={'LANG': 'C'})
self.mocker.result((HELP_PARTED_RESIZE, ""))
subp(['growpart', '--help'], env={'LANG': 'C'})
self.mocker.result((HELP_GROWPART_RESIZE, ""))
self.mocker.replay()
ret = cc_growpart.resizer_factory(mode="auto")
self.assertTrue(isinstance(ret, cc_growpart.ResizeParted))
self.assertTrue(isinstance(ret, cc_growpart.ResizeGrowPart))
def test_handle_with_no_growpart_entry(self):
#if no 'growpart' entry in config, then mode=auto should be used

View File

@ -0,0 +1,40 @@
from cloudinit import util
from mocker import MockerTestCase
from tests.unittests.helpers import populate_dir
class TestPathPrefix2Dict(MockerTestCase):
def setUp(self):
self.tmp = self.makeDir()
def test_required_only(self):
dirdata = {'f1': 'f1content', 'f2': 'f2content'}
populate_dir(self.tmp, dirdata)
ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2'])
self.assertEqual(dirdata, ret)
def test_required_missing(self):
dirdata = {'f1': 'f1content'}
populate_dir(self.tmp, dirdata)
kwargs = {'required': ['f1', 'f2']}
self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs)
def test_no_required_and_optional(self):
dirdata = {'f1': 'f1c', 'f2': 'f2c'}
populate_dir(self.tmp, dirdata)
ret = util.pathprefix2dict(self.tmp, required=None,
optional=['f1', 'f2'])
self.assertEqual(dirdata, ret)
def test_required_and_optional(self):
dirdata = {'f1': 'f1c', 'f2': 'f2c'}
populate_dir(self.tmp, dirdata)
ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2'])
self.assertEqual(dirdata, ret)
# vi: ts=4 expandtab

View File

@ -35,8 +35,8 @@ class TestMergeRun(helpers.FilesystemMockingTestCase):
initer.datasource.userdata_raw = ud
_iid = initer.instancify()
initer.update()
initer.cloudify().run('consume_userdata',
initer.consume_userdata,
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)
mirrors = initer.distro.get_option('package_mirrors')

View File

@ -66,8 +66,8 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.update()
self.assertTrue(os.path.islink("var/lib/cloud/instance"))
initer.cloudify().run('consume_userdata',
initer.consume_userdata,
initer.cloudify().run('consume_data',
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE)

View File

@ -1,32 +1,23 @@
#!/bin/sh
#!/usr/bin/env python
set -e
import os
import sys
find_root() {
local topd
if [ -z "${CLOUD_INIT_TOP_D}" ]; then
topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
else
topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
fi
[ $? -eq 0 -a -f "${topd}/setup.py" ] || return
ROOT_DIR="$topd"
}
fail() { echo "$0:" "$@" 1>&2; exit 1; }
if 'CLOUD_INIT_TOP_D' in os.environ:
topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
else:
topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if ! find_root; then
fail "Unable to locate 'setup.py' file that should " \
"exist in the cloud-init root directory."
fi
for fname in ("setup.py", "requirements.txt"):
if not os.path.isfile(os.path.join(topd, fname)):
sys.stderr.write("Unable to locate '%s' file that should "
"exist in cloud-init root directory." % fname)
sys.exit(1)
REQUIRES="$ROOT_DIR/Requires"
with open(os.path.join(topd, "requirements.txt"), "r") as fp:
for line in fp:
if not line.strip() or line.startswith("#"):
continue
sys.stdout.write(line)
if [ ! -e "$REQUIRES" ]; then
fail "Unable to find 'Requires' file located at '$REQUIRES'"
fi
# Filter out comments and empty lines
DEPS=$(sed -n -e 's,#.*,,' -e '/./p' "$REQUIRES") &&
[ -n "$DEPS" ] ||
fail "failed to read deps from '${REQUIRES}'"
echo "$DEPS" | sort -d -f
sys.exit(0)

View File

@ -1,32 +1,26 @@
#!/bin/sh
#!/usr/bin/env python
set -e
import os
import re
import sys
find_root() {
local topd
if [ -z "${CLOUD_INIT_TOP_D}" ]; then
topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
else
topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
fi
[ $? -eq 0 -a -f "${topd}/setup.py" ] || return
ROOT_DIR="$topd"
}
fail() { echo "$0:" "$@" 1>&2; exit 1; }
if 'CLOUD_INIT_TOP_D' in os.environ:
topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
else:
topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if ! find_root; then
fail "Unable to locate 'setup.py' file that should " \
"exist in the cloud-init root directory."
fi
for fname in ("setup.py", "ChangeLog"):
if not os.path.isfile(os.path.join(topd, fname)):
sys.stderr.write("Unable to locate '%s' file that should "
"exist in cloud-init root directory." % fname)
sys.exit(1)
CHNG_LOG="$ROOT_DIR/ChangeLog"
vermatch = re.compile(r"^[0-9]+[.][0-9]+[.][0-9]+:$")
if [ ! -e "$CHNG_LOG" ]; then
fail "Unable to find 'ChangeLog' file located at '$CHNG_LOG'"
fi
with open(os.path.join(topd, "ChangeLog"), "r") as fp:
for line in fp:
if vermatch.match(line):
sys.stdout.write(line.strip()[:-1] + "\n")
break
VERSION=$(sed -n '/^[0-9]\+[.][0-9]\+[.][0-9]\+:/ {s/://; p; :a;n; ba; }' \
"$CHNG_LOG") &&
[ -n "$VERSION" ] ||
fail "failed to get version from '$CHNG_LOG'"
echo "$VERSION"
sys.exit(0)

View File

@ -1,15 +1,7 @@
#!/bin/bash
ci_files='cloudinit/*.py cloudinit/config/*.py'
test_files=$(find tests -name "*.py")
def_files="$ci_files $test_files"
if [ $# -eq 0 ]; then
files=( )
for f in $def_files; do
[ -f "$f" ] || { echo "failed, $f not a file" 1>&2; exit 1; }
files[${#files[@]}]=${f}
done
files=( bin/cloud-init $(find * -name "*.py" -type f) )
else
files=( "$@" );
fi
@ -44,4 +36,3 @@ cmd=(
echo -e "\nRunning 'cloudinit' pep8:"
echo "${cmd[@]}"
"${cmd[@]}"

View File

@ -1,7 +1,7 @@
#!/bin/bash
if [ $# -eq 0 ]; then
files=( $(find * -name "*.py" -type f) )
files=( bin/cloud-init $(find * -name "*.py" -type f) )
else
files=( "$@" );
fi
@ -16,6 +16,7 @@ cmd=(
--rcfile=$RC_FILE
--disable=R
--disable=I
--dummy-variables-rgx="_"
"${files[@]}"
)