merge from trunk at revno 799
This commit is contained in:
commit 639af3f314
@@ -42,6 +42,15 @@
   in config (LP: #1115833).
 - improve debian support in sysvinit scripts, package build scripts, and
   split sources.list template to be distro specific.
 - support for resizing btrfs root filesystems [Blair Zajac]
 - fix issue when writing ssh keys to .ssh/authorized_keys (LP: #1136343)
 - upstart: cloud-init-nonet.conf trap the TERM signal, so that dmesg or other
   output does not get a 'killed by TERM signal' message.
 - support resizing partitions via growpart or parted (LP: #1136936)
 - allow specifying apt-get command in distro config ('apt_get_command')
 - support different and user-suppliable merging algorithms for cloud-config
   (LP: #1023179)

0.7.1:
 - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6
 - config-drive: map hostname to local-hostname (LP: #1061964)
cloudinit/config/cc_growpart.py  (new file, 272 lines)
@@ -0,0 +1,272 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2011 Canonical Ltd.
#
# Author: Scott Moser <scott.moser@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import os
import os.path
import re
import stat

from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
from cloudinit import util

frequency = PER_ALWAYS

DEFAULT_CONFIG = {
    'mode': 'auto',
    'devices': ['/'],
}


def enum(**enums):
    return type('Enum', (), enums)


RESIZE = enum(SKIPPED="SKIPPED", CHANGED="CHANGED", NOCHANGE="NOCHANGE",
              FAILED="FAILED")

LOG = logging.getLogger(__name__)


def resizer_factory(mode):
    resize_class = None
    if mode == "auto":
        for (_name, resizer) in RESIZERS:
            cur = resizer()
            if cur.available():
                resize_class = cur
                break

        if not resize_class:
            raise ValueError("No resizers available")

    else:
        mmap = {}
        for (k, v) in RESIZERS:
            mmap[k] = v

        if mode not in mmap:
            raise TypeError("unknown resize mode %s" % mode)

        mclass = mmap[mode]()
        if mclass.available():
            resize_class = mclass

        if not resize_class:
            raise ValueError("mode %s not available" % mode)

    return resize_class


class ResizeFailedException(Exception):
    pass


class ResizeParted(object):
    def available(self):
        myenv = os.environ.copy()
        myenv['LANG'] = 'C'

        try:
            (out, _err) = util.subp(["parted", "--help"], env=myenv)
            if re.search(r"COMMAND.*resizepart\s+", out, re.DOTALL):
                return True

        except util.ProcessExecutionError:
            pass
        return False

    def resize(self, diskdev, partnum, partdev):
        before = get_size(partdev)
        try:
            util.subp(["parted", "resizepart", diskdev, partnum])
        except util.ProcessExecutionError as e:
            raise ResizeFailedException(e)

        return (before, get_size(partdev))


class ResizeGrowPart(object):
    def available(self):
        myenv = os.environ.copy()
        myenv['LANG'] = 'C'

        try:
            (out, _err) = util.subp(["growpart", "--help"], env=myenv)
            if re.search(r"--update\s+", out, re.DOTALL):
                return True

        except util.ProcessExecutionError:
            pass
        return False

    def resize(self, diskdev, partnum, partdev):
        before = get_size(partdev)
        try:
            util.subp(["growpart", '--dry-run', diskdev, partnum])
        except util.ProcessExecutionError as e:
            if e.exit_code != 1:
                util.logexc(LOG, ("Failed growpart --dry-run for (%s, %s)" %
                                  (diskdev, partnum)))
                raise ResizeFailedException(e)
            return (before, before)

        try:
            util.subp(["growpart", diskdev, partnum])
        except util.ProcessExecutionError as e:
            util.logexc(LOG, "Failed: growpart %s %s" % (diskdev, partnum))
            raise ResizeFailedException(e)

        return (before, get_size(partdev))


def get_size(filename):
    fd = os.open(filename, os.O_RDONLY)
    try:
        return os.lseek(fd, 0, os.SEEK_END)
    finally:
        os.close(fd)


def device_part_info(devpath):
    # convert an entry in /dev/ to parent disk and partition number

    # input of /dev/vdb or /dev/disk/by-label/foo
    # rpath is hopefully a real-ish path in /dev (vda, sdb..)
    rpath = os.path.realpath(devpath)

    bname = os.path.basename(rpath)
    syspath = "/sys/class/block/%s" % bname

    if not os.path.exists(syspath):
        raise ValueError("%s had no syspath (%s)" % (devpath, syspath))

    ptpath = os.path.join(syspath, "partition")
    if not os.path.exists(ptpath):
        raise TypeError("%s not a partition" % devpath)

    ptnum = util.load_file(ptpath).rstrip()

    # for a partition, real syspath is something like:
    # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1
    rsyspath = os.path.realpath(syspath)
    disksyspath = os.path.dirname(rsyspath)

    diskmajmin = util.load_file(os.path.join(disksyspath, "dev")).rstrip()
    diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin)

    # diskdevpath has something like 253:0
    # and udev has put links in /dev/block/253:0 to the device name in /dev/
    return (diskdevpath, ptnum)


def devent2dev(devent):
    if devent.startswith("/dev/"):
        return devent
    else:
        result = util.get_mount_info(devent)
        if not result:
raise ValueError("Could not determine device of '%s' % dev_ent")
|
||||
        return result[0]


def resize_devices(resizer, devices):
    # returns a tuple of tuples containing (entry-in-devices, action, message)
    info = []
    for devent in devices:
        try:
            blockdev = devent2dev(devent)
        except ValueError as e:
            info.append((devent, RESIZE.SKIPPED,
                         "unable to convert to device: %s" % e,))
            continue

        try:
            statret = os.stat(blockdev)
        except OSError as e:
            info.append((devent, RESIZE.SKIPPED,
                         "stat of '%s' failed: %s" % (blockdev, e),))
            continue

        if not stat.S_ISBLK(statret.st_mode):
            info.append((devent, RESIZE.SKIPPED,
                         "device '%s' not a block device" % blockdev,))
            continue

        try:
            (disk, ptnum) = device_part_info(blockdev)
        except (TypeError, ValueError) as e:
            info.append((devent, RESIZE.SKIPPED,
                         "device_part_info(%s) failed: %s" % (blockdev, e),))
            continue

        try:
            (old, new) = resizer.resize(disk, ptnum, blockdev)
            if old == new:
                info.append((devent, RESIZE.NOCHANGE,
                             "no change necessary (%s, %s)" % (disk, ptnum),))
            else:
                info.append((devent, RESIZE.CHANGED,
                             "changed (%s, %s) from %s to %s" %
                             (disk, ptnum, old, new),))

        except ResizeFailedException as e:
            info.append((devent, RESIZE.FAILED,
                         "failed to resize: disk=%s, ptnum=%s: %s" %
                         (disk, ptnum, e),))

    return info


def handle(_name, cfg, _cloud, log, _args):
    if 'growpart' not in cfg:
        log.debug("No 'growpart' entry in cfg.  Using default: %s" %
                  DEFAULT_CONFIG)
        cfg['growpart'] = DEFAULT_CONFIG

    mycfg = cfg.get('growpart')
    if not isinstance(mycfg, dict):
        log.warn("'growpart' in config was not a dict")
        return

    mode = mycfg.get('mode', "auto")
    if util.is_false(mode):
        log.debug("growpart disabled: mode=%s" % mode)
        return

    devices = util.get_cfg_option_list(cfg, "devices", ["/"])
    if not len(devices):
        log.debug("growpart: empty device list")
        return

    try:
        resizer = resizer_factory(mode)
    except (ValueError, TypeError) as e:
        log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
        if mode != "auto":
            raise e
        return

    resized = resize_devices(resizer, devices)
    for (entry, action, msg) in resized:
        if action == RESIZE.CHANGED:
            log.info("'%s' resized: %s" % (entry, msg))
        else:
            log.debug("'%s' %s: %s" % (entry, action, msg))

RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart))
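For orientation, a minimal driver sketch (not part of the commit, and assuming growpart or parted is actually installed): resizer_factory("auto") picks the first backend whose available() check passes, and resize_devices reports one (entry, action, message) tuple per requested device.

# Minimal sketch (not in the commit); assumes growpart or parted exists.
from cloudinit.config import cc_growpart

resizer = cc_growpart.resizer_factory("auto")  # first available backend
for (entry, action, msg) in cc_growpart.resize_devices(resizer, ["/"]):
    print entry, action, msg  # e.g. "/ CHANGED changed (/dev/vda, 1) ..."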
cloudinit/config/cc_landscape.py
@@ -24,6 +24,7 @@ from StringIO import StringIO

 from configobj import ConfigObj

+from cloudinit import type_utils
 from cloudinit import util

 from cloudinit.settings import PER_INSTANCE
@@ -58,7 +59,8 @@ def handle(_name, cfg, cloud, log, _args):
     if not isinstance(ls_cloudcfg, (dict)):
         raise RuntimeError(("'landscape' key existed in config,"
                             " but not a dictionary type,"
-                            " is a %s instead"), util.obj_name(ls_cloudcfg))
+                            " is a %s instead"),
+                           type_utils.obj_name(ls_cloudcfg))
     if not ls_cloudcfg:
         return
cloudinit/config/cc_mounts.py
@@ -22,6 +22,7 @@ from string import whitespace  # pylint: disable=W0402

 import re

+from cloudinit import type_utils
 from cloudinit import util

 # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
@@ -60,7 +61,7 @@ def handle(_name, cfg, cloud, log, _args):
         # skip something that wasn't a list
         if not isinstance(cfgmnt[i], list):
             log.warn("Mount option %s not a list, got a %s instead",
-                     (i + 1), util.obj_name(cfgmnt[i]))
+                     (i + 1), type_utils.obj_name(cfgmnt[i]))
             continue

         startname = str(cfgmnt[i][0])
cloudinit/config/cc_power_state_change.py
@@ -75,7 +75,7 @@ def load_power_state(cfg):
                         ','.join(opt_map.keys()))

     delay = pstate.get("delay", "now")
-    if delay != "now" and not re.match("\+[0-9]+", delay):
+    if delay != "now" and not re.match(r"\+[0-9]+", delay):
         raise TypeError("power_state[delay] must be 'now' or '+m' (minutes).")

     args = ["shutdown", opt_map[mode], delay]
cloudinit/config/cc_resizefs.py
@@ -27,43 +27,30 @@ from cloudinit import util

 frequency = PER_ALWAYS


+def _resize_btrfs(mount_point, devpth):  # pylint: disable=W0613
+    return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
+
+
+def _resize_ext(mount_point, devpth):  # pylint: disable=W0613
+    return ('resize2fs', devpth)
+
+
+def _resize_xfs(mount_point, devpth):  # pylint: disable=W0613
+    return ('xfs_growfs', devpth)
+
+# Do not use a dictionary as these commands should be able to be used
+# for multiple filesystem types if possible, e.g. one command for
+# ext2, ext3 and ext4.
 RESIZE_FS_PREFIXES_CMDS = [
-    ('ext', 'resize2fs'),
-    ('xfs', 'xfs_growfs'),
+    ('btrfs', _resize_btrfs),
+    ('ext', _resize_ext),
+    ('xfs', _resize_xfs),
 ]

 NOBLOCK = "noblock"


-def nodeify_path(devpth, where, log):
-    try:
-        st_dev = os.stat(where).st_dev
-        dev = os.makedev(os.major(st_dev), os.minor(st_dev))
-        os.mknod(devpth, 0400 | stat.S_IFBLK, dev)
-        return st_dev
-    except:
-        if util.is_container():
-            log.debug("Inside container, ignoring mknod failure in resizefs")
-            return
-        log.warn("Failed to make device node to resize %s at %s",
-                 where, devpth)
-        raise
-
-
-def get_fs_type(st_dev, path, log):
-    try:
-        dev_entries = util.find_devs_with(tag='TYPE', oformat='value',
-                                          no_cache=True, path=path)
-        if not dev_entries:
-            return None
-        return dev_entries[0].strip()
-    except util.ProcessExecutionError:
-        util.logexc(log, ("Failed to get filesystem type"
-                          " of maj=%s, min=%s for path %s"),
-                    os.major(st_dev), os.minor(st_dev), path)
-        raise
-
-
 def handle(name, cfg, _cloud, log, args):
     if len(args) != 0:
         resize_root = args[0]
@@ -80,52 +67,47 @@ def handle(name, cfg, _cloud, log, args):

     # TODO(harlowja): allow what is to be resized to be configurable??
     resize_what = "/"
-    with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.",
-                                    dir=resize_root_d, delete=True) as tfh:
-        devpth = tfh.name
-
-        # Delete the file so that mknod will work
-        # but don't change the file handle to know that its
-        # removed so that when a later call that recreates
-        # occurs this temporary file will still benefit from
-        # auto deletion
-        tfh.unlink_now()
-
-        st_dev = nodeify_path(devpth, resize_what, log)
-        fs_type = get_fs_type(st_dev, devpth, log)
-        if not fs_type:
-            log.warn("Could not determine filesystem type of %s", resize_what)
-            return
+    result = util.get_mount_info(resize_what, log)
+    if not result:
+        log.warn("Could not determine filesystem type of %s", resize_what)
+        return

-        resizer = None
-        fstype_lc = fs_type.lower()
-        for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
-            if fstype_lc.startswith(pfix):
-                resizer = root_cmd
-                break
+    (devpth, fs_type, mount_point) = result

-        if not resizer:
-            log.warn("Not resizing unknown filesystem type %s for %s",
-                     fs_type, resize_what)
-            return
+    # Ensure the path is a block device.
+    if not stat.S_ISBLK(os.stat(devpth).st_mode):
+        log.debug("The %s device which was found for mount point %s for %s "
+                  "is not a block device" % (devpth, mount_point, resize_what))
+        return

-        log.debug("Resizing %s (%s) using %s", resize_what, fs_type, resizer)
-        resize_cmd = [resizer, devpth]
+    resizer = None
+    fstype_lc = fs_type.lower()
+    for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
+        if fstype_lc.startswith(pfix):
+            resizer = root_cmd
+            break

-        if resize_root == NOBLOCK:
-            # Fork to a child that will run
-            # the resize command
-            util.fork_cb(do_resize, resize_cmd, log)
-            # Don't delete the file now in the parent
-            tfh.delete = False
-        else:
-            do_resize(resize_cmd, log)
+    if not resizer:
+        log.warn("Not resizing unknown filesystem type %s for %s",
+                 fs_type, resize_what)
+        return
+
+    resize_cmd = resizer(resize_what, devpth)
+    log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
+              ' '.join(resize_cmd))
+
+    if resize_root == NOBLOCK:
+        # Fork to a child that will run
+        # the resize command
+        util.fork_cb(do_resize, resize_cmd, log)
+    else:
+        do_resize(resize_cmd, log)

     action = 'Resized'
     if resize_root == NOBLOCK:
         action = 'Resizing (via forking)'
-    log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)",
-              action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root)
+    log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
+              resize_root)


 def do_resize(resize_cmd, log):
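The resize table now holds callables that build the command line rather than bare command names, so one builder can cover a whole filesystem family (ext2/ext3/ext4). A sketch of the lookup handle() performs, with a hypothetical device path:

# Sketch (hypothetical device path): resolve and build a resize command
# via the new callable RESIZE_FS_PREFIXES_CMDS entries.
fs_type = "ext4"
devpth = "/dev/vda1"
for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
    if fs_type.lower().startswith(pfix):
        print root_cmd("/", devpth)  # -> ('resize2fs', '/dev/vda1')
        break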
cloudinit/config/cc_ssh.py
@@ -126,7 +126,7 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):

     keys = set(keys)
     if user:
-        ssh_util.setup_user_keys(keys, user, '')
+        ssh_util.setup_user_keys(keys, user)

     if disable_root:
         if not user:
@@ -135,4 +135,4 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
     else:
         key_prefix = ''

-    ssh_util.setup_user_keys(keys, 'root', key_prefix)
+    ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
cloudinit/distros/__init__.py
@@ -31,6 +31,7 @@ import re
 from cloudinit import importer
 from cloudinit import log as logging
 from cloudinit import ssh_util
+from cloudinit import type_utils
 from cloudinit import util

 from cloudinit.distros.parsers import hosts
@@ -73,7 +74,7 @@ class Distro(object):
         self._apply_hostname(hostname)

     @abc.abstractmethod
-    def package_command(self, cmd, args=None):
+    def package_command(self, cmd, args=None, pkgs=None):
         raise NotImplementedError()

     @abc.abstractmethod
@@ -370,7 +371,7 @@ class Distro(object):
         # Import SSH keys
         if 'ssh_authorized_keys' in kwargs:
             keys = set(kwargs['ssh_authorized_keys']) or []
-            ssh_util.setup_user_keys(keys, name, key_prefix=None)
+            ssh_util.setup_user_keys(keys, name, options=None)

         return True

@@ -445,7 +446,7 @@ class Distro(object):
             lines.append("%s %s" % (user, rules))
         else:
             msg = "Can not create sudoers rule addition with type %r"
-            raise TypeError(msg % (util.obj_name(rules)))
+            raise TypeError(msg % (type_utils.obj_name(rules)))
         content = "\n".join(lines)
         content += "\n"  # trailing newline

@@ -568,7 +569,7 @@ def _normalize_groups(grp_cfg):
                     c_grp_cfg[k] = [v]
                 else:
                     raise TypeError("Bad group member type %s" %
-                                    util.obj_name(v))
+                                    type_utils.obj_name(v))
             else:
                 if isinstance(v, (list)):
                     c_grp_cfg[k].extend(v)
@@ -576,13 +577,13 @@ def _normalize_groups(grp_cfg):
                     c_grp_cfg[k].append(v)
                 else:
                     raise TypeError("Bad group member type %s" %
-                                    util.obj_name(v))
+                                    type_utils.obj_name(v))
         elif isinstance(i, (str, basestring)):
             if i not in c_grp_cfg:
                 c_grp_cfg[i] = []
         else:
             raise TypeError("Unknown group name type %s" %
-                            util.obj_name(i))
+                            type_utils.obj_name(i))
     grp_cfg = c_grp_cfg
     groups = {}
     if isinstance(grp_cfg, (dict)):
@@ -591,7 +592,7 @@ def _normalize_groups(grp_cfg):
     else:
         raise TypeError(("Group config must be list, dict "
                          " or string types only and not %s") %
-                        util.obj_name(grp_cfg))
+                        type_utils.obj_name(grp_cfg))
     return groups


@@ -622,7 +623,7 @@ def _normalize_users(u_cfg, def_user_cfg=None):
                     ad_ucfg.append(v)
                 else:
                     raise TypeError(("Unmappable user value type %s"
-                                     " for key %s") % (util.obj_name(v), k))
+                                     " for key %s") % (type_utils.obj_name(v), k))
         u_cfg = ad_ucfg
     elif isinstance(u_cfg, (str, basestring)):
         u_cfg = util.uniq_merge_sorted(u_cfg)
@@ -647,7 +648,7 @@ def _normalize_users(u_cfg, def_user_cfg=None):
     else:
         raise TypeError(("User config must be dictionary/list "
                          " or string types only and not %s") %
-                        util.obj_name(user_config))
+                        type_utils.obj_name(user_config))

     # Ensure user options are in the right python friendly format
     if users:
@@ -740,7 +741,7 @@ def normalize_users_groups(cfg, distro):
         }
     if not isinstance(old_user, (dict)):
         LOG.warn(("Format for 'user' key must be a string or "
-                  "dictionary and not %s"), util.obj_name(old_user))
+                  "dictionary and not %s"), type_utils.obj_name(old_user))
         old_user = {}

     # If no old user format, then assume the distro
@@ -766,7 +767,7 @@ def normalize_users_groups(cfg, distro):
     if not isinstance(base_users, (list, dict, str, basestring)):
         LOG.warn(("Format for 'users' key must be a comma separated string"
                   " or a dictionary or a list and not %s"),
-                 util.obj_name(base_users))
+                 type_utils.obj_name(base_users))
         base_users = []

     if old_user:
@@ -776,7 +777,7 @@ def normalize_users_groups(cfg, distro):
             # Just add it on at the end...
             base_users.append({'name': 'default'})
         elif isinstance(base_users, (dict)):
-            base_users['default'] = base_users.get('default', True)
+            base_users['default'] = dict(base_users).get('default', True)
         elif isinstance(base_users, (str, basestring)):
             # Just append it on to be re-parsed later
             base_users += ",default"
cloudinit/distros/debian.py
@@ -33,6 +33,10 @@ from cloudinit.settings import PER_INSTANCE

 LOG = logging.getLogger(__name__)

+APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold',
+                   '--option=Dpkg::options::=--force-unsafe-io',
+                   '--assume-yes', '--quiet')
+

 class Distro(distros.Distro):
     hostname_conf_fn = "/etc/hostname"
@@ -142,13 +146,15 @@ class Distro(distros.Distro):
         # This ensures that the correct tz will be used for the system
         util.copy(tz_file, self.tz_local_fn)

-    def package_command(self, command, args=None, pkgs=[]):
+    def package_command(self, command, args=None, pkgs=None):
+        if pkgs is None:
+            pkgs = []
+
         e = os.environ.copy()
         # See: http://tiny.cc/kg91fw
         # Or: http://tiny.cc/mh91fw
         e['DEBIAN_FRONTEND'] = 'noninteractive'
-        cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confold',
-               '--assume-yes', '--quiet']
+        cmd = list(self.get_option("apt_get_command", APT_GET_COMMAND))

         if args and isinstance(args, str):
             cmd.append(args)
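package_command no longer hard-codes the apt-get argv; it consults the distro option 'apt_get_command' and falls back to APT_GET_COMMAND. A rough sketch of the resolution, with a plain dict standing in for Distro.get_option and a hypothetical override value:

# Rough sketch: a dict stands in for Distro.get_option; the override
# value shown here is hypothetical.
APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold',
                   '--option=Dpkg::options::=--force-unsafe-io',
                   '--assume-yes', '--quiet')
distro_cfg = {'apt_get_command': ['apt-get', '--assume-yes', '--quiet']}
cmd = list(distro_cfg.get('apt_get_command') or APT_GET_COMMAND)
cmd.append('install')  # package_command then appends the subcommand/pkgs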
cloudinit/distros/rhel.py
@@ -208,7 +208,10 @@ class Distro(distros.Distro):
         # This ensures that the correct tz will be used for the system
         util.copy(tz_file, self.tz_local_fn)

-    def package_command(self, command, args=None, pkgs=[]):
+    def package_command(self, command, args=None, pkgs=None):
+        if pkgs is None:
+            pkgs = []
+
         cmd = ['yum']
         # If enabled, then yum will be tolerant of errors on the command line
         # with regard to packages.
cloudinit/handlers/__init__.py
@@ -27,6 +27,7 @@ from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)

 from cloudinit import importer
 from cloudinit import log as logging
+from cloudinit import type_utils
 from cloudinit import util

 LOG = logging.getLogger(__name__)
@@ -69,7 +70,6 @@ INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()),


 class Handler(object):
-
     __metaclass__ = abc.ABCMeta

     def __init__(self, frequency, version=2):
@@ -77,53 +77,66 @@ class Handler(object):
         self.frequency = frequency

     def __repr__(self):
-        return "%s: [%s]" % (util.obj_name(self), self.list_types())
+        return "%s: [%s]" % (type_utils.obj_name(self), self.list_types())

     @abc.abstractmethod
     def list_types(self):
         raise NotImplementedError()

-    def handle_part(self, data, ctype, filename, payload, frequency):
-        return self._handle_part(data, ctype, filename, payload, frequency)
-
     @abc.abstractmethod
-    def _handle_part(self, data, ctype, filename, payload, frequency):
+    def handle_part(self, *args, **kwargs):
         raise NotImplementedError()


-def run_part(mod, data, ctype, filename, payload, frequency):
+def run_part(mod, data, filename, payload, frequency, headers):
     mod_freq = mod.frequency
     if not (mod_freq == PER_ALWAYS or
             (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)):
         return
-    mod_ver = mod.handler_version
     # Sanity checks on version (should be an int convertable)
     try:
+        mod_ver = mod.handler_version
         mod_ver = int(mod_ver)
-    except:
+    except (TypeError, ValueError, AttributeError):
         mod_ver = 1
+    content_type = headers['Content-Type']
     try:
         LOG.debug("Calling handler %s (%s, %s, %s) with frequency %s",
-                  mod, ctype, filename, mod_ver, frequency)
-        if mod_ver >= 2:
+                  mod, content_type, filename, mod_ver, frequency)
+        if mod_ver == 3:
+            # Treat as v. 3 which does get a frequency + headers
+            mod.handle_part(data, content_type, filename,
+                            payload, frequency, headers)
+        elif mod_ver == 2:
             # Treat as v. 2 which does get a frequency
-            mod.handle_part(data, ctype, filename, payload, frequency)
-        else:
+            mod.handle_part(data, content_type, filename,
+                            payload, frequency)
+        elif mod_ver == 1:
             # Treat as v. 1 which gets no frequency
-            mod.handle_part(data, ctype, filename, payload)
+            mod.handle_part(data, content_type, filename, payload)
+        else:
+            raise ValueError("Unknown module version %s" % (mod_ver))
     except:
         util.logexc(LOG, ("Failed calling handler %s (%s, %s, %s)"
                           " with frequency %s"),
-                    mod, ctype, filename,
+                    mod, content_type, filename,
                     mod_ver, frequency)


 def call_begin(mod, data, frequency):
-    run_part(mod, data, CONTENT_START, None, None, frequency)
+    # Create a fake header set
+    headers = {
+        'Content-Type': CONTENT_START,
+    }
+    run_part(mod, data, None, None, frequency, headers)


 def call_end(mod, data, frequency):
-    run_part(mod, data, CONTENT_END, None, None, frequency)
+    # Create a fake header set
+    headers = {
+        'Content-Type': CONTENT_END,
+    }
+    run_part(mod, data, None, None, frequency, headers)


 def walker_handle_handler(pdata, _ctype, _filename, payload):
@@ -173,26 +186,27 @@ def _escape_string(text):
     return text


-def walker_callback(pdata, ctype, filename, payload):
-    if ctype in PART_CONTENT_TYPES:
-        walker_handle_handler(pdata, ctype, filename, payload)
+def walker_callback(data, filename, payload, headers):
+    content_type = headers['Content-Type']
+    if content_type in PART_CONTENT_TYPES:
+        walker_handle_handler(data, content_type, filename, payload)
         return
-    handlers = pdata['handlers']
-    if ctype in pdata['handlers']:
-        run_part(handlers[ctype], pdata['data'], ctype, filename,
-                 payload, pdata['frequency'])
+    handlers = data['handlers']
+    if content_type in handlers:
+        run_part(handlers[content_type], data['data'], filename,
+                 payload, data['frequency'], headers)
     elif payload:
         # Extract the first line or 24 bytes for displaying in the log
         start = _extract_first_or_bytes(payload, 24)
         details = "'%s...'" % (_escape_string(start))
-        if ctype == NOT_MULTIPART_TYPE:
+        if content_type == NOT_MULTIPART_TYPE:
             LOG.warning("Unhandled non-multipart (%s) userdata: %s",
-                        ctype, details)
+                        content_type, details)
         else:
             LOG.warning("Unhandled unknown content-type (%s) userdata: %s",
-                        ctype, details)
+                        content_type, details)
     else:
-        LOG.debug("empty payload of type %s" % ctype)
+        LOG.debug("Empty payload of type %s", content_type)


 # Callback is a function that will be called with
@@ -212,7 +226,10 @@ def walk(msg, callback, data):
         if not filename:
             filename = PART_FN_TPL % (partnum)

-        callback(data, ctype, filename, part.get_payload(decode=True))
+        headers = dict(part)
+        LOG.debug(headers)
+        headers['Content-Type'] = ctype
+        callback(data, filename, part.get_payload(decode=True), headers)
         partnum = partnum + 1
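run_part now dispatches on handler_version 1, 2 or 3, with version 3 additionally receiving the part's MIME headers. A hypothetical version-3 handler following the Handler API above:

# Hypothetical v3 part handler matching the new run_part dispatch.
from cloudinit import handlers
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS

LOG = logging.getLogger(__name__)


class EchoPartHandler(handlers.Handler):
    def __init__(self, **_kwargs):
        handlers.Handler.__init__(self, PER_ALWAYS, version=3)

    def list_types(self):
        return [handlers.type_from_starts_with("#echo")]

    def handle_part(self, data, ctype, filename, payload, frequency, headers):
        # 'headers' is the dict built in walk(); 'Content-Type' is always set
        LOG.debug("part %s (%s): merge=%s", filename, ctype,
                  headers.get('Merge-Type'))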
cloudinit/handlers/boot_hook.py
@@ -56,7 +56,8 @@ class BootHookPartHandler(handlers.Handler):
         util.write_file(filepath, contents, 0700)
         return filepath

-    def _handle_part(self, _data, ctype, filename, payload, _frequency):
+    def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221
+                    payload, frequency):  # pylint: disable=W0613
         if ctype in handlers.CONTENT_SIGNALS:
             return
cloudinit/handlers/cloud_config.py
@@ -22,41 +22,103 @@

 from cloudinit import handlers
 from cloudinit import log as logging
+from cloudinit import mergers
 from cloudinit import util

 from cloudinit.settings import (PER_ALWAYS)

 LOG = logging.getLogger(__name__)

+MERGE_HEADER = 'Merge-Type'
+DEF_MERGERS = mergers.default_mergers()
+

 class CloudConfigPartHandler(handlers.Handler):
     def __init__(self, paths, **_kwargs):
-        handlers.Handler.__init__(self, PER_ALWAYS)
-        self.cloud_buf = []
+        handlers.Handler.__init__(self, PER_ALWAYS, version=3)
+        self.cloud_buf = None
         self.cloud_fn = paths.get_ipath("cloud_config")
+        self.file_names = []
+        self.mergers = [DEF_MERGERS]

     def list_types(self):
         return [
             handlers.type_from_starts_with("#cloud-config"),
         ]

-    def _write_cloud_config(self, buf):
+    def _write_cloud_config(self):
         if not self.cloud_fn:
             return
-        lines = [str(b) for b in buf]
-        payload = "\n".join(lines)
-        util.write_file(self.cloud_fn, payload, 0600)
+        # Capture which files we merged from...
+        file_lines = []
+        if self.file_names:
+            file_lines.append("# from %s files" % (len(self.file_names)))
+            for fn in self.file_names:
+                file_lines.append("# %s" % (fn))
+            file_lines.append("")
+        if self.cloud_buf is not None:
+            # Something was actually gathered....
+            lines = [
+                "#cloud-config",
+                '',
+            ]
+            lines.extend(file_lines)
+            lines.append(util.yaml_dumps(self.cloud_buf))
+        else:
+            lines = []
+        util.write_file(self.cloud_fn, "\n".join(lines), 0600)

-    def _handle_part(self, _data, ctype, filename, payload, _frequency):
+    def _extract_mergers(self, payload, headers):
+        merge_header_headers = ''
+        for h in [MERGE_HEADER, 'X-%s' % (MERGE_HEADER)]:
+            tmp_h = headers.get(h, '')
+            if tmp_h:
+                merge_header_headers = tmp_h
+                break
+        # Select either the merge-type from the content
+        # or the merge type from the headers or default to our own set
+        # if neither exists (or is empty) from the latter.
+        payload_yaml = util.load_yaml(payload)
+        mergers_yaml = mergers.dict_extract_mergers(payload_yaml)
+        mergers_header = mergers.string_extract_mergers(merge_header_headers)
+        all_mergers = []
+        all_mergers.extend(mergers_yaml)
+        all_mergers.extend(mergers_header)
+        if not all_mergers:
+            all_mergers = DEF_MERGERS
+        return all_mergers
+
+    def _merge_part(self, payload, headers):
+        next_mergers = self._extract_mergers(payload, headers)
+        # Use the merger list from the last call, since it is the one
+        # that will be defining how to merge with the next payload.
+        curr_mergers = list(self.mergers[-1])
+        LOG.debug("Merging by applying %s", curr_mergers)
+        self.mergers.append(next_mergers)
+        merger = mergers.construct(curr_mergers)
+        if self.cloud_buf is None:
+            # First time through, merge with an empty dict...
+            self.cloud_buf = {}
+        self.cloud_buf = merger.merge(self.cloud_buf,
+                                      util.load_yaml(payload))
+
+    def _reset(self):
+        self.file_names = []
+        self.cloud_buf = None
+        self.mergers = [DEF_MERGERS]
+
+    def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221
+                    payload, _frequency, headers):  # pylint: disable=W0613
         if ctype == handlers.CONTENT_START:
-            self.cloud_buf = []
+            self._reset()
             return
         if ctype == handlers.CONTENT_END:
-            self._write_cloud_config(self.cloud_buf)
-            self.cloud_buf = []
+            self._write_cloud_config()
+            self._reset()
             return

         filename = util.clean_filename(filename)
         if not filename:
             filename = '??'
-        self.cloud_buf.extend(["#%s" % (filename), str(payload)])
+        try:
+            self._merge_part(payload, headers)
+            self.file_names.append(filename)
+        except:
+            util.logexc(LOG, "Failed at merging in cloud config part from %s",
+                        filename)
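End to end, handle_part now folds each part into a single dict instead of concatenating text. A sketch of what two hypothetical cloud-config payloads reduce to under the default merger chain (mergers.construct and default_mergers are defined in cloudinit/mergers, added below):

# Sketch (hypothetical payloads): what _merge_part does to two parts.
from cloudinit import mergers

merger = mergers.construct(mergers.default_mergers())
part1 = {'packages': ['vim'], 'runcmd': ['echo one']}
part2 = {'runcmd': ['echo two'], 'locale': 'C'}
print merger.merge(part1, part2)
# The default "list()+dict()+str()" chain keeps the first 'runcmd'; a part
# can ask for "list(extend)+..." via merge_how/Merge-Type to concatenate.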
cloudinit/handlers/shell_script.py
@@ -41,7 +41,8 @@ class ShellScriptPartHandler(handlers.Handler):
             handlers.type_from_starts_with("#!"),
         ]

-    def _handle_part(self, _data, ctype, filename, payload, _frequency):
+    def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221
+                    payload, frequency):  # pylint: disable=W0613
         if ctype in handlers.CONTENT_SIGNALS:
             # TODO(harlowja): maybe delete existing things here
             return
cloudinit/handlers/upstart_job.py
@@ -42,7 +42,8 @@ class UpstartJobPartHandler(handlers.Handler):
             handlers.type_from_starts_with("#upstart-job"),
         ]

-    def _handle_part(self, _data, ctype, filename, payload, frequency):
+    def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221
+                    payload, frequency):
         if ctype in handlers.CONTENT_SIGNALS:
             return

@@ -65,6 +66,14 @@ class UpstartJobPartHandler(handlers.Handler):
         path = os.path.join(self.upstart_dir, filename)
         util.write_file(path, payload, 0644)

-        # if inotify support is not present in the root filesystem
-        # (overlayroot) then we need to tell upstart to re-read /etc
-        util.subp(["initctl", "reload-configuration"], capture=False)
+        # FIXME LATER (LP: #1124384)
+        # a bug in upstart means that invoking reload-configuration
+        # at this stage in boot causes havoc.  So, until that is fixed
+        # we will not do that.  However, I'd like to be able to easily
+        # test to see if this bug is still present in an image with
+        # a newer upstart.  So, a boot hook could easily write this file.
+        if os.path.exists("/run/cloud-init-upstart-reload"):
+            # if inotify support is not present in the root filesystem
+            # (overlayroot) then we need to tell upstart to re-read /etc
+
+            util.subp(["initctl", "reload-configuration"], capture=False)
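Until LP: #1124384 is resolved, the reload is opt-in via that flag file; per the FIXME, a boot hook could create it to re-enable the old behavior for testing. A hypothetical sketch in Python:

# Hypothetical opt-in hook: create the flag the handler checks before
# running 'initctl reload-configuration'.
from cloudinit import util

util.write_file("/run/cloud-init-upstart-reload", "", 0644)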
cloudinit/helpers.py
@@ -32,6 +32,7 @@ from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
                                 CFG_ENV_NAME)

 from cloudinit import log as logging
+from cloudinit import type_utils
 from cloudinit import util

 LOG = logging.getLogger(__name__)
@@ -68,7 +69,7 @@ class FileLock(object):
         self.fn = fn

     def __str__(self):
-        return "<%s using file %r>" % (util.obj_name(self), self.fn)
+        return "<%s using file %r>" % (type_utils.obj_name(self), self.fn)


 def canon_sem_name(name):
cloudinit/mergers/__init__.py  (new file, 154 lines)
@@ -0,0 +1,154 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import re

from cloudinit import importer
from cloudinit import log as logging
from cloudinit import type_utils

NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$")

LOG = logging.getLogger(__name__)
DEF_MERGE_TYPE = "list()+dict()+str()"


class UnknownMerger(object):
    # Named differently so auto-method finding
    # doesn't pick this up if there is ever a type
    # named "unknown"
    def _handle_unknown(self, _meth_wanted, value, _merge_with):
        return value

    # This merging will attempt to look for a '_on_X' method
    # in our own object for a given object Y with type X,
    # if found it will be called to perform the merge of a source
    # object and an object to merge_with.
    #
    # If not found the merge will be given to a '_handle_unknown'
    # function which can decide what to do with the 2 values.
    def merge(self, source, merge_with):
        type_name = type_utils.obj_name(source)
        type_name = type_name.lower()
        method_name = "_on_%s" % (type_name)
        meth = None
        args = [source, merge_with]
        if hasattr(self, method_name):
            meth = getattr(self, method_name)
        if not meth:
            meth = self._handle_unknown
            args.insert(0, method_name)
        return meth(*args)


class LookupMerger(UnknownMerger):
    def __init__(self, lookups=None):
        UnknownMerger.__init__(self)
        if lookups is None:
            self._lookups = []
        else:
            self._lookups = lookups

    # For items which can not be merged by the parent this object
    # will lookup in an internally maintained set of objects and
    # find which one of those objects can perform the merge. If
    # any of the contained objects have the needed method, they
    # will be called to perform the merge.
    def _handle_unknown(self, meth_wanted, value, merge_with):
        meth = None
        for merger in self._lookups:
            if hasattr(merger, meth_wanted):
                # First one that has that method/attr gets to be
                # the one that will be called
                meth = getattr(merger, meth_wanted)
                break
        if not meth:
            return UnknownMerger._handle_unknown(self, meth_wanted,
                                                 value, merge_with)
        return meth(value, merge_with)


def dict_extract_mergers(config):
    parsed_mergers = []
    raw_mergers = config.get('merge_how')
    if raw_mergers is None:
        raw_mergers = config.get('merge_type')
    if raw_mergers is None:
        return parsed_mergers
    if isinstance(raw_mergers, (str, basestring)):
        return string_extract_mergers(raw_mergers)
    for m in raw_mergers:
        if isinstance(m, (dict)):
            name = m['name']
            name = name.replace("-", "_").strip()
            opts = m['settings']
        else:
            name = m[0]
            if len(m) >= 2:
                opts = m[1:]
            else:
                opts = []
        if name:
            parsed_mergers.append((name, opts))
    return parsed_mergers


def string_extract_mergers(merge_how):
    parsed_mergers = []
    for m_name in merge_how.split("+"):
        # Canonicalize the name (so that it can be found
        # even when users alter it in various ways)
        m_name = m_name.lower().strip()
        m_name = m_name.replace("-", "_")
        if not m_name:
            continue
        match = NAME_MTCH.match(m_name)
        if not match:
            msg = ("Matcher identifier '%s' is not in the right format" %
                   (m_name))
            raise ValueError(msg)
        (m_name, m_ops) = match.groups()
        m_ops = m_ops.strip().split(",")
        m_ops = [m.strip().lower() for m in m_ops if m.strip()]
        parsed_mergers.append((m_name, m_ops))
    return parsed_mergers


def default_mergers():
    return tuple(string_extract_mergers(DEF_MERGE_TYPE))


def construct(parsed_mergers):
    mergers_to_be = []
    for (m_name, m_ops) in parsed_mergers:
        merger_locs = importer.find_module(m_name,
                                           [__name__],
                                           ['Merger'])
        if not merger_locs:
            msg = "Could not find merger named '%s'" % (m_name)
            raise ImportError(msg)
        else:
            mod = importer.import_module(merger_locs[0])
            mod_attr = getattr(mod, 'Merger')
            mergers_to_be.append((mod_attr, m_ops))
    # Now form them...
    mergers = []
    root = LookupMerger(mergers)
    for (attr, opts) in mergers_to_be:
        mergers.append(attr(root, opts))
    return root
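For reference, a sketch of how a user-supplied merge string is parsed and assembled with the functions above:

# Sketch: parse a merge string and build the root LookupMerger.
from cloudinit import mergers

print mergers.string_extract_mergers("list(extend)+dict()+str(append)")
# -> [('list', ['extend']), ('dict', []), ('str', ['append'])]

root = mergers.construct(mergers.string_extract_mergers("dict()+list()+str()"))
print root.merge({'a': 1}, {'a': 2, 'b': 3})
# -> {'a': 1, 'b': 3}  (ints have no '_on_int' merger, so the original wins)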
cloudinit/mergers/dict.py  (new file, 48 lines)
@@ -0,0 +1,48 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.


class Merger(object):
    def __init__(self, merger, opts):
        self._merger = merger
        self._overwrite = 'overwrite' in opts

    # This merging algorithm will attempt to merge with
    # another dictionary; on encountering any other type of object
    # it will not merge with said object, but will instead return
    # the original value.
    #
    # On encountering a dictionary, it will create a new dictionary
    # composed of the original and the one to merge with. If 'overwrite'
    # is enabled then keys that exist in the original will be overwritten
    # by keys in the one to merge with (and associated values). Otherwise,
    # if not in overwrite mode, the 2 conflicting keys themselves will
    # be merged.
    def _on_dict(self, value, merge_with):
        if not isinstance(merge_with, (dict)):
            return value
        merged = dict(value)
        for (k, v) in merge_with.items():
            if k in merged:
                if not self._overwrite:
                    merged[k] = self._merger.merge(merged[k], v)
                else:
                    merged[k] = v
            else:
                merged[k] = v
        return merged
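A sketch contrasting the two dict modes (built through mergers.construct from cloudinit/mergers above):

# Sketch: dict merging with and without 'overwrite'.
from cloudinit import mergers

keep = mergers.construct([('dict', []), ('list', []), ('str', [])])
clobber = mergers.construct([('dict', ['overwrite']), ('list', []), ('str', [])])
print keep.merge({'x': 'a'}, {'x': 'b'})     # -> {'x': 'a'}
print clobber.merge({'x': 'a'}, {'x': 'b'})  # -> {'x': 'b'}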
cloudinit/mergers/list.py  (new file, 50 lines)
@@ -0,0 +1,50 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.


class Merger(object):
    def __init__(self, merger, opts):
        self._merger = merger
        self._discard_non = 'discard_non_list' in opts
        self._extend = 'extend' in opts

    def _on_tuple(self, value, merge_with):
        return self._on_list(list(value), merge_with)

    # On encountering a list or tuple type this action will be applied:
    # a new list is always returned. If the value to merge with is itself
    # a list and we have been told to 'extend', then the new list will
    # be extended with the other list; if not in 'extend' mode the
    # original values are returned unchanged.
    #
    # If the value to merge with is not a list, and we are set to discard
    # non-list values, then no modification takes place; otherwise we just
    # append the value to merge with onto the end of our own list.
    def _on_list(self, value, merge_with):
        new_value = list(value)
        if isinstance(merge_with, (tuple, list)):
            if self._extend:
                new_value.extend(merge_with)
            else:
                return new_value
        else:
            if not self._discard_non:
                new_value.append(merge_with)
        return new_value
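A sketch of the list options in action:

# Sketch: list merging; only 'extend' concatenates.
from cloudinit import mergers

default = mergers.construct([('list', []), ('dict', []), ('str', [])])
extend = mergers.construct([('list', ['extend']), ('dict', []), ('str', [])])
print default.merge(['a'], ['b'])  # -> ['a'] (original returned unchanged)
print extend.merge(['a'], ['b'])   # -> ['a', 'b']
print default.merge(['a'], 'b')    # -> ['a', 'b'] (non-list appended)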
cloudinit/mergers/str.py  (new file, 39 lines)
@@ -0,0 +1,39 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.


class Merger(object):
    def __init__(self, _merger, opts):
        self._append = 'append' in opts

    # On encountering a unicode object to merge value with
    # we will for now just proxy into the string method to let it handle it.
    def _on_unicode(self, value, merge_with):
        return self._on_str(value, merge_with)

    # On encountering a string object to merge with we will
    # perform the following action: if appending we will
    # merge them together, otherwise we will just return value.
    def _on_str(self, value, merge_with):
        if not self._append:
            return value
        else:
            if isinstance(value, (unicode)):
                return value + unicode(merge_with)
            else:
                return value + str(merge_with)
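And the string case:

# Sketch: strings only combine when 'append' is requested.
from cloudinit import mergers

appender = mergers.construct([('str', ['append']), ('dict', []), ('list', [])])
print appender.merge('foo', 'bar')  # -> 'foobar'
keeper = mergers.construct([('str', []), ('dict', []), ('list', [])])
print keeper.merge('foo', 'bar')    # -> 'foo'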
cloudinit/sources/DataSourceAltCloud.py
@@ -30,6 +30,7 @@ import os.path
 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import util

+from cloudinit.util import ProcessExecutionError

 LOG = logging.getLogger(__name__)
@@ -91,8 +92,8 @@ class DataSourceAltCloud(sources.DataSource):
         self.supported_seed_starts = ("/", "file://")

     def __str__(self):
-        mstr = "%s [seed=%s]" % (util.obj_name(self), self.seed)
-        return mstr
+        root = sources.DataSource.__str__(self)
+        return "%s [seed=%s]" % (root, self.seed)

     def get_cloud_type(self):
         '''
cloudinit/sources/DataSourceCloudStack.py
@@ -48,9 +48,6 @@ class DataSourceCloudStack(sources.DataSource):
             raise RuntimeError("No virtual router found!")
         self.metadata_address = "http://%s/" % (vr_addr)

-    def __str__(self):
-        return util.obj_name(self)
-
     def _get_url_settings(self):
         mcfg = self.ds_cfg
         if not mcfg:
cloudinit/sources/DataSourceConfigDrive.py
@@ -51,7 +51,9 @@ class DataSourceConfigDrive(sources.DataSource):
         self.ec2_metadata = None

     def __str__(self):
-        mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode,
+        root = sources.DataSource.__str__(self)
+        mstr = "%s [%s,ver=%s]" % (root,
+                                   self.dsmode,
                                    self.version)
         mstr += "[source=%s]" % (self.source)
         return mstr
@@ -152,7 +154,7 @@ class DataSourceConfigDrive(sources.DataSource):
             return False

         md = results['metadata']
-        md = util.mergedict(md, DEFAULT_METADATA)
+        md = util.mergemanydict([md, DEFAULT_METADATA])

         # Perform some metadata 'fixups'
         #
cloudinit/sources/DataSourceEc2.py
@@ -49,9 +49,6 @@ class DataSourceEc2(sources.DataSource):
         self.seed_dir = os.path.join(paths.seed_dir, "ec2")
         self.api_ver = DEF_MD_VERSION

-    def __str__(self):
-        return util.obj_name(self)
-
     def get_data(self):
         seed_ret = {}
         if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
cloudinit/sources/DataSourceMAAS.py
@@ -50,7 +50,8 @@ class DataSourceMAAS(sources.DataSource):
         self.oauth_clockskew = None

     def __str__(self):
-        return "%s [%s]" % (util.obj_name(self), self.base_url)
+        root = sources.DataSource.__str__(self)
+        return "%s [%s]" % (root, self.base_url)

     def get_data(self):
         mcfg = self.ds_cfg
cloudinit/sources/DataSourceNoCloud.py
@@ -40,9 +40,8 @@ class DataSourceNoCloud(sources.DataSource):
         self.supported_seed_starts = ("/", "file://")

     def __str__(self):
-        mstr = "%s [seed=%s][dsmode=%s]" % (util.obj_name(self),
-                                            self.seed, self.dsmode)
-        return mstr
+        root = sources.DataSource.__str__(self)
+        return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)

     def get_data(self):
         defaults = {
@@ -65,7 +64,7 @@ class DataSourceNoCloud(sources.DataSource):
         # Check to see if the seed dir has data.
         seedret = {}
         if util.read_optional_seed(seedret, base=self.seed_dir + "/"):
-            md = util.mergedict(md, seedret['meta-data'])
+            md = util.mergemanydict([md, seedret['meta-data']])
             ud = seedret['user-data']
             found.append(self.seed_dir)
             LOG.debug("Using seeded cache data from %s", self.seed_dir)
@@ -82,15 +81,15 @@ class DataSourceNoCloud(sources.DataSource):
             if self.ds_cfg['user-data']:
                 ud = self.ds_cfg['user-data']
             if self.ds_cfg['meta-data'] is not False:
-                md = util.mergedict(md, self.ds_cfg['meta-data'])
+                md = util.mergemanydict([md, self.ds_cfg['meta-data']])
             if 'ds_config' not in found:
                 found.append("ds_config")

-        if self.ds_cfg.get('fs_label', "cidata"):
+        label = self.ds_cfg.get('fs_label', "cidata")
+        if label is not None:
             fslist = util.find_devs_with("TYPE=vfat")
             fslist.extend(util.find_devs_with("TYPE=iso9660"))

-            label = self.ds_cfg.get('fs_label')
             label_list = util.find_devs_with("LABEL=%s" % label)
             devlist = list(set(fslist) & set(label_list))
             devlist.sort(reverse=True)
@@ -100,7 +99,7 @@ class DataSourceNoCloud(sources.DataSource):
                 LOG.debug("Attempting to use data from %s", dev)

                 (newmd, newud) = util.mount_cb(dev, util.read_seeded)
-                md = util.mergedict(newmd, md)
+                md = util.mergemanydict([newmd, md])
                 ud = newud

                 # For seed from a device, the default mode is 'net'.
@@ -150,11 +149,11 @@ class DataSourceNoCloud(sources.DataSource):
                 LOG.debug("Using seeded cache data from %s", seedfrom)

                 # Values in the command line override those from the seed
-                md = util.mergedict(md, md_seed)
+                md = util.mergemanydict([md, md_seed])
                 found.append(seedfrom)

         # Now that we have exhausted any other places merge in the defaults
-        md = util.mergedict(md, defaults)
+        md = util.mergemanydict([md, defaults])

         # Update the network-interfaces if metadata had 'network-interfaces'
         # entry and this is the local datasource, or 'seedfrom' was used
cloudinit/sources/DataSourceNone.py
@@ -18,7 +18,6 @@

 from cloudinit import log as logging
 from cloudinit import sources
-from cloudinit import util

 LOG = logging.getLogger(__name__)

@@ -41,9 +40,6 @@ class DataSourceNone(sources.DataSource):
     def get_instance_id(self):
         return 'iid-datasource-none'

-    def __str__(self):
-        return util.obj_name(self)
-
     @property
     def is_disconnected(self):
         return True
cloudinit/sources/DataSourceOVF.py
@@ -43,7 +43,8 @@ class DataSourceOVF(sources.DataSource):
         self.supported_seed_starts = ("/", "file://")

     def __str__(self):
-        return "%s [seed=%s]" % (util.obj_name(self), self.seed)
+        root = sources.DataSource.__str__(self)
+        return "%s [seed=%s]" % (root, self.seed)

     def get_data(self):
         found = []
@@ -93,11 +94,11 @@ class DataSourceOVF(sources.DataSource):
             (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
             LOG.debug("Using seeded cache data from %s", seedfrom)

-            md = util.mergedict(md, md_seed)
+            md = util.mergemanydict([md, md_seed])
             found.append(seedfrom)

         # Now that we have exhausted any other places merge in the defaults
-        md = util.mergedict(md, defaults)
+        md = util.mergemanydict([md, defaults])

         self.seed = ",".join(found)
         self.metadata = md
cloudinit/sources/__init__.py
@@ -25,6 +25,7 @@ import os

 from cloudinit import importer
 from cloudinit import log as logging
+from cloudinit import type_utils
 from cloudinit import user_data as ud
 from cloudinit import util

@@ -52,7 +53,7 @@ class DataSource(object):
         self.userdata = None
         self.metadata = None
         self.userdata_raw = None
-        name = util.obj_name(self)
+        name = type_utils.obj_name(self)
         if name.startswith(DS_PREFIX):
             name = name[len(DS_PREFIX):]
         self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
@@ -62,6 +63,9 @@ class DataSource(object):
         else:
             self.ud_proc = ud_proc

+    def __str__(self):
+        return type_utils.obj_name(self)
+
     def get_userdata(self, apply_filter=False):
         if self.userdata is None:
             self.userdata = self.ud_proc.process(self.get_userdata_raw())
@@ -214,7 +218,7 @@ def normalize_pubkey_data(pubkey_data):

 def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
     ds_list = list_sources(cfg_list, ds_deps, pkg_list)
-    ds_names = [util.obj_name(f) for f in ds_list]
+    ds_names = [type_utils.obj_name(f) for f in ds_list]
     LOG.debug("Searching for data source in: %s", ds_names)

     for cls in ds_list:
@@ -222,7 +226,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
             LOG.debug("Seeing if we can get any data from %s", cls)
             s = cls(sys_cfg, distro, paths)
             if s.get_data():
-                return (s, util.obj_name(cls))
+                return (s, type_utils.obj_name(cls))
         except Exception:
             util.logexc(LOG, "Getting data from %s failed", cls)
@ -19,9 +19,6 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from StringIO import StringIO
|
||||
|
||||
import csv
|
||||
import os
|
||||
import pwd
|
||||
|
||||
@ -33,6 +30,15 @@ LOG = logging.getLogger(__name__)
|
||||
# See: man sshd_config
|
||||
DEF_SSHD_CFG = "/etc/ssh/sshd_config"
|
||||
|
||||
# taken from openssh source key.c/key_type_from_name
|
||||
VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
|
||||
"ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
|
||||
"ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
|
||||
"ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com",
|
||||
"ecdsa-sha2-nistp256-cert-v01@openssh.com",
|
||||
"ecdsa-sha2-nistp384-cert-v01@openssh.com",
|
||||
"ecdsa-sha2-nistp521-cert-v01@openssh.com")
|
||||
|
||||
|
||||
class AuthKeyLine(object):
|
||||
def __init__(self, source, keytype=None, base64=None,
|
||||
@ -43,11 +49,8 @@ class AuthKeyLine(object):
|
||||
self.keytype = keytype
|
||||
self.source = source
|
||||
|
||||
def empty(self):
|
||||
if (not self.base64 and
|
||||
not self.comment and not self.keytype and not self.options):
|
||||
return True
|
||||
return False
|
||||
def valid(self):
|
||||
return (self.base64 and self.keytype)
|
||||
|
||||
def __str__(self):
|
||||
toks = []
|
||||
@ -107,62 +110,47 @@ class AuthKeyLineParser(object):
            i = i + 1

        options = ent[0:i]
        options_lst = []

        # Now use a csv parser to pull the options
        # out of the above string that we just found an endpoint for.
        #
        # No quoting so we don't mess up any of the quoting that
        # is already there.
        reader = csv.reader(StringIO(options), quoting=csv.QUOTE_NONE)
        for row in reader:
            for e in row:
                # Only keep non-empty csv options
                e = e.strip()
                if e:
                    options_lst.append(e)
        # Return the rest of the string in 'remain'
        remain = ent[i:].lstrip()
        return (options, remain)

        # Now take the rest of the items before the string
        # as long as there is room to do this...
        toks = []
        if i + 1 < len(ent):
            rest = ent[i + 1:]
            toks = rest.split(None, 2)
        return (options_lst, toks)

    def _form_components(self, src_line, toks, options=None):
        components = {}
        if len(toks) == 1:
            components['base64'] = toks[0]
        elif len(toks) == 2:
            components['base64'] = toks[0]
            components['comment'] = toks[1]
        elif len(toks) == 3:
            components['keytype'] = toks[0]
            components['base64'] = toks[1]
            components['comment'] = toks[2]
        components['options'] = options
        if not components:
            return AuthKeyLine(src_line)
        else:
            return AuthKeyLine(src_line, **components)

    def parse(self, src_line, def_opt=None):
    def parse(self, src_line, options=None):
        # modeled after openssh's auth2-pubkey.c:user_key_allowed2
        line = src_line.rstrip("\r\n")
        if line.startswith("#") or line.strip() == '':
            return AuthKeyLine(src_line)
        else:
            ent = line.strip()
            toks = ent.split(None, 3)
            if len(toks) < 4:
                return self._form_components(src_line, toks, def_opt)
            else:
                (options, toks) = self._extract_options(ent)
                if options:
                    options = ",".join(options)
                else:
                    options = def_opt
            return self._form_components(src_line, toks, options)

        def parse_ssh_key(ent):
            # return keytype, key, [comment]
            toks = ent.split(None, 2)
            if len(toks) < 2:
                raise TypeError("Too few fields: %s" % len(toks))
            if toks[0] not in VALID_KEY_TYPES:
                raise TypeError("Invalid keytype %s" % toks[0])

            # valid key type and 2 or 3 fields:
            if len(toks) == 2:
                # no comment in line
                toks.append("")

            return toks

        ent = line.strip()
        try:
            (keytype, base64, comment) = parse_ssh_key(ent)
        except TypeError:
            (keyopts, remain) = self._extract_options(ent)
            if options is None:
                options = keyopts

            try:
                (keytype, base64, comment) = parse_ssh_key(remain)
            except TypeError:
                return AuthKeyLine(src_line)

        return AuthKeyLine(src_line, keytype=keytype, base64=base64,
                           comment=comment, options=options)


def parse_authorized_keys(fname):
@ -186,11 +174,11 @@ def update_authorized_keys(old_entries, keys):

    for i in range(0, len(old_entries)):
        ent = old_entries[i]
        if ent.empty() or not ent.base64:
        if not ent.valid():
            continue
        # Replace those with the same base64
        for k in keys:
            if k.empty() or not k.base64:
            if not k.valid():
                continue
            if k.base64 == ent.base64:
                # Replace it with our better one
@ -249,7 +237,7 @@ def extract_authorized_keys(username):
    return (auth_key_fn, parse_authorized_keys(auth_key_fn))


def setup_user_keys(keys, username, key_prefix):
def setup_user_keys(keys, username, options=None):
    # Make sure the users .ssh dir is setup accordingly
    (ssh_dir, pwent) = users_ssh_info(username)
    if not os.path.isdir(ssh_dir):
@ -260,7 +248,7 @@ def setup_user_keys(keys, username, key_prefix):
    parser = AuthKeyLineParser()
    key_entries = []
    for k in keys:
        key_entries.append(parser.parse(str(k), def_opt=key_prefix))
        key_entries.append(parser.parse(str(k), options=options))

    # Extract the old and make the new
    (auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
@ -43,6 +43,7 @@ from cloudinit import helpers
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import type_utils
from cloudinit import util

LOG = logging.getLogger(__name__)
@ -211,7 +212,7 @@ class Init(object):
        # Any config provided???
        pkg_list = self.cfg.get('datasource_pkg_list') or []
        # Add the defaults at the end
        for n in ['', util.obj_name(sources)]:
        for n in ['', type_utils.obj_name(sources)]:
            if n not in pkg_list:
                pkg_list.append(n)
        cfg_list = self.cfg.get('datasource_list') or []
@ -271,7 +272,7 @@ class Init(object):
        dp = self.paths.get_cpath('data')

        # Write what the datasource was and is..
        ds = "%s: %s" % (util.obj_name(self.datasource), self.datasource)
        ds = "%s: %s" % (type_utils.obj_name(self.datasource), self.datasource)
        previous_ds = None
        ds_fn = os.path.join(idir, 'datasource')
        try:
@ -488,7 +489,7 @@ class Modules(object):
            else:
                raise TypeError(("Failed to read '%s' item in config,"
                                 " unknown type %s") %
                                (item, util.obj_name(item)))
                                (item, type_utils.obj_name(item)))
        return module_list

    def _fixup_modules(self, raw_mods):
@ -506,7 +507,7 @@ class Modules(object):
            # Reset it so when ran it will get set to a known value
            freq = None
            mod_locs = importer.find_module(mod_name,
                                            ['', util.obj_name(config)],
                                            ['', type_utils.obj_name(config)],
                                            ['handle'])
            if not mod_locs:
                LOG.warn("Could not find module named %s", mod_name)
34
cloudinit/type_utils.py
Normal file
@ -0,0 +1,34 @@
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# pylint: disable=C0302

import types


def obj_name(obj):
    if isinstance(obj, (types.TypeType,
                        types.ModuleType,
                        types.FunctionType,
                        types.LambdaType)):
        return str(obj.__name__)
    return obj_name(obj.__class__)
@ -43,15 +43,16 @@ import subprocess
import sys
import tempfile
import time
import types
import urlparse

import yaml

from cloudinit import importer
from cloudinit import log as logging
from cloudinit import mergers
from cloudinit import safeyaml
from cloudinit import url_helper
from cloudinit import type_utils
from cloudinit import version

from cloudinit.settings import (CFG_BUILTIN)
@ -219,11 +220,12 @@ def fork_cb(child_cb, *args):
            os._exit(0)  # pylint: disable=W0212
        except:
            logexc(LOG, ("Failed forking and"
                         " calling callback %s"), obj_name(child_cb))
                         " calling callback %s"),
                   type_utils.obj_name(child_cb))
            os._exit(1)  # pylint: disable=W0212
    else:
        LOG.debug("Forked child %s who will run callback %s",
                  fid, obj_name(child_cb))
                  fid, type_utils.obj_name(child_cb))


def is_true(val, addons=None):
@ -537,40 +539,26 @@ def make_url(scheme, host, port=None,
    return urlparse.urlunparse(pieces)


def obj_name(obj):
    if isinstance(obj, (types.TypeType,
                        types.ModuleType,
                        types.FunctionType,
                        types.LambdaType)):
        return str(obj.__name__)
    return obj_name(obj.__class__)


def mergemanydict(srcs, reverse=False):
    if reverse:
        srcs = reversed(srcs)
    m_cfg = {}
    merge_how = [mergers.default_mergers()]
    for a_cfg in srcs:
        if a_cfg:
            m_cfg = mergedict(m_cfg, a_cfg)
            # Take the last merger as the one that
            # will define how to merge next...
            mergers_to_apply = list(merge_how[-1])
            merger = mergers.construct(mergers_to_apply)
            m_cfg = merger.merge(m_cfg, a_cfg)
            # If the config now has a new merger set,
            # extract them to be used next time...
            new_mergers = mergers.dict_extract_mergers(m_cfg)
            if new_mergers:
                merge_how.append(new_mergers)
    return m_cfg


def mergedict(src, cand):
    """
    Merge values from C{cand} into C{src}.
    If C{src} has a key C{cand} will not override.
    Nested dictionaries are merged recursively.
    """
    if isinstance(src, dict) and isinstance(cand, dict):
        for (k, v) in cand.iteritems():
            if k not in src:
                src[k] = v
            else:
                src[k] = mergedict(src[k], v)
    return src


@contextlib.contextmanager
def chdir(ndir):
    curr = os.getcwd()
@ -713,7 +701,7 @@ def load_yaml(blob, default=None, allowed=(dict,)):
            # Yes this will just be caught, but thats ok for now...
            raise TypeError(("Yaml load allows %s root types,"
                             " but got %s instead") %
                            (allowed, obj_name(converted)))
                            (allowed, type_utils.obj_name(converted)))
        loaded = converted
    except (yaml.YAMLError, TypeError, ValueError):
        if len(blob) == 0:
@ -782,7 +770,7 @@ def read_conf_with_confd(cfgfile):
            if not isinstance(confd, (str, basestring)):
                raise TypeError(("Config file %s contains 'conf_d' "
                                 "with non-string type %s") %
                                (cfgfile, obj_name(confd)))
                                (cfgfile, type_utils.obj_name(confd)))
            else:
                confd = str(confd).strip()
    elif os.path.isdir("%s.d" % cfgfile):
@ -793,7 +781,7 @@ def read_conf_with_confd(cfgfile):

    # Conf.d settings override input configuration
    confd_cfg = read_conf_d(confd)
    return mergedict(confd_cfg, cfg)
    return mergemanydict([confd_cfg, cfg])


def read_cc_from_cmdline(cmdline=None):
@ -948,7 +936,7 @@ def is_resolvable(name):
            for (_fam, _stype, _proto, cname, sockaddr) in result:
                badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
                badips.add(sockaddr[0])
        except socket.gaierror:
        except (socket.gaierror, socket.error):
            pass
    _DNS_REDIRECT_IP = badips
    if badresults:
@ -961,7 +949,7 @@ def is_resolvable(name):
            if addr in _DNS_REDIRECT_IP:
                return False
        return True
    except socket.gaierror:
    except (socket.gaierror, socket.error):
        return False


@ -1540,7 +1528,7 @@ def shellify(cmdlist, add_header=True):
        else:
            raise RuntimeError(("Unable to shellify type %s"
                                " which is not a list or string")
                               % (obj_name(args)))
                               % (type_utils.obj_name(args)))
    LOG.debug("Shellified %s commands.", cmds_made)
    return content
@ -1599,7 +1587,7 @@ def get_proc_env(pid):
    fn = os.path.join("/proc/", str(pid), "environ")
    try:
        contents = load_file(fn)
        toks = contents.split("\0")
        toks = contents.split("\x00")
        for tok in toks:
            if tok == "":
                continue
@ -1655,3 +1643,106 @@ def expand_package_list(version_fmt, pkgs):
            raise RuntimeError("Invalid package type.")

    return pkglist


def parse_mount_info(path, mountinfo_lines, log=LOG):
    """Return the mount information for PATH given the lines from
    /proc/$$/mountinfo."""

    path_elements = [e for e in path.split('/') if e]
    devpth = None
    fs_type = None
    match_mount_point = None
    match_mount_point_elements = None
    for i, line in enumerate(mountinfo_lines):
        parts = line.split()

        # Completely fail if there is anything in any line that is
        # unexpected, as continuing to parse past a bad line could
        # cause an incorrect result to be returned, so it's better to
        # return nothing than an incorrect result.

        # The minimum number of elements in a valid line is 10.
        if len(parts) < 10:
log.debug("Line %d has two few columns (%d): %s",
|
||||
i + 1, len(parts), line)
|
||||
return None
|
||||

        mount_point = parts[4]
        mount_point_elements = [e for e in mount_point.split('/') if e]

        # Ignore mounts deeper than the path in question.
        if len(mount_point_elements) > len(path_elements):
            continue

        # Ignore mounts where the common path is not the same.
        l = min(len(mount_point_elements), len(path_elements))
        if mount_point_elements[0:l] != path_elements[0:l]:
            continue

        # Ignore mount points higher than an already seen mount
        # point.
        if (match_mount_point_elements is not None and
            len(match_mount_point_elements) > len(mount_point_elements)):
            continue

        # Find the '-' which terminates a list of optional columns to
        # find the filesystem type and the path to the device.  See
        # man 5 proc for the format of this file.
        try:
            i = parts.index('-')
        except ValueError:
            log.debug("Did not find column named '-' in line %d: %s",
                      i + 1, line)
            return None

        # Get the path to the device.
        try:
            fs_type = parts[i + 1]
            devpth = parts[i + 2]
        except IndexError:
            log.debug("Too few columns after '-' column in line %d: %s",
                      i + 1, line)
            return None

        match_mount_point = mount_point
        match_mount_point_elements = mount_point_elements

    if devpth and fs_type and match_mount_point:
        return (devpth, fs_type, match_mount_point)
    else:
        return None


def get_mount_info(path, log=LOG):
    # Use /proc/$$/mountinfo to find the device where path is mounted.
    # This is done because with a btrfs filesystem using os.stat(path)
    # does not return the ID of the device.
    #
    # Here, / has a device of 18 (decimal).
    #
    # $ stat /
    #   File: '/'
    #   Size: 234        Blocks: 0      IO Block: 4096   directory
    # Device: 12h/18d    Inode: 256     Links: 1
    # Access: (0755/drwxr-xr-x)  Uid: (    0/    root)   Gid: (    0/    root)
    # Access: 2013-01-13 07:31:04.358011255 +0000
    # Modify: 2013-01-13 18:48:25.930011255 +0000
    # Change: 2013-01-13 18:48:25.930011255 +0000
    #  Birth: -
    #
    # Find where / is mounted:
    #
    # $ mount | grep ' / '
    # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)
    #
    # And the device ID for /dev/vda1 is not 18:
    #
    # $ ls -l /dev/vda1
    # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1
    #
    # So use /proc/$$/mountinfo to find the device underlying the
    # input path.
    mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
    lines = load_file(mountinfo_path).splitlines()
    return parse_mount_info(path, lines, log)

@ -26,6 +26,7 @@ cloud_init_modules:
 - migrator
 - bootcmd
 - write-files
 - growpart
 - resizefs
 - set_hostname
 - update_hostname

24
doc/examples/cloud-config-growpart.txt
Normal file
@ -0,0 +1,24 @@
#cloud-config
#
# growpart entry is a dict; if it is not present at all
# in config, then the default is used ({'mode': 'auto', 'devices': ['/']})
#
# mode:
#   values:
#    * auto: use any option possible (growpart or parted)
#      if none are available, do not warn, but debug.
#    * growpart: use growpart to grow partitions
#      if growpart is not available, this is an error.
#    * parted: use parted (parted resizepart) to resize partitions
#      if parted is not available, this is an error.
#    * off, false
#
# devices:
#   a list of things to resize.
#   items can be filesystem paths or devices (in /dev)
#   examples:
#     devices: [/, /dev/vdb1]
#
growpart:
  mode: auto
  devices: ['/']
188
doc/merging.rst
Normal file
@ -0,0 +1,188 @@
Overview
--------

This was done because it has been a common feature request that there be a
way to specify how cloud-config yaml "dictionaries" are merged together when
there are multiple yamls to merge together (say when performing an #include).

Since the previous merging algorithm was very simple and would only overwrite
and not append lists, strings, and so on, it was decided to create a new and
improved way to merge dictionaries (and their contained objects) together in a
way that is customizable, thus allowing users who provide cloud-config data
to determine exactly how their objects will be merged.

For example.

.. code-block:: yaml

    #cloud-config (1)
    run_cmd:
      - bash1
      - bash2

    #cloud-config (2)
    run_cmd:
      - bash3
      - bash4

The previous way of merging the following 2 objects would result in a final
cloud-config object that contains the following.

.. code-block:: yaml

    #cloud-config (merged)
    run_cmd:
      - bash3
      - bash4

Typically this is not what users want; instead they would likely prefer:

.. code-block:: yaml

    #cloud-config (merged)
    run_cmd:
      - bash1
      - bash2
      - bash3
      - bash4

This way makes it easier to combine the various cloud-config objects you have
into a more useful list, thus reducing duplication that would have had to
occur in the previous method to accomplish the same result.

Customizability
---------------

Since the above merging algorithm may not always be the desired merging
algorithm (like how the previous merging algorithm was not always the preferred
one) the concept of customizing how merging can be done was introduced through
a new concept called 'merge classes'.

A merge class is a class definition which provides functions that can be used
to merge a given type with another given type.

An example of one of these merging classes is the following:

.. code-block:: python

    class Merger(object):
        def __init__(self, merger, opts):
            self._merger = merger
            self._overwrite = 'overwrite' in opts

        # This merging algorithm will attempt to merge with
        # another dictionary; on encountering any other type of object
        # it will not merge with said object, but will instead return
        # the original value.
        #
        # On encountering a dictionary, it will create a new dictionary
        # composed of the original and the one to merge with. If 'overwrite'
        # is enabled then keys that exist in the original will be overwritten
        # by keys in the one to merge with (and associated values); otherwise,
        # if not in overwrite mode, the 2 conflicting keys themselves will
        # be merged.
        def _on_dict(self, value, merge_with):
            if not isinstance(merge_with, (dict)):
                return value
            merged = dict(value)
            for (k, v) in merge_with.items():
                if k in merged:
                    if not self._overwrite:
                        merged[k] = self._merger.merge(merged[k], v)
                    else:
                        merged[k] = v
                else:
                    merged[k] = v
            return merged

As you can see there is a '_on_dict' method here that will be given a source value
and a value to merge with. The result will be the merged object. This code itself
is called by another merging class which 'directs' the merging to happen by
analyzing the types of the objects to merge and attempting to find a known object
that will merge that type. I will avoid pasting that here, but it can be found
in the `mergers/__init__.py` file (see `LookupMerger` and `UnknownMerger`).

So following the typical cloud-init way of allowing source code to be downloaded
and used dynamically, it is possible for users to inject their own merging files
to handle specific types of merging as they choose (the basic ones included will
handle lists, dicts, and strings). Note how each merger can have options associated
with it which affect how the merging is performed; for example, a dictionary merger
can be told to overwrite instead of attempt to merge, or a string merger can be
told to append strings instead of discarding other strings to merge with.
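
A short usage sketch of the pieces above, using the helper names this merge
exercises in its unit tests (`string_extract_mergers`, `construct` and
`merge` from `cloudinit.mergers`); the input dictionaries are made up:

.. code-block:: python

    from cloudinit import mergers

    # Turn the string format (described below) into a merger set,
    # build a merger from it, and merge two objects with it.
    merger_set = mergers.string_extract_mergers("list(extend)+dict()+str(append)")
    merger = mergers.construct(merger_set)
    merged = merger.merge({'run': ['a']}, {'run': ['b']})
    # The list merger was told to extend, so merged['run'] == ['a', 'b']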

How to activate
---------------

There are a few ways to activate the merging algorithms, and to customize them
for your own usage.

1. The first way involves the usage of MIME messages in cloud-init to specify
multipart documents (this is one way in which multiple cloud-configs are joined
together into a single cloud-config). Two new headers are looked for, both
of which can define the way merging is done (the first header to exist wins).
These new headers (in lookup order) are 'Merge-Type' and 'X-Merge-Type'. The value
should be a string which will satisfy the new merging format definition (see
below for this format); a small sketch of such a part follows this list.
2. The second way is actually specifying the merge-type in the body of the
cloud-config dictionary. There are 2 ways to specify this, either as a string
or as a dictionary (see format below). The keys that are looked up for this
definition are the following (in order): 'merge_how', 'merge_type'.
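
A minimal sketch of option #1, mirroring what the unit tests added in this
merge do (stdlib ``email.mime.base.MIMEBase``; the payload and merge string
here are only illustrative):

.. code-block:: python

    from email.mime.base import MIMEBase

    blob = '''
    #cloud-config
    run_cmd:
      - bash1
    '''
    # Attach the merge specification as a header on the part; cloud-init
    # looks for 'Merge-Type' first and then 'X-Merge-Type'.
    message = MIMEBase("text", "cloud-config")
    message['Merge-Type'] = 'dict()+list(extend)+str(append)'
    message.set_payload(blob)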

String format
*************

The string format that is expected is the following.

::

    classname1(option1,option2)+classname2(option3,option4)....

The class name there will be matched against the class names used when looking
for the class that can be used to merge, and the options provided will be given
to the class on construction of that class.

For example, the default string that is used when none is provided is the following:

::

    list(extend)+dict()+str(append)

Dictionary format
*****************

In cases where a dictionary can be used to specify the same information as the
string format (ie option #2 of above) it can be used, for example.

.. code-block:: python

    {'merge_how': [{'name': 'list', 'settings': ['extend']},
                   {'name': 'dict', 'settings': []},
                   {'name': 'str', 'settings': ['append']}]}

This would be the equivalent format for the default string format, but in
dictionary form instead of string form.
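
A sketch of how a merger set could be pulled out of a config body in this
dictionary form, using the ``dict_extract_mergers`` helper that this merge
uses in ``cloudinit/util.py`` (the surrounding config keys are made up):

.. code-block:: python

    from cloudinit import mergers

    cfg = {
        'a': 1,
        'merge_how': [{'name': 'list', 'settings': ['extend']},
                      {'name': 'dict', 'settings': []},
                      {'name': 'str', 'settings': ['append']}],
    }
    # Pull the merger set out of the config body (option #2 above) and
    # build a merger from it, just like the string form.
    merger_set = mergers.dict_extract_mergers(cfg)
    merger = mergers.construct(merger_set)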

Specifying multiple types and its effect
----------------------------------------

Now you may be asking yourself, if I specify a merge-type header or dictionary
for every cloud-config that I provide, what exactly happens?

The answer is that when merging, a stack of 'merging classes' is kept. The
first entry on that stack is the default merging classes; this set of mergers
will be used when the first cloud-config is merged with the initial empty
cloud-config dictionary. If the cloud-config that was just merged provided a
set of merging classes (via the above formats) then those merging classes will
be pushed onto the stack. Now if there is a second cloud-config to be merged,
the merging classes from the cloud-config before it will be used (not the
default) and so on. This way a cloud-config can decide how it will merge with a
cloud-config dictionary coming after it.
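
The ``mergemanydict`` helper added to ``cloudinit/util.py`` in this merge
implements exactly this stack behavior; a trimmed sketch of the idea (the
function name ``merge_many`` is just for illustration):

.. code-block:: python

    from cloudinit import mergers

    def merge_many(srcs):
        merged = {}
        # Start from the default merger set; each merged config may push
        # a new set that governs how the *next* config is merged in.
        merge_how = [mergers.default_mergers()]
        for cfg in srcs:
            merger = mergers.construct(list(merge_how[-1]))
            merged = merger.merge(merged, cfg)
            new_mergers = mergers.dict_extract_mergers(merged)
            if new_mergers:
                merge_how.append(new_mergers)
        return merged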

Other uses
----------

The default merging algorithm for merging 'conf.d' yaml files (which form an
initial yaml config for cloud-init) was also changed to use this mechanism, so
its full benefits (and customization) can be used there as well. Other places
that used the previous merging are now similarly extensible (metadata merging,
for example).
@ -17,13 +17,13 @@ from cloudinit import version
# General information about the project.
project = 'Cloud-Init'

# -- General configuration -----------------------------------------------------
# -- General configuration ----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.intersphinx',
]
@ -55,7 +55,7 @@ exclude_patterns = []
# output. They are ignored by default.
show_authors = False

# -- Options for HTML output ---------------------------------------------------
# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.

@ -24,6 +24,7 @@ Summary
   topics/examples
   topics/datasources
   topics/modules
   topics/merging
   topics/moreinfo
   topics/hacking

5
doc/rtd/topics/merging.rst
Normal file
@ -0,0 +1,5 @@
=========
Merging
=========

.. include:: ../../merging.rst
@ -175,6 +175,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
    def patchOS(self, new_root):
        patch_funcs = {
            os.path: ['isfile', 'exists', 'islink', 'isdir'],
            os: ['listdir'],
        }
        for (mod, funcs) in patch_funcs.items():
            for f in funcs:
@ -183,6 +184,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
                setattr(mod, f, trap_func)
                self.patched_funcs.append((mod, f, func))


def populate_dir(path, files):
    os.makedirs(path)
    for (name, content) in files.iteritems():

@ -22,7 +22,8 @@ class FakeModule(handlers.Handler):
    def list_types(self):
        return self.types

    def _handle_part(self, data, ctype, filename, payload, frequency):
    def handle_part(self, _data, ctype, filename,  # pylint: disable=W0221
                    payload, frequency):
        pass

@ -103,6 +104,9 @@ class TestHandlerHandlePart(MockerTestCase):
        self.filename = "fake filename"
        self.payload = "fake payload"
        self.frequency = settings.PER_INSTANCE
        self.headers = {
            'Content-Type': self.ctype,
        }

    def test_normal_version_1(self):
        """
@ -118,8 +122,8 @@ class TestHandlerHandlePart(MockerTestCase):
                          self.payload)
        self.mocker.replay()

        handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
                          self.payload, self.frequency)
        handlers.run_part(mod_mock, self.data, self.filename,
                          self.payload, self.frequency, self.headers)

    def test_normal_version_2(self):
        """
@ -135,8 +139,8 @@ class TestHandlerHandlePart(MockerTestCase):
                          self.payload, self.frequency)
        self.mocker.replay()

        handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
                          self.payload, self.frequency)
        handlers.run_part(mod_mock, self.data, self.filename,
                          self.payload, self.frequency, self.headers)

    def test_modfreq_per_always(self):
        """
@ -152,8 +156,8 @@ class TestHandlerHandlePart(MockerTestCase):
                          self.payload)
        self.mocker.replay()

        handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
                          self.payload, self.frequency)
        handlers.run_part(mod_mock, self.data, self.filename,
                          self.payload, self.frequency, self.headers)

    def test_no_handle_when_modfreq_once(self):
        """C{handle_part} is not called if frequency is once."""
@ -163,8 +167,8 @@ class TestHandlerHandlePart(MockerTestCase):
        self.mocker.result(settings.PER_ONCE)
        self.mocker.replay()

        handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
                          self.payload, self.frequency)
        handlers.run_part(mod_mock, self.data, self.filename,
                          self.payload, self.frequency, self.headers)

    def test_exception_is_caught(self):
        """Exceptions within C{handle_part} are caught and logged."""
@ -178,8 +182,8 @@ class TestHandlerHandlePart(MockerTestCase):
        self.mocker.throw(Exception())
        self.mocker.replay()

        handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
                          self.payload, self.frequency)
        handlers.run_part(mod_mock, self.data, self.filename,
                          self.payload, self.frequency, self.headers)


class TestCmdlineUrl(MockerTestCase):

@ -1,8 +1,9 @@
"""Tests of the built-in user data handlers."""

import os
import unittest

from mocker import MockerTestCase
from tests.unittests import helpers as test_helpers

from cloudinit import handlers
from cloudinit import helpers
@ -13,7 +14,7 @@ from cloudinit.handlers import upstart_job
from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE)


class TestBuiltins(MockerTestCase):
class TestBuiltins(test_helpers.FilesystemMockingTestCase):

    def test_upstart_frequency_no_out(self):
        c_root = self.makeDir()
@ -34,16 +35,21 @@ class TestBuiltins(MockerTestCase):
                      None, None, None)
        self.assertEquals(0, len(os.listdir(up_root)))

    @unittest.skip("until LP: #1124384 fixed")
    def test_upstart_frequency_single(self):
        # files should be written out when frequency is ! per-instance
        c_root = self.makeDir()
        up_root = self.makeDir()
        paths = helpers.Paths({
            'cloud_dir': c_root,
            'upstart_dir': up_root,
        })
        new_root = self.makeDir()
        freq = PER_INSTANCE

        self.patchOS(new_root)
        self.patchUtils(new_root)
        paths = helpers.Paths({
            'upstart_dir': "/etc/upstart",
        })

        util.ensure_dir("/run")
        util.ensure_dir("/etc/upstart")

        mock_subp = self.mocker.replace(util.subp, passthrough=False)
        mock_subp(["initctl", "reload-configuration"], capture=False)
        self.mocker.replay()
@ -55,4 +61,5 @@ class TestBuiltins(MockerTestCase):
                      'test.conf', 'blah', freq)
        h.handle_part('', handlers.CONTENT_END,
                      None, None, None)
        self.assertEquals(1, len(os.listdir(up_root)))

        self.assertEquals(1, len(os.listdir('/etc/upstart')))

@ -1,7 +1,7 @@
from cloudinit import helpers
from tests.unittests.helpers import populate_dir
from cloudinit.sources import DataSourceNoCloud
from cloudinit import util
from tests.unittests.helpers import populate_dir

from mocker import MockerTestCase
import os

255
tests/unittests/test_handler/test_handler_growpart.py
Normal file
@ -0,0 +1,255 @@
from mocker import MockerTestCase

from cloudinit import cloud
from cloudinit import util

from cloudinit.config import cc_growpart

import errno
import logging
import os
import re

# growpart:
#   mode: auto  # off, on, auto, 'growpart', 'parted'
#   devices: ['root']

HELP_PARTED_NO_RESIZE = """
Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...]
Apply COMMANDs with PARAMETERS to DEVICE.  If no COMMAND(s) are given, run in
interactive mode.

OPTIONs:
<SNIP>

COMMANDs:
<SNIP>
  quit                                     exit program
  rescue START END                         rescue a lost partition near START
        and END
  resize NUMBER START END                  resize partition NUMBER and its file
        system
  rm NUMBER                                delete partition NUMBER
<SNIP>
Report bugs to bug-parted@gnu.org
"""

HELP_PARTED_RESIZE = """
Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...]
Apply COMMANDs with PARAMETERS to DEVICE.  If no COMMAND(s) are given, run in
interactive mode.

OPTIONs:
<SNIP>

COMMANDs:
<SNIP>
  quit                                     exit program
  rescue START END                         rescue a lost partition near START
        and END
  resize NUMBER START END                  resize partition NUMBER and its file
        system
  resizepart NUMBER END                    resize partition NUMBER
  rm NUMBER                                delete partition NUMBER
<SNIP>
Report bugs to bug-parted@gnu.org
"""

HELP_GROWPART_RESIZE = """
growpart disk partition
   rewrite partition table so that partition takes up all the space it can
   options:
    -h | --help       print Usage and exit
<SNIP>
    -u | --update  R  update the the kernel partition table info after growing
                      this requires kernel support and 'partx --update'
                      R is one of:
                       - 'auto'  : [default] update partition if possible
<SNIP>
   Example:
    - growpart /dev/sda 1
      Resize partition 1 on /dev/sda
"""

HELP_GROWPART_NO_RESIZE = """
growpart disk partition
   rewrite partition table so that partition takes up all the space it can
   options:
    -h | --help       print Usage and exit
<SNIP>
   Example:
    - growpart /dev/sda 1
      Resize partition 1 on /dev/sda
"""


class TestDisabled(MockerTestCase):
    def setUp(self):
        super(TestDisabled, self).setUp()
        self.name = "growpart"
        self.cloud_init = None
        self.log = logging.getLogger("TestDisabled")
        self.args = []

        self.handle = cc_growpart.handle

    def test_mode_off(self):
        #Test that nothing is done if mode is off.

        # this really only verifies that resizer_factory isn't called
        config = {'growpart': {'mode': 'off'}}
        self.mocker.replace(cc_growpart.resizer_factory,
                            passthrough=False)
        self.mocker.replay()

        self.handle(self.name, config, self.cloud_init, self.log, self.args)


class TestConfig(MockerTestCase):
    def setUp(self):
        super(TestConfig, self).setUp()
        self.name = "growpart"
        self.paths = None
        self.cloud = cloud.Cloud(None, self.paths, None, None, None)
        self.log = logging.getLogger("TestConfig")
        self.args = []
        os.environ = {}

        self.cloud_init = None
        self.handle = cc_growpart.handle

        # Order must be correct
        self.mocker.order()

    def test_no_resizers_auto_is_fine(self):
        subp = self.mocker.replace(util.subp, passthrough=False)
        subp(['parted', '--help'], env={'LANG': 'C'})
        self.mocker.result((HELP_PARTED_NO_RESIZE, ""))
        subp(['growpart', '--help'], env={'LANG': 'C'})
        self.mocker.result((HELP_GROWPART_NO_RESIZE, ""))
        self.mocker.replay()

        config = {'growpart': {'mode': 'auto'}}
        self.handle(self.name, config, self.cloud_init, self.log, self.args)

    def test_no_resizers_mode_growpart_is_exception(self):
        subp = self.mocker.replace(util.subp, passthrough=False)
        subp(['growpart', '--help'], env={'LANG': 'C'})
        self.mocker.result((HELP_GROWPART_NO_RESIZE, ""))
        self.mocker.replay()

        config = {'growpart': {'mode': "growpart"}}
        self.assertRaises(ValueError, self.handle, self.name, config,
                          self.cloud_init, self.log, self.args)

    def test_mode_auto_prefers_parted(self):
        subp = self.mocker.replace(util.subp, passthrough=False)
        subp(['parted', '--help'], env={'LANG': 'C'})
        self.mocker.result((HELP_PARTED_RESIZE, ""))
        self.mocker.replay()

        ret = cc_growpart.resizer_factory(mode="auto")
        self.assertTrue(isinstance(ret, cc_growpart.ResizeParted))

    def test_handle_with_no_growpart_entry(self):
        #if no 'growpart' entry in config, then mode=auto should be used

        myresizer = object()

        factory = self.mocker.replace(cc_growpart.resizer_factory,
                                      passthrough=False)
        rsdevs = self.mocker.replace(cc_growpart.resize_devices,
                                     passthrough=False)
        factory("auto")
        self.mocker.result(myresizer)
        rsdevs(myresizer, ["/"])
        self.mocker.result((("/", cc_growpart.RESIZE.CHANGED, "my-message",),))
        self.mocker.replay()

        try:
            orig_resizers = cc_growpart.RESIZERS
            cc_growpart.RESIZERS = (('mysizer', object),)
            self.handle(self.name, {}, self.cloud_init, self.log, self.args)
        finally:
            cc_growpart.RESIZERS = orig_resizers


class TestResize(MockerTestCase):
    def setUp(self):
        super(TestResize, self).setUp()
        self.name = "growpart"
        self.log = logging.getLogger("TestResize")

        # Order must be correct
        self.mocker.order()

    def test_simple_devices(self):
        #test simple device list
        # this patches out devent2dev, os.stat, and device_part_info
        # so in the end, doesn't test a lot
        devs = ["/dev/XXda1", "/dev/YYda2"]
        devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5L,
                            st_nlink=1, st_uid=0, st_gid=6, st_size=0,
                            st_atime=0, st_mtime=0, st_ctime=0)
        enoent = ["/dev/NOENT"]
        real_stat = os.stat
        resize_calls = []

        class myresizer(object):
            def resize(self, diskdev, partnum, partdev):
                resize_calls.append((diskdev, partnum, partdev))
                if partdev == "/dev/YYda2":
                    return (1024, 2048)
                return (1024, 1024)  # old size, new size

        def mystat(path):
            if path in devs:
                return devstat_ret
            if path in enoent:
                e = OSError("%s: does not exist" % path)
                e.errno = errno.ENOENT
                raise e
            return real_stat(path)

        try:
            opinfo = cc_growpart.device_part_info
            cc_growpart.device_part_info = simple_device_part_info
            os.stat = mystat

            resized = cc_growpart.resize_devices(myresizer(), devs + enoent)

            def find(name, res):
                for f in res:
                    if f[0] == name:
                        return f
                return None

            self.assertEqual(cc_growpart.RESIZE.NOCHANGE,
                             find("/dev/XXda1", resized)[1])
            self.assertEqual(cc_growpart.RESIZE.CHANGED,
                             find("/dev/YYda2", resized)[1])
            self.assertEqual(cc_growpart.RESIZE.SKIPPED,
                             find(enoent[0], resized)[1])
            #self.assertEqual(resize_calls,
                             #[("/dev/XXda", "1", "/dev/XXda1"),
                              #("/dev/YYda", "2", "/dev/YYda2")])
        finally:
            cc_growpart.device_part_info = opinfo
            os.stat = real_stat


def simple_device_part_info(devpath):
    # simple stupid return (/dev/vda, 1) for /dev/vda
    ret = re.search("([^0-9]*)([0-9]*)$", devpath)
    x = (ret.group(1), ret.group(2))
    return x


class Bunch:
    st_mode = None  # fix pylint complaint

    def __init__(self, **kwds):
        self.__dict__.update(kwds)


# vi: ts=4 expandtab
@ -1,62 +1,142 @@
from mocker import MockerTestCase
from tests.unittests import helpers

from cloudinit import util
from cloudinit import mergers


class TestMergeDict(MockerTestCase):
    def test_simple_merge(self):
        """Test simple non-conflict merge."""
        source = {"key1": "value1"}
        candidate = {"key2": "value2"}
        result = util.mergedict(source, candidate)
        self.assertEqual({"key1": "value1", "key2": "value2"}, result)
class TestSimpleRun(helpers.MockerTestCase):
    def test_basic_merge(self):
        source = {
            'Blah': ['blah2'],
            'Blah3': 'c',
        }
        merge_with = {
            'Blah2': ['blah3'],
            'Blah3': 'b',
            'Blah': ['123'],
        }
        # Basic merge should not do anything special
merge_how = "list()+dict()+str()"
|
||||
merger_set = mergers.string_extract_mergers(merge_how)
|
||||
self.assertEquals(3, len(merger_set))
|
||||
merger = mergers.construct(merger_set)
|
||||
merged = merger.merge(source, merge_with)
|
||||
self.assertEquals(merged['Blah'], ['blah2'])
|
||||
self.assertEquals(merged['Blah2'], ['blah3'])
|
||||
self.assertEquals(merged['Blah3'], 'c')
|
||||
|
||||
def test_nested_merge(self):
|
||||
"""Test nested merge."""
|
||||
source = {"key1": {"key1.1": "value1.1"}}
|
||||
candidate = {"key1": {"key1.2": "value1.2"}}
|
||||
result = util.mergedict(source, candidate)
|
||||
self.assertEqual(
|
||||
{"key1": {"key1.1": "value1.1", "key1.2": "value1.2"}}, result)
|
||||
def test_dict_overwrite(self):
|
||||
source = {
|
||||
'Blah': ['blah2'],
|
||||
}
|
||||
merge_with = {
|
||||
'Blah': ['123'],
|
||||
}
|
||||
# Now lets try a dict overwrite
|
||||
merge_how = "list()+dict(overwrite)+str()"
|
||||
merger_set = mergers.string_extract_mergers(merge_how)
|
||||
self.assertEquals(3, len(merger_set))
|
||||
merger = mergers.construct(merger_set)
|
||||
merged = merger.merge(source, merge_with)
|
||||
self.assertEquals(merged['Blah'], ['123'])
|
||||
|
||||
def test_merge_does_not_override(self):
|
||||
"""Test that candidate doesn't override source."""
|
||||
source = {"key1": "value1", "key2": "value2"}
|
||||
candidate = {"key1": "value2", "key2": "NEW VALUE"}
|
||||
result = util.mergedict(source, candidate)
|
||||
self.assertEqual(source, result)
|
||||
def test_string_append(self):
|
||||
source = {
|
||||
'Blah': 'blah2',
|
||||
}
|
||||
merge_with = {
|
||||
'Blah': '345',
|
||||
}
|
||||
merge_how = "list()+dict()+str(append)"
|
||||
merger_set = mergers.string_extract_mergers(merge_how)
|
||||
self.assertEquals(3, len(merger_set))
|
||||
merger = mergers.construct(merger_set)
|
||||
merged = merger.merge(source, merge_with)
|
||||
self.assertEquals(merged['Blah'], 'blah2345')
|
||||
|
||||
def test_empty_candidate(self):
|
||||
"""Test empty candidate doesn't change source."""
|
||||
source = {"key": "value"}
|
||||
candidate = {}
|
||||
result = util.mergedict(source, candidate)
|
||||
self.assertEqual(source, result)
|
||||
def test_list_extend(self):
|
||||
source = ['abc']
|
||||
merge_with = ['123']
|
||||
merge_how = "list(extend)+dict()+str()"
|
||||
merger_set = mergers.string_extract_mergers(merge_how)
|
||||
self.assertEquals(3, len(merger_set))
|
||||
merger = mergers.construct(merger_set)
|
||||
merged = merger.merge(source, merge_with)
|
||||
self.assertEquals(merged, ['abc', '123'])
|
||||
|
||||
def test_empty_source(self):
|
||||
"""Test empty source is replaced by candidate."""
|
||||
source = {}
|
||||
candidate = {"key": "value"}
|
||||
result = util.mergedict(source, candidate)
|
||||
self.assertEqual(candidate, result)
|
||||
def test_deep_merge(self):
|
||||
source = {
|
||||
'a': [1, 'b', 2],
|
||||
'b': 'blahblah',
|
||||
'c': {
|
||||
'e': [1, 2, 3],
|
||||
'f': 'bigblobof',
|
||||
'iamadict': {
|
||||
'ok': 'ok',
|
||||
}
|
||||
},
|
||||
'run': [
|
||||
'runme',
|
||||
'runme2',
|
||||
],
|
||||
'runmereally': [
|
||||
'e', ['a'], 'd',
|
||||
],
|
||||
}
|
||||
merge_with = {
|
||||
'a': ['e', 'f', 'g'],
|
||||
'b': 'more',
|
||||
'c': {
|
||||
'a': 'b',
|
||||
'f': 'stuff',
|
||||
},
|
||||
'run': [
|
||||
'morecmd',
|
||||
'moremoremore',
|
||||
],
|
||||
'runmereally': [
|
||||
'blah', ['b'], 'e',
|
||||
],
|
||||
}
|
||||
merge_how = "list(extend)+dict()+str(append)"
|
||||
merger_set = mergers.string_extract_mergers(merge_how)
|
||||
self.assertEquals(3, len(merger_set))
|
||||
merger = mergers.construct(merger_set)
|
||||
merged = merger.merge(source, merge_with)
|
||||
self.assertEquals(merged['a'], [1, 'b', 2, 'e', 'f', 'g'])
|
||||
self.assertEquals(merged['b'], 'blahblahmore')
|
||||
self.assertEquals(merged['c']['f'], 'bigblobofstuff')
|
||||
self.assertEquals(merged['run'], ['runme', 'runme2', 'morecmd',
|
||||
'moremoremore'])
|
||||
self.assertEquals(merged['runmereally'], ['e', ['a'], 'd', 'blah',
|
||||
['b'], 'e'])
|
||||
|
||||
def test_non_dict_candidate(self):
|
||||
"""Test non-dict candidate is discarded."""
|
||||
source = {"key": "value"}
|
||||
candidate = "not a dict"
|
||||
result = util.mergedict(source, candidate)
|
||||
self.assertEqual(source, result)
|
||||
|
||||
def test_non_dict_source(self):
|
||||
"""Test non-dict source is not modified with a dict candidate."""
|
||||
source = "not a dict"
|
||||
candidate = {"key": "value"}
|
||||
result = util.mergedict(source, candidate)
|
||||
self.assertEqual(source, result)
|
||||
|
||||
def test_neither_dict(self):
|
||||
"""Test if neither candidate or source is dict source wins."""
|
||||
source = "source"
|
||||
candidate = "candidate"
|
||||
result = util.mergedict(source, candidate)
|
||||
self.assertEqual(source, result)
|
||||
def test_dict_overwrite_layered(self):
|
||||
source = {
|
||||
'Blah3': {
|
||||
'f': '3',
|
||||
'g': {
|
||||
'a': 'b',
|
||||
}
|
||||
}
|
||||
}
|
||||
merge_with = {
|
||||
'Blah3': {
|
||||
'e': '2',
|
||||
'g': {
|
||||
'e': 'f',
|
||||
}
|
||||
}
|
||||
}
|
||||
merge_how = "list()+dict()+str()"
|
||||
merger_set = mergers.string_extract_mergers(merge_how)
|
||||
self.assertEquals(3, len(merger_set))
|
||||
merger = mergers.construct(merger_set)
|
||||
merged = merger.merge(source, merge_with)
|
||||
self.assertEquals(merged['Blah3'], {
|
||||
'e': '2',
|
||||
'f': '3',
|
||||
'g': {
|
||||
'a': 'b',
|
||||
'e': 'f',
|
||||
}
|
||||
})
|
||||
|
101
tests/unittests/test_sshutil.py
Normal file
@ -0,0 +1,101 @@
from cloudinit import ssh_util
from unittest import TestCase


VALID_CONTENT = {
    'dsa': (
        "AAAAB3NzaC1kc3MAAACBAIrjOQSlSea19bExXBMBKBvcLhBoVvNBjCppNzllipF"
        "W4jgIOMcNanULRrZGjkOKat6MWJNetSbV1E6IOFDQ16rQgsh/OvYU9XhzM8seLa"
        "A21VszZuhIV7/2DE3vxu7B54zVzueG1O1Deq6goQCRGWBUnqO2yluJiG4HzrnDa"
        "jzRAAAAFQDMPO96qXd4F5A+5b2f2MO7SpVomQAAAIBpC3K2zIbDLqBBs1fn7rsv"
        "KcJvwihdlVjG7UXsDB76P2GNqVG+IlYPpJZ8TO/B/fzTMtrdXp9pSm9OY1+BgN4"
        "REsZ2WNcvfgY33aWaEM+ieCcQigvxrNAF2FTVcbUIIxAn6SmHuQSWrLSfdHc8H7"
        "hsrgeUPPdzjBD/cv2ZmqwZ1AAAAIAplIsScrJut5wJMgyK1JG0Kbw9JYQpLe95P"
        "obB069g8+mYR8U0fysmTEdR44mMu0VNU5E5OhTYoTGfXrVrkR134LqFM2zpVVbE"
        "JNDnIqDHxTkc6LY2vu8Y2pQ3/bVnllZZOda2oD5HQ7ovygQa6CH+fbaZHbdDUX/"
        "5z7u2rVAlDw=="
    ),
    'ecdsa': (
        "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBITrGBB3cgJ"
        "J7fPxvtMW9H3oRisNpJ3OAslxZeyP7I0A9BPAW0RQIwHVtVnM7zrp4nI+JLZov/"
        "Ql7lc2leWL7CY="
    ),
    'rsa': (
        "AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5oz"
        "emNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbD"
        "c1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q"
        "7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhT"
        "YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07"
        "/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw=="
    ),
}

TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"
    'command="echo \'Please login as the user \"ubuntu\" rather than the'
    'user \"root\".\';echo;sleep 10"')


class TestAuthKeyLineParser(TestCase):
    def test_simple_parse(self):
        # test key line with common 3 fields (keytype, base64, comment)
        parser = ssh_util.AuthKeyLineParser()
        for ktype in ['rsa', 'ecdsa', 'dsa']:
            content = VALID_CONTENT[ktype]
            comment = 'user-%s@host' % ktype
            line = ' '.join((ktype, content, comment,))
            key = parser.parse(line)

            self.assertEqual(key.base64, content)
            self.assertFalse(key.options)
            self.assertEqual(key.comment, comment)
            self.assertEqual(key.keytype, ktype)

    def test_parse_no_comment(self):
        # test key line with key type and base64 only
        parser = ssh_util.AuthKeyLineParser()
        for ktype in ['rsa', 'ecdsa', 'dsa']:
            content = VALID_CONTENT[ktype]
            line = ' '.join((ktype, content,))
            key = parser.parse(line)

            self.assertEqual(key.base64, content)
            self.assertFalse(key.options)
            self.assertFalse(key.comment)
            self.assertEqual(key.keytype, ktype)

    def test_parse_with_keyoptions(self):
        # test key line with options in it
        parser = ssh_util.AuthKeyLineParser()
        options = TEST_OPTIONS
        for ktype in ['rsa', 'ecdsa', 'dsa']:
            content = VALID_CONTENT[ktype]
            comment = 'user-%s@host' % ktype
            line = ' '.join((options, ktype, content, comment,))
            key = parser.parse(line)

            self.assertEqual(key.base64, content)
            self.assertEqual(key.options, options)
            self.assertEqual(key.comment, comment)
            self.assertEqual(key.keytype, ktype)

    def test_parse_with_options_passed_in(self):
        # test key line with key type and base64 only
        parser = ssh_util.AuthKeyLineParser()

        baseline = ' '.join(("rsa", VALID_CONTENT['rsa'], "user@host"))
        myopts = "no-port-forwarding,no-agent-forwarding"

        key = parser.parse("allowedopt" + " " + baseline)
        self.assertEqual(key.options, "allowedopt")

        key = parser.parse("overridden_opt " + baseline, options=myopts)
        self.assertEqual(key.options, myopts)

    def test_parse_invalid_keytype(self):
        parser = ssh_util.AuthKeyLineParser()
        key = parser.parse(' '.join(["badkeytype", VALID_CONTENT['rsa']]))

        self.assertFalse(key.valid())


# vi: ts=4 expandtab
@ -7,14 +7,17 @@ import os
|
||||
|
||||
from email.mime.base import MIMEBase
|
||||
|
||||
from mocker import MockerTestCase
|
||||
|
||||
from cloudinit import handlers
|
||||
from cloudinit import helpers as c_helpers
|
||||
from cloudinit import log
|
||||
from cloudinit import sources
|
||||
from cloudinit import stages
|
||||
from cloudinit import util
|
||||
|
||||
INSTANCE_ID = "i-testing"
|
||||
|
||||
from tests.unittests import helpers
|
||||
|
||||
|
||||
class FakeDataSource(sources.DataSource):
|
||||
|
||||
@ -26,22 +29,16 @@ class FakeDataSource(sources.DataSource):
|
||||
|
||||
# FIXME: these tests shouldn't be checking log output??
|
||||
# Weirddddd...
|
||||
|
||||
|
||||
class TestConsumeUserData(MockerTestCase):
|
||||
class TestConsumeUserData(helpers.FilesystemMockingTestCase):
|
||||
|
||||
def setUp(self):
|
||||
MockerTestCase.setUp(self)
|
||||
# Replace the write so no actual files
|
||||
# get written out...
|
||||
self.mock_write = self.mocker.replace("cloudinit.util.write_file",
|
||||
passthrough=False)
|
||||
helpers.FilesystemMockingTestCase.setUp(self)
|
||||
self._log = None
|
||||
self._log_file = None
|
||||
self._log_handler = None
|
||||
|
||||
def tearDown(self):
|
||||
MockerTestCase.tearDown(self)
|
||||
helpers.FilesystemMockingTestCase.tearDown(self)
|
||||
if self._log_handler and self._log:
|
||||
self._log.removeHandler(self._log_handler)
|
||||
|
||||
@ -53,13 +50,77 @@ class TestConsumeUserData(MockerTestCase):
|
||||
self._log.addHandler(self._log_handler)
|
||||
return log_file
|
||||
|
||||
def test_merging_cloud_config(self):
|
||||
blob = '''
|
||||
#cloud-config
|
||||
a: b
|
||||
e: f
|
||||
run:
|
||||
- b
|
||||
- c
|
||||
'''
|
||||
message1 = MIMEBase("text", "cloud-config")
|
||||
message1['Merge-Type'] = 'dict()+list(extend)+str(append)'
|
||||
message1.set_payload(blob)
|
||||
|
||||
blob2 = '''
|
||||
#cloud-config
|
||||
a: e
|
||||
e: g
|
||||
run:
|
||||
- stuff
|
||||
- morestuff
|
||||
'''
|
||||
message2 = MIMEBase("text", "cloud-config")
|
||||
message2['X-Merge-Type'] = 'dict()+list(extend)+str()'
|
||||
message2.set_payload(blob2)
|
||||
|
||||
blob3 = '''
|
||||
#cloud-config
|
||||
e:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
p: 1
|
||||
'''
|
||||
message3 = MIMEBase("text", "cloud-config")
|
||||
message3['Merge-Type'] = 'dict()+list()+str()'
|
||||
message3.set_payload(blob3)
|
||||
|
||||
messages = [message1, message2, message3]
|
||||
|
||||
paths = c_helpers.Paths({}, ds=FakeDataSource(''))
|
||||
cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths)
|
||||
|
||||
new_root = self.makeDir()
|
||||
self.patchUtils(new_root)
|
||||
self.patchOS(new_root)
|
||||
cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None,
|
||||
None)
|
||||
for i, m in enumerate(messages):
|
||||
headers = dict(m)
|
||||
fn = "part-%s" % (i + 1)
|
||||
payload = m.get_payload(decode=True)
|
||||
cloud_cfg.handle_part(None, headers['Content-Type'],
|
||||
fn, payload, None, headers)
|
||||
cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None,
|
||||
None)
|
||||
contents = util.load_file(paths.get_ipath('cloud_config'))
|
||||
contents = util.load_yaml(contents)
|
||||
self.assertEquals(contents['run'], ['b', 'c', 'stuff', 'morestuff'])
|
||||
self.assertEquals(contents['a'], 'be')
|
||||
self.assertEquals(contents['e'], 'fg')
|
||||
self.assertEquals(contents['p'], 1)
|
||||
|
||||
def test_unhandled_type_warning(self):
|
||||
"""Raw text without magic is ignored but shows warning."""
|
||||
ci = stages.Init()
|
||||
data = "arbitrary text\n"
|
||||
ci.datasource = FakeDataSource(data)
|
||||
|
||||
self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
|
||||
mock_write = self.mocker.replace("cloudinit.util.write_file",
|
||||
passthrough=False)
|
||||
mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
|
||||
self.mocker.replay()
|
||||
|
||||
log_file = self.capture_log(logging.WARNING)
|
||||
@ -76,7 +137,9 @@ class TestConsumeUserData(MockerTestCase):
|
||||
message.set_payload("Just text")
|
||||
ci.datasource = FakeDataSource(message.as_string())
|
||||
|
||||
self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
|
||||
mock_write = self.mocker.replace("cloudinit.util.write_file",
|
||||
passthrough=False)
|
||||
mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
|
||||
self.mocker.replay()
|
||||
|
||||
log_file = self.capture_log(logging.WARNING)
|
||||
@ -93,8 +156,10 @@ class TestConsumeUserData(MockerTestCase):
|
||||
ci.datasource = FakeDataSource(script)
|
||||
|
||||
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
|
||||
self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
|
||||
self.mock_write(outpath, script, 0700)
|
||||
mock_write = self.mocker.replace("cloudinit.util.write_file",
|
||||
passthrough=False)
|
||||
mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
|
||||
mock_write(outpath, script, 0700)
|
||||
self.mocker.replay()
|
||||
|
||||
log_file = self.capture_log(logging.WARNING)
|
||||
@ -111,8 +176,10 @@ class TestConsumeUserData(MockerTestCase):
        ci.datasource = FakeDataSource(message.as_string())

        outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
        self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
        self.mock_write(outpath, script, 0700)
        mock_write = self.mocker.replace("cloudinit.util.write_file",
                                         passthrough=False)
        mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
        mock_write(outpath, script, 0700)
        self.mocker.replay()

        log_file = self.capture_log(logging.WARNING)
@ -129,8 +196,10 @@ class TestConsumeUserData(MockerTestCase):
        ci.datasource = FakeDataSource(message.as_string())

        outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
        self.mock_write(outpath, script, 0700)
        self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
        mock_write = self.mocker.replace("cloudinit.util.write_file",
                                         passthrough=False)
        mock_write(outpath, script, 0700)
        mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
        self.mocker.replay()

        log_file = self.capture_log(logging.WARNING)
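
Editor's aside: the repeated blocks in the hunks above follow mocker's
record/replay protocol: self.mocker.replace() swaps the real function for a
mock during recording, each call made on the mock records an expectation, and
self.mocker.replay() switches to verification (checked at test teardown). A
self-contained sketch against a real stdlib target (the choice of
os.path.join is illustrative, not from this commit):

from mocker import MockerTestCase


class RecordReplayExample(MockerTestCase):
    def test_join_is_called_once(self):
        # Record: replace the real function and state the expected call.
        mock_join = self.mocker.replace("os.path.join", passthrough=False)
        mock_join("a", "b")
        self.mocker.result("a/b")  # canned return value for that call
        # Replay: from here on, calls are checked against the recording.
        self.mocker.replay()
        import os.path
        self.assertEqual("a/b", os.path.join("a", "b"))
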
@ -248,4 +248,99 @@ class TestLoadYaml(TestCase):
                         myobj)


class TestMountinfoParsing(TestCase):
    precise_ext4_mountinfo = \
        """15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=16422216k,nr_inodes=4105554,mode=755
18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=6572812k,mode=755
20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered
21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs cgroup rw,mode=755
22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
25 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
26 19 0:19 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
28 19 0:21 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
24 21 0:18 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset
29 21 0:22 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu
30 21 0:23 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct
31 21 0:24 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory
32 21 0:25 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices
33 21 0:26 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer
34 21 0:27 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio
35 21 0:28 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event
36 20 9:0 / /boot rw,relatime - ext4 /dev/md0 rw,data=ordered
37 16 0:29 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw
39 28 0:30 / /run/user/foobar/gvfs rw,nosuid,nodev,relatime - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000"""

    raring_btrfs_mountinfo = \
        """15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=865556k,nr_inodes=216389,mode=755
18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
19 20 0:15 / /run rw,nosuid,relatime - tmpfs tmpfs rw,size=348196k,mode=755
20 1 0:16 /@ / rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache
21 15 0:19 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
22 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
23 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
24 19 0:20 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
25 19 0:21 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
26 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
27 20 0:16 /@home /home rw,relatime - btrfs /dev/vda1 rw,compress=lzo,space_cache"""

    def test_invalid_mountinfo(self):
        line = "20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root rw,errors=remount-ro,data=ordered"
        elements = line.split()
        for i in range(len(elements) + 1):
            lines = [' '.join(elements[0:i])]
            if i < 10:
                expected = None
            else:
                expected = ('/dev/mapper/vg0-root', 'ext4', '/')
            self.assertEqual(expected, util.parse_mount_info('/', lines))

    def test_precise_ext4_root(self):
        lines = TestMountinfoParsing.precise_ext4_mountinfo.splitlines()

        expected = ('/dev/mapper/vg0-root', 'ext4', '/')
        self.assertEqual(expected, util.parse_mount_info('/', lines))
        self.assertEqual(expected, util.parse_mount_info('/usr', lines))
        self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))

        expected = ('/dev/md0', 'ext4', '/boot')
        self.assertEqual(expected, util.parse_mount_info('/boot', lines))
        self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))

        expected = ('/dev/mapper/vg0-root', 'ext4', '/')
        self.assertEqual(expected, util.parse_mount_info('/home', lines))
        self.assertEqual(expected, util.parse_mount_info('/home/me', lines))

        expected = ('tmpfs', 'tmpfs', '/run')
        self.assertEqual(expected, util.parse_mount_info('/run', lines))

        expected = ('none', 'tmpfs', '/run/lock')
        self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))

    def test_raring_btrfs_root(self):
        lines = TestMountinfoParsing.raring_btrfs_mountinfo.splitlines()

        expected = ('/dev/vda1', 'btrfs', '/')
        self.assertEqual(expected, util.parse_mount_info('/', lines))
        self.assertEqual(expected, util.parse_mount_info('/usr', lines))
        self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))
        self.assertEqual(expected, util.parse_mount_info('/boot', lines))
        self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))

        expected = ('/dev/vda1', 'btrfs', '/home')
        self.assertEqual(expected, util.parse_mount_info('/home', lines))
        self.assertEqual(expected, util.parse_mount_info('/home/me', lines))

        expected = ('tmpfs', 'tmpfs', '/run')
        self.assertEqual(expected, util.parse_mount_info('/run', lines))

        expected = ('none', 'tmpfs', '/run/lock')
        self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))

# vi: ts=4 expandtab

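Editor's sketch of the lookup these tests exercise (assumed behaviour inferred
from the tests, not the shipped util.parse_mount_info): each mountinfo line
carries the mount point in its fifth field and, after the '-' separator, the
filesystem type and source device; a query resolves to the mount whose mount
point is the longest prefix of the given path, and lines with fewer than ten
fields yield None.

def parse_mount_info_sketch(path, mountinfo_lines):
    # Return (device, fstype, mount_point) for the deepest mount containing
    # 'path', or None when a line is too short to parse.
    best = None
    for line in mountinfo_lines:
        parts = line.split()
        if len(parts) < 10 or '-' not in parts:
            return None
        mount_point = parts[4]
        sep = parts.index('-')  # optional fields may precede the separator
        fstype, device = parts[sep + 1], parts[sep + 2]
        prefix = mount_point.rstrip('/') + '/'
        if path == mount_point or (path + '/').startswith(prefix):
            if best is None or len(mount_point) > len(best[2]):
                best = (device, fstype, mount_point)
    return best

# parse_mount_info_sketch('/run/lock', lines) picks '/run/lock' over '/run'
# and '/' because its mount point is the longest matching prefix.
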
@ -10,19 +10,55 @@ task
console output

script
    # /run/network/static-network-up-emitted is written by
    # upstart (via /etc/network/if-up.d/upstart). its presence would
    # indicate that static-network-up has already fired.
    EMITTED="/run/network/static-network-up-emitted"
    [ -e "$EMITTED" -o -e "/var/$EMITTED" ] && exit 0
    set +e  # you cannot trap TERM reliably with 'set -e'
    SLEEP_CHILD=""

    static_network_up() {
        local emitted="/run/network/static-network-up-emitted"
        # /run/network/static-network-up-emitted is written by
        # upstart (via /etc/network/if-up.d/upstart). its presence would
        # indicate that static-network-up has already fired.
        [ -e "$emitted" -o -e "/var/$emitted" ]
    }
    msg() {
        local uptime="" idle=""
        if [ -r /proc/uptime ]; then
            read uptime idle < /proc/uptime
        fi
        echo "$UPSTART_JOB${uptime:+[${uptime}]}:" "$1"
    }

    handle_sigterm() {
        # if we received sigterm and static networking is up then it probably
        # came from upstart as a result of 'stop on static-network-up'
        [ -z "$SLEEP_CHILD" ] || kill $SLEEP_CHILD
        if static_network_up; then
            msg "static networking is now up"
            exit 0
        fi
        msg "received SIGTERM, networking not up"
        exit 2
    }

    dowait() {
        msg "waiting $1 seconds for network device"
        sleep "$1" &
        SLEEP_CHILD=$!
        wait $SLEEP_CHILD
        SLEEP_CHILD=""
    }

    trap handle_sigterm TERM

    # static_network_up already occurred
    static_network_up && exit 0

    # obj.pkl comes from cloud-init-local (or previous boot and
    # manual_cache_clean)
    [ -f /var/lib/cloud/instance/obj.pkl ] && exit 0

    short=10; long=120;
    sleep ${short}
    echo $UPSTART_JOB "waiting ${long} seconds for a network device."
    sleep ${long}
    echo $UPSTART_JOB "gave up waiting for a network device."
    dowait 10
    dowait 120
    msg "gave up waiting for a network device."
    : > /var/lib/cloud/data/no-net
end script
# EOF
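
Editor's note on the interesting part of the new job: upstart's 'stop on
static-network-up' delivers SIGTERM, which the script now traps so it can exit
0 (network came up) instead of dying with a 'killed by TERM signal' message.
A rough Python analogue of that trap-around-sleep pattern (illustration only;
the paths and messages mirror the shell script above):

import os
import signal
import sys
import time

EMITTED = "/run/network/static-network-up-emitted"


def static_network_up():
    # upstart (via /etc/network/if-up.d/upstart) writes this file once
    # static-network-up has fired.
    return os.path.exists(EMITTED) or os.path.exists("/var" + EMITTED)


def handle_sigterm(_signum, _frame):
    if static_network_up():
        print("static networking is now up")
        sys.exit(0)
    print("received SIGTERM, networking not up")
    sys.exit(2)


signal.signal(signal.SIGTERM, handle_sigterm)

if static_network_up():
    sys.exit(0)

for seconds in (10, 120):
    print("waiting %d seconds for network device" % seconds)
    time.sleep(seconds)  # a delivered SIGTERM interrupts this wait

print("gave up waiting for a network device.")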