Fix merge conflict

commit c4e470a227
@@ -13,10 +13,16 @@
    redirect cloud-init stderr and stdout /var/log/cloud-init-output.log.
  - drop support for resizing partitions with parted entirely (LP: #1212492).
    This was broken as it was anyway.
- - add support for vendordata.
+ - add support for vendordata in SmartOS and NoCloud datasources.
  - drop dependency on boto for crawling ec2 metadata service.
  - add 'Requires' on sudo (for OpenNebula datasource) in rpm specs, and
    'Recommends' in the debian/control.in [Vlastimil Holer]
+ - if mount_info reports /dev/root is a device path for /, then convert
+   that to a device via help of kernel cmdline.
+ - configdrive: consider partitions as possible datasources if they have
+   the correct filesystem label. [Paul Querna]
+ - initial freebsd support [Harm Weites]
+ - fix in is_ipv4 to accept IP addresses with a '0' in them.
 0.7.4:
  - fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a
    partitioned block device with target filesystem on ephemeral0.1.
@@ -14,10 +14,10 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
-from cloudinit import util
 from cloudinit import type_utils
+from cloudinit import util
 import copy
+from StringIO import StringIO
 
 
 def _make_header(text):
@@ -114,6 +114,41 @@ class ResizeGrowPart(object):
         return (before, get_size(partdev))
 
 
+class ResizeGpart(object):
+    def available(self):
+        if not util.which('gpart'):
+            return False
+        return True
+
+    def resize(self, diskdev, partnum, partdev):
+        """
+        GPT disks store metadata at the beginning (primary) and at the
+        end (secondary) of the disk. When launching an image with a
+        larger disk compared to the original image, the secondary copy
+        is lost. Thus, the metadata will be marked CORRUPT, and need to
+        be recovered.
+        """
+        try:
+            util.subp(["gpart", "recover", diskdev])
+        except util.ProcessExecutionError as e:
+            if e.exit_code != 0:
+                util.logexc(LOG, "Failed: gpart recover %s", diskdev)
+                raise ResizeFailedException(e)
+
+        before = get_size(partdev)
+        try:
+            util.subp(["gpart", "resize", "-i", partnum, diskdev])
+        except util.ProcessExecutionError as e:
+            util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
+            raise ResizeFailedException(e)
+
+        # Since growing the FS requires a reboot, make sure we reboot
+        # first when this module has finished.
+        open('/var/run/reboot-required', 'a').close()
+
+        return (before, get_size(partdev))
+
+
 def get_size(filename):
     fd = os.open(filename, os.O_RDONLY)
     try:
@@ -132,6 +167,12 @@ def device_part_info(devpath):
     bname = os.path.basename(rpath)
     syspath = "/sys/class/block/%s" % bname
+
+    # FreeBSD doesn't know of sysfs so just get everything we need from
+    # the device, like /dev/vtbd0p2.
+    if util.system_info()["platform"].startswith('FreeBSD'):
+        m = re.search('^(/dev/.+)p([0-9])$', devpath)
+        return (m.group(1), m.group(2))
 
     if not os.path.exists(syspath):
         raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
 
@@ -182,7 +223,8 @@ def resize_devices(resizer, devices):
                 "stat of '%s' failed: %s" % (blockdev, e),))
             continue
 
-        if not stat.S_ISBLK(statret.st_mode):
+        if (not stat.S_ISBLK(statret.st_mode) and
+                not stat.S_ISCHR(statret.st_mode)):
             info.append((devent, RESIZE.SKIPPED,
                 "device '%s' not a block device" % blockdev,))
             continue
@@ -256,4 +298,4 @@ def handle(_name, cfg, _cloud, log, _args):
     else:
         log.debug("'%s' %s: %s" % (entry, action, msg))
 
-RESIZERS = (('growpart', ResizeGrowPart),)
+RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart))
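The extended RESIZERS tuple is an ordered registry: the growpart module walks it and uses the first resizer whose available() check passes, so ResizeGpart is only reached on systems without the growpart tool (such as FreeBSD). A minimal standalone sketch of that first-available pattern follows; the probe via shutil.which and the class names are illustrative stand-ins (and Python 3), not the module's actual code:

    # Sketch: pick the first resizer whose tool is present on PATH.
    import shutil

    class GrowpartResizer(object):
        def available(self):
            return shutil.which('growpart') is not None

    class GpartResizer(object):
        def available(self):
            return shutil.which('gpart') is not None

    RESIZERS = (('growpart', GrowpartResizer), ('gpart', GpartResizer))

    def pick_resizer():
        for name, cls in RESIZERS:
            if cls().available():
                return name
        raise RuntimeError("no resizer available")

    print(pick_resizer())  # 'growpart' on most Linux hosts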
@@ -22,6 +22,7 @@ from cloudinit import util
 import errno
 import os
 import re
+import signal
 import subprocess
 import time
 
@@ -30,6 +31,24 @@ frequency = PER_INSTANCE
 EXIT_FAIL = 254
 
 
+def givecmdline(pid):
+    # Returns the cmdline for the given process id. In Linux we can use procfs
+    # for this but on BSD there is /usr/bin/procstat.
+    try:
+        # Example output from procstat -c 1
+        #   PID COMM             ARGS
+        #     1 init             /bin/init --
+        if util.system_info()["platform"].startswith('FreeBSD'):
+            (output, _err) = util.subp(['procstat', '-c', str(pid)])
+            line = output.splitlines()[1]
+            m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
+            return m.group(2)
+        else:
+            return util.load_file("/proc/%s/cmdline" % pid)
+    except IOError:
+        return None
+
+
 def handle(_name, cfg, _cloud, log, _args):
 
     try:
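givecmdline() branches on platform: /proc/<pid>/cmdline on Linux, `procstat -c <pid>` on FreeBSD. A small sketch of the FreeBSD branch's parsing, run against a canned procstat output rather than a live process (the sample text is hypothetical, shaped like the comment in the hunk above):

    import re

    # Hypothetical `procstat -c 1` output.
    sample = "  PID COMM             ARGS\n    1 init             /bin/init --"

    line = sample.splitlines()[1]
    m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
    print(m.group(2))  # -> /bin/init --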
@@ -42,8 +61,8 @@ def handle(_name, cfg, _cloud, log, _args):
         return
 
     mypid = os.getpid()
-    cmdline = util.load_file("/proc/%s/cmdline" % mypid)
 
+    cmdline = givecmdline(mypid)
     if not cmdline:
         log.warn("power_state: failed to get cmdline of current process")
         return
@@ -119,8 +138,6 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
     msg = None
     end_time = time.time() + timeout
 
-    cmdline_f = "/proc/%s/cmdline" % pid
-
     def fatal(msg):
         if log:
             log.warn(msg)
@@ -134,16 +151,14 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
             break
 
         try:
-            cmdline = ""
-            with open(cmdline_f) as fp:
-                cmdline = fp.read()
+            cmdline = givecmdline(pid)
             if cmdline != pidcmdline:
                 msg = "cmdline changed for %s [now: %s]" % (pid, cmdline)
                 break
 
         except IOError as ioerr:
             if ioerr.errno in known_errnos:
-                msg = "pidfile '%s' gone [%d]" % (cmdline_f, ioerr.errno)
+                msg = "pidfile gone [%d]" % ioerr.errno
             else:
                 fatal("IOError during wait: %s" % ioerr)
             break
@@ -39,6 +39,10 @@ def _resize_ext(mount_point, devpth):  # pylint: disable=W0613
 def _resize_xfs(mount_point, devpth):  # pylint: disable=W0613
     return ('xfs_growfs', devpth)
 
 
+def _resize_ufs(mount_point, devpth):  # pylint: disable=W0613
+    return ('growfs', devpth)
+
+
 # Do not use a dictionary as these commands should be able to be used
 # for multiple filesystem types if possible, e.g. one command for
 # ext2, ext3 and ext4.
@@ -46,11 +50,31 @@ RESIZE_FS_PREFIXES_CMDS = [
     ('btrfs', _resize_btrfs),
     ('ext', _resize_ext),
     ('xfs', _resize_xfs),
+    ('ufs', _resize_ufs),
 ]
 
 NOBLOCK = "noblock"
 
 
+def rootdev_from_cmdline(cmdline):
+    found = None
+    for tok in cmdline.split():
+        if tok.startswith("root="):
+            found = tok[5:]
+            break
+    if found is None:
+        return None
+
+    if found.startswith("/dev/"):
+        return found
+    if found.startswith("LABEL="):
+        return "/dev/disk/by-label/" + found[len("LABEL="):]
+    if found.startswith("UUID="):
+        return "/dev/disk/by-uuid/" + found[len("UUID="):]
+
+    return "/dev/" + found
+
+
 def handle(name, cfg, _cloud, log, args):
 
     if len(args) != 0:
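rootdev_from_cmdline() normalizes the root= token from the kernel command line into a device path. A quick worked example of the mapping it implements, with the function body copied from the hunk above so it runs standalone:

    def rootdev_from_cmdline(cmdline):
        found = None
        for tok in cmdline.split():
            if tok.startswith("root="):
                found = tok[5:]
                break
        if found is None:
            return None
        if found.startswith("/dev/"):
            return found
        if found.startswith("LABEL="):
            return "/dev/disk/by-label/" + found[len("LABEL="):]
        if found.startswith("UUID="):
            return "/dev/disk/by-uuid/" + found[len("UUID="):]
        return "/dev/" + found

    print(rootdev_from_cmdline("ro root=/dev/vda1"))      # /dev/vda1
    print(rootdev_from_cmdline("ro root=LABEL=rootfs"))   # /dev/disk/by-label/rootfs
    print(rootdev_from_cmdline("ro root=UUID=abcd-1234")) # /dev/disk/by-uuid/abcd-1234
    print(rootdev_from_cmdline("ro root=vda1"))           # /dev/vda1
    print(rootdev_from_cmdline("quiet splash"))           # None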
@@ -79,10 +103,20 @@ def handle(name, cfg, _cloud, log, args):
     info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
     log.debug("resize_info: %s" % info)
 
+    container = util.is_container()
+
+    if (devpth == "/dev/root" and not os.path.exists(devpth) and
+            not container):
+        devpth = rootdev_from_cmdline(util.get_cmdline())
+        if devpth is None:
+            log.warn("Unable to find device '/dev/root'")
+            return
+        log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)
+
     try:
         statret = os.stat(devpth)
     except OSError as exc:
-        if util.is_container() and exc.errno == errno.ENOENT:
+        if container and exc.errno == errno.ENOENT:
             log.debug("Device '%s' did not exist in container. "
                       "cannot resize: %s" % (devpth, info))
         elif exc.errno == errno.ENOENT:
@@ -92,8 +126,8 @@ def handle(name, cfg, _cloud, log, args):
             raise exc
         return
 
-    if not stat.S_ISBLK(statret.st_mode):
-        if util.is_container():
+    if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
+        if container:
             log.debug("device '%s' not a block device in container."
                       " cannot resize: %s" % (devpth, info))
         else:
@@ -39,8 +39,9 @@ from cloudinit.distros.parsers import hosts
 OSFAMILIES = {
     'debian': ['debian', 'ubuntu'],
     'redhat': ['fedora', 'rhel'],
-    'suse': ['sles'],
     'gentoo': ['gentoo'],
+    'freebsd': ['freebsd'],
+    'suse': ['sles']
 }
 
 LOG = logging.getLogger(__name__)
cloudinit/distros/freebsd.py (new file, 259 lines)
@@ -0,0 +1,259 @@
# vi: ts=4 expandtab
#
#    Copyright (C) 2014 Harm Weites
#
#    Author: Harm Weites <harm@weites.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License version 3, as
#    published by the Free Software Foundation.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.

from StringIO import StringIO

import re

from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import ssh_util
from cloudinit import util

LOG = logging.getLogger(__name__)


class Distro(distros.Distro):
    rc_conf_fn = "/etc/rc.conf"
    login_conf_fn = '/etc/login.conf'
    login_conf_fn_bak = '/etc/login.conf.orig'

    def __init__(self, name, cfg, paths):
        distros.Distro.__init__(self, name, cfg, paths)
        # This will be used to restrict certain
        # calls from repeatedly happening (when they
        # should only happen say once per instance...)
        self._runner = helpers.Runners(paths)
        self.osfamily = 'freebsd'

    # Updates a key in /etc/rc.conf.
    def updatercconf(self, key, value):
        LOG.debug("updatercconf: %s => %s", key, value)
        conf = self.loadrcconf()
        config_changed = False
        for item in conf:
            if item == key and conf[item] != value:
                conf[item] = value
                LOG.debug("[rc.conf]: Value %s for key %s needs to be changed",
                          value, key)
                config_changed = True

        if config_changed:
            LOG.debug("Writing new %s file", self.rc_conf_fn)
            buf = StringIO()
            for keyval in conf.items():
                buf.write("%s=%s\n" % keyval)
            util.write_file(self.rc_conf_fn, buf.getvalue())

    # Load the contents of /etc/rc.conf and store all keys in a dict.
    def loadrcconf(self):
        conf = {}
        lines = util.load_file(self.rc_conf_fn).splitlines()
        for line in lines:
            tok = line.split('=')
            conf[tok[0]] = tok[1].rstrip()
        return conf

    def readrcconf(self, key):
        conf = self.loadrcconf()
        try:
            val = conf[key]
        except KeyError:
            val = None
        return val

    def _read_system_hostname(self):
        sys_hostname = self._read_hostname()
        return ('rc.conf', sys_hostname)

    def _read_hostname(self, filename, default=None):
        hostname = None
        try:
            hostname = self.readrcconf('hostname')
        except IOError:
            pass
        if not hostname:
            return default
        return hostname

    def _select_hostname(self, hostname, fqdn):
        if not hostname:
            return fqdn
        return hostname

    def _write_hostname(self, hostname, filename):
        self.updatercconf('hostname', hostname)

    def create_group(self, name, members):
        group_add_cmd = ['pw', '-n', name]
        if util.is_group(name):
            LOG.warn("Skipping creation of existing group '%s'", name)
        else:
            try:
                util.subp(group_add_cmd)
                LOG.info("Created new group %s", name)
            except Exception as e:
                util.logexc(LOG, "Failed to create group %s", name)
                raise e

        if len(members) > 0:
            for member in members:
                if not util.is_user(member):
                    LOG.warn("Unable to add group member '%s' to group '%s'"
                             "; user does not exist.", member, name)
                    continue
                try:
                    util.subp(['pw', 'usermod', '-n', name, '-G', member])
                    LOG.info("Added user '%s' to group '%s'", member, name)
                except Exception:
                    util.logexc(LOG, "Failed to add user '%s' to group '%s'",
                                member, name)

    def add_user(self, name, **kwargs):
        if util.is_user(name):
            LOG.info("User %s already exists, skipping.", name)
            return False

        adduser_cmd = ['pw', 'useradd', '-n', name]
        log_adduser_cmd = ['pw', 'useradd', '-n', name]

        adduser_opts = {
            "homedir": '-d',
            "gecos": '-c',
            "primary_group": '-g',
            "groups": '-G',
            "passwd": '-h',
            "shell": '-s',
            "inactive": '-E',
        }
        adduser_flags = {
            "no_user_group": '--no-user-group',
            "system": '--system',
            "no_log_init": '--no-log-init',
        }

        redact_opts = ['passwd']

        for key, val in kwargs.iteritems():
            if key in adduser_opts and val and isinstance(val, basestring):
                adduser_cmd.extend([adduser_opts[key], val])

                # Redact certain fields from the logs
                if key in redact_opts:
                    log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
                else:
                    log_adduser_cmd.extend([adduser_opts[key], val])

            elif key in adduser_flags and val:
                adduser_cmd.append(adduser_flags[key])
                log_adduser_cmd.append(adduser_flags[key])

        if 'no_create_home' in kwargs or 'system' in kwargs:
            adduser_cmd.append('-d/nonexistent')
            log_adduser_cmd.append('-d/nonexistent')
        else:
            adduser_cmd.append('-d/usr/home/%s' % name)
            adduser_cmd.append('-m')
            log_adduser_cmd.append('-d/usr/home/%s' % name)
            log_adduser_cmd.append('-m')

        # Run the command
        LOG.info("Adding user %s", name)
        try:
            util.subp(adduser_cmd, logstring=log_adduser_cmd)
        except Exception as e:
            util.logexc(LOG, "Failed to create user %s", name)
            raise e

    # TODO:
    def set_passwd(self, user, passwd, hashed=False):
        return False

    def lock_passwd(self, name):
        try:
            util.subp(['pw', 'usermod', name, '-h', '-'])
        except Exception as e:
            util.logexc(LOG, "Failed to lock user %s", name)
            raise e

    # TODO:
    def write_sudo_rules(self, name, rules, sudo_file=None):
        LOG.debug("[write_sudo_rules] Name: %s", name)

    def create_user(self, name, **kwargs):
        self.add_user(name, **kwargs)

        # Set password if plain-text password provided and non-empty
        if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
            self.set_passwd(name, kwargs['plain_text_passwd'])

        # Default locking down the account. 'lock_passwd' defaults to True.
        # lock account unless lock_password is False.
        if kwargs.get('lock_passwd', True):
            self.lock_passwd(name)

        # Configure sudo access
        if 'sudo' in kwargs:
            self.write_sudo_rules(name, kwargs['sudo'])

        # Import SSH keys
        if 'ssh_authorized_keys' in kwargs:
            keys = set(kwargs['ssh_authorized_keys']) or []
            ssh_util.setup_user_keys(keys, name, options=None)

    def _write_network(self, settings):
        return

    def apply_locale(self, locale, out_fn=None):
        # Adjust the locals value to the new value
        newconf = StringIO()
        for line in util.load_file(self.login_conf_fn).splitlines():
            newconf.write(re.sub(r'^default:',
                                 r'default:lang=%s:' % locale, line))
            newconf.write("\n")

        # Make a backup of login.conf.
        util.copy(self.login_conf_fn, self.login_conf_fn_bak)

        # And write the new login.conf.
        util.write_file(self.login_conf_fn, newconf.getvalue())

        try:
            LOG.debug("Running cap_mkdb for %s", locale)
            util.subp(['cap_mkdb', self.login_conf_fn])
        except util.ProcessExecutionError:
            # cap_mkdb failed, so restore the backup.
            util.logexc(LOG, "Failed to apply locale %s", locale)
            try:
                util.copy(self.login_conf_fn_bak, self.login_conf_fn)
            except IOError:
                util.logexc(LOG, "Failed to restore %s backup",
                            self.login_conf_fn)

    def install_packages(self, pkglist):
        return

    def package_command(self, cmd, args=None, pkgs=None):
        return

    def set_timezone(self, tz):
        return

    def update_package_sources(self):
        return
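The rc.conf helpers treat the file as flat key=value lines: loadrcconf() splits each line on '=', and updatercconf() rewrites the file only when a value actually changes. A self-contained sketch of the same parse/serialize round trip on an in-memory string (no /etc/rc.conf access; unlike updatercconf(), this simplified version also inserts missing keys):

    def load_rcconf_text(text):
        conf = {}
        for line in text.splitlines():
            tok = line.split('=')
            conf[tok[0]] = tok[1].rstrip()
        return conf

    def update_rcconf_text(text, key, value):
        conf = load_rcconf_text(text)
        if conf.get(key) == value:
            return text  # nothing changed, skip the rewrite
        conf[key] = value
        return "".join("%s=%s\n" % kv for kv in conf.items())

    sample = 'hostname="old"\nsshd_enable="YES"\n'
    print(update_rcconf_text(sample, 'hostname', '"vm01"'))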
cloudinit/distros/net_util.py (new file, 163 lines)
@@ -0,0 +1,163 @@
# vi: ts=4 expandtab
#
#    Copyright (C) 2012 Canonical Ltd.
#    Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
#    Copyright (C) 2012 Yahoo! Inc.
#
#    Author: Scott Moser <scott.moser@canonical.com>
#    Author: Juerg Haefliger <juerg.haefliger@hp.com>
#    Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License version 3, as
#    published by the Free Software Foundation.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.


# This is a util function to translate debian based distro interface blobs as
# given in /etc/network/interfaces to a *somewhat* agnostic format for
# distributions that use other formats.
#
# TODO(harlowja) remove when we have python-netcf active...
#
# The format is the following:
# {
#     <device-name>: {
#         # All optional (if not existent in original format)
#         "netmask": <ip>,
#         "broadcast": <ip>,
#         "gateway": <ip>,
#         "address": <ip>,
#         "bootproto": "static"|"dhcp",
#         "dns-search": <hostname>,
#         "hwaddress": <mac-address>,
#         "auto": True (or non-existent),
#         "dns-nameservers": [<ip/hostname>, ...],
#     }
# }
#
# Things to note, comments are removed, and if a ubuntu/debian interface is
# marked as auto then only the first segment (?) is retained, ie
# 'auto eth0 eth0:1' just marks eth0 as auto (not eth0:1).
#
# Example input:
#
# auto lo
# iface lo inet loopback
#
# auto eth0
# iface eth0 inet static
#     address 10.0.0.1
#     netmask 255.255.252.0
#     broadcast 10.0.0.255
#     gateway 10.0.0.2
#     dns-nameservers 98.0.0.1 98.0.0.2
#
# Example output:
# {
# "lo": {
#     "auto": true
# },
# "eth0": {
#     "auto": true,
#     "dns-nameservers": [
#         "98.0.0.1",
#         "98.0.0.2"
#     ],
#     "broadcast": "10.0.0.255",
#     "netmask": "255.255.252.0",
#     "bootproto": "static",
#     "address": "10.0.0.1",
#     "gateway": "10.0.0.2"
# }
# }

def translate_network(settings):
    # Get the standard cmd, args from the ubuntu format
    entries = []
    for line in settings.splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        split_up = line.split(None, 1)
        if len(split_up) <= 1:
            continue
        entries.append(split_up)
    # Figure out where each iface section is
    ifaces = []
    consume = {}
    for (cmd, args) in entries:
        if cmd == 'iface':
            if consume:
                ifaces.append(consume)
                consume = {}
            consume[cmd] = args
        else:
            consume[cmd] = args
    # Check if anything left over to consume
    absorb = False
    for (cmd, args) in consume.iteritems():
        if cmd == 'iface':
            absorb = True
    if absorb:
        ifaces.append(consume)
    # Now translate
    real_ifaces = {}
    for info in ifaces:
        if 'iface' not in info:
            continue
        iface_details = info['iface'].split(None)
        dev_name = None
        if len(iface_details) >= 1:
            dev = iface_details[0].strip().lower()
            if dev:
                dev_name = dev
        if not dev_name:
            continue
        iface_info = {}
        if len(iface_details) >= 3:
            proto_type = iface_details[2].strip().lower()
            # Seems like this can be 'loopback' which we don't
            # really care about
            if proto_type in ['dhcp', 'static']:
                iface_info['bootproto'] = proto_type
        # These can just be copied over
        for k in ['netmask', 'address', 'gateway', 'broadcast']:
            if k in info:
                val = info[k].strip().lower()
                if val:
                    iface_info[k] = val
        # Name server info provided??
        if 'dns-nameservers' in info:
            iface_info['dns-nameservers'] = info['dns-nameservers'].split()
        # Name server search info provided??
        if 'dns-search' in info:
            iface_info['dns-search'] = info['dns-search'].split()
        # Is any mac address spoofing going on??
        if 'hwaddress' in info:
            hw_info = info['hwaddress'].lower().strip()
            hw_split = hw_info.split(None, 1)
            if len(hw_split) == 2 and hw_split[0].startswith('ether'):
                hw_addr = hw_split[1]
                if hw_addr:
                    iface_info['hwaddress'] = hw_addr
        real_ifaces[dev_name] = iface_info
    # Check for those that should be started on boot via 'auto'
    for (cmd, args) in entries:
        if cmd == 'auto':
            # Seems like auto can be like 'auto eth0 eth0:1' so just get the
            # first part out as the device name
            args = args.split(None)
            if not args:
                continue
            dev_name = args[0].strip().lower()
            if dev_name in real_ifaces:
                real_ifaces[dev_name]['auto'] = True
    return real_ifaces
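translate_network() is pure string processing, so it is easy to exercise directly. Feeding it the example input from its own comment block yields the documented dict; this assumes a cloud-init tree of this era is importable and a Python 2 interpreter, since the function uses iteritems():

    from cloudinit.distros import net_util

    settings = """
    auto lo
    iface lo inet loopback

    auto eth0
    iface eth0 inet static
        address 10.0.0.1
        netmask 255.255.252.0
        broadcast 10.0.0.255
        gateway 10.0.0.2
        dns-nameservers 98.0.0.1 98.0.0.2
    """

    ifaces = net_util.translate_network(settings)
    print(ifaces['eth0']['bootproto'])        # static
    print(ifaces['eth0']['dns-nameservers'])  # ['98.0.0.1', '98.0.0.2']
    print(ifaces['lo'].get('auto'))           # True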
@@ -25,6 +25,7 @@ from cloudinit import helpers
 from cloudinit import log as logging
 from cloudinit import util
 
+from cloudinit.distros import net_util
 from cloudinit.distros import rhel_util
 from cloudinit.settings import PER_INSTANCE
 
@@ -63,7 +64,7 @@ class Distro(distros.Distro):
 
     def _write_network(self, settings):
         # TODO(harlowja) fix this... since this is the ubuntu format
-        entries = rhel_util.translate_network(settings)
+        entries = net_util.translate_network(settings)
         LOG.debug("Translated ubuntu style network settings %s into %s",
                   settings, entries)
         # Make the intermediate format as the rhel format...
@@ -30,94 +30,6 @@ from cloudinit import util
 LOG = logging.getLogger(__name__)
 
 
-# This is a util function to translate Debian based distro interface blobs as
-# given in /etc/network/interfaces to an equivalent format for distributions
-# that use ifcfg-* style (Red Hat and SUSE).
-# TODO(harlowja) remove when we have python-netcf active...
-def translate_network(settings):
-    # Get the standard cmd, args from the ubuntu format
-    entries = []
-    for line in settings.splitlines():
-        line = line.strip()
-        if not line or line.startswith("#"):
-            continue
-        split_up = line.split(None, 1)
-        if len(split_up) <= 1:
-            continue
-        entries.append(split_up)
-    # Figure out where each iface section is
-    ifaces = []
-    consume = {}
-    for (cmd, args) in entries:
-        if cmd == 'iface':
-            if consume:
-                ifaces.append(consume)
-                consume = {}
-            consume[cmd] = args
-        else:
-            consume[cmd] = args
-    # Check if anything left over to consume
-    absorb = False
-    for (cmd, args) in consume.iteritems():
-        if cmd == 'iface':
-            absorb = True
-    if absorb:
-        ifaces.append(consume)
-    # Now translate
-    real_ifaces = {}
-    for info in ifaces:
-        if 'iface' not in info:
-            continue
-        iface_details = info['iface'].split(None)
-        dev_name = None
-        if len(iface_details) >= 1:
-            dev = iface_details[0].strip().lower()
-            if dev:
-                dev_name = dev
-        if not dev_name:
-            continue
-        iface_info = {}
-        if len(iface_details) >= 3:
-            proto_type = iface_details[2].strip().lower()
-            # Seems like this can be 'loopback' which we don't
-            # really care about
-            if proto_type in ['dhcp', 'static']:
-                iface_info['bootproto'] = proto_type
-        # These can just be copied over
-        for k in ['netmask', 'address', 'gateway', 'broadcast']:
-            if k in info:
-                val = info[k].strip().lower()
-                if val:
-                    iface_info[k] = val
-        # Name server info provided??
-        if 'dns-nameservers' in info:
-            iface_info['dns-nameservers'] = info['dns-nameservers'].split()
-        # Name server search info provided??
-        if 'dns-search' in info:
-            iface_info['dns-search'] = info['dns-search'].split()
-        # Is any mac address spoofing going on??
-        if 'hwaddress' in info:
-            hw_info = info['hwaddress'].lower().strip()
-            hw_split = hw_info.split(None, 1)
-            if len(hw_split) == 2 and hw_split[0].startswith('ether'):
-                hw_addr = hw_split[1]
-                if hw_addr:
-                    iface_info['hwaddress'] = hw_addr
-        real_ifaces[dev_name] = iface_info
-    # Check for those that should be started on boot via 'auto'
-    for (cmd, args) in entries:
-        if cmd == 'auto':
-            # Seems like auto can be like 'auto eth0 eth0:1' so just get the
-            # first part out as the device name
-            args = args.split(None)
-            if not args:
-                continue
-            dev_name = args[0].strip().lower()
-            if dev_name in real_ifaces:
-                real_ifaces[dev_name]['auto'] = True
-    return real_ifaces
-
-
 # Helper function to update a RHEL/SUSE /etc/sysconfig/* file
 def update_sysconfig_file(fn, adjustments, allow_empty=False):
     if not adjustments:
@@ -26,6 +26,7 @@ from cloudinit import helpers
 from cloudinit import log as logging
 from cloudinit import util
 
+from cloudinit.distros import net_util
 from cloudinit.distros import rhel_util
 from cloudinit.settings import PER_INSTANCE
 
@@ -54,7 +55,7 @@ class Distro(distros.Distro):
 
     def _write_network(self, settings):
         # Convert debian settings to ifcfg format
-        entries = rhel_util.translate_network(settings)
+        entries = net_util.translate_network(settings)
         LOG.debug("Translated ubuntu style network settings %s into %s",
                   settings, entries)
         # Make the intermediate format as the suse format...
@@ -16,6 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+import httplib
 from urlparse import (urlparse, urlunparse)
 
 import functools
@@ -23,9 +24,11 @@ import json
 import urllib
 
 from cloudinit import log as logging
+from cloudinit import url_helper
 from cloudinit import util
 
 LOG = logging.getLogger(__name__)
+SKIP_USERDATA_CODES = frozenset([httplib.NOT_FOUND])
 
 
 def maybe_json_object(text):
@@ -138,20 +141,38 @@ class MetadataMaterializer(object):
         return joined
 
 
+def _skip_retry_on_codes(status_codes, _request_args, cause):
+    """Returns if a request should retry based on a given set of codes that
+    case retrying to be stopped/skipped.
+    """
+    if cause.code in status_codes:
+        return False
+    return True
+
+
 def get_instance_userdata(api_version='latest',
                           metadata_address='http://169.254.169.254',
                           ssl_details=None, timeout=5, retries=5):
     ud_url = combine_url(metadata_address, api_version)
     ud_url = combine_url(ud_url, 'user-data')
+    user_data = ''
     try:
+        # It is ok for userdata to not exist (thats why we are stopping if
+        # NOT_FOUND occurs) and just in that case returning an empty string.
+        exception_cb = functools.partial(_skip_retry_on_codes,
+                                         SKIP_USERDATA_CODES)
         response = util.read_file_or_url(ud_url,
                                          ssl_details=ssl_details,
                                          timeout=timeout,
-                                         retries=retries)
-        return str(response)
+                                         retries=retries,
+                                         exception_cb=exception_cb)
+        user_data = str(response)
+    except url_helper.UrlError as e:
+        if e.code not in SKIP_USERDATA_CODES:
+            util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
     except Exception:
         util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
-        return ''
+    return user_data
 
 
 def get_instance_metadata(api_version='latest',
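The retry machinery calls exception_cb on each failure; returning False stops further retries, so a 404 on user-data is treated as "no userdata" rather than a transient error. _skip_retry_on_codes is curried with functools.partial so the frozen status-code set rides along. A minimal sketch of that currying; the UrlError stand-in here is a hypothetical stub, not cloud-init's class:

    import functools

    NOT_FOUND = 404
    SKIP_USERDATA_CODES = frozenset([NOT_FOUND])

    class FakeUrlError(Exception):
        # hypothetical stand-in for cloudinit.url_helper.UrlError
        def __init__(self, code):
            self.code = code

    def _skip_retry_on_codes(status_codes, _request_args, cause):
        # False -> stop retrying; True -> keep retrying
        return cause.code not in status_codes

    exception_cb = functools.partial(_skip_retry_on_codes, SKIP_USERDATA_CODES)
    print(exception_cb(None, FakeUrlError(404)))  # False: no userdata, stop
    print(exception_cb(None, FakeUrlError(500)))  # True: transient, retry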
@@ -21,6 +21,7 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 import cloudinit.util as util
+import re
 
 from prettytable import PrettyTable
 
@@ -40,27 +41,40 @@ def netdev_info(empty=""):
         toks = line.lower().strip().split()
         if toks[0] == "up":
             devs[curdev]['up'] = True
+        # If the output of ifconfig doesn't contain the required info in the
+        # obvious place, use a regex filter to be sure.
+        elif len(toks) > 1:
+            if re.search(r"flags=\d+<up,", toks[1]):
+                devs[curdev]['up'] = True
+
         fieldpost = ""
         if toks[0] == "inet6":
             fieldpost = "6"
 
         for i in range(len(toks)):
-            if toks[i] == "hwaddr":
+            if toks[i] == "hwaddr" or toks[i] == "ether":
                 try:
                     devs[curdev]["hwaddr"] = toks[i + 1]
                 except IndexError:
                     pass
-            for field in ("addr", "bcast", "mask"):
+
+            # Couple the different items we're interested in with the correct
+            # field since FreeBSD/CentOS/Fedora differ in the output.
+            ifconfigfields = {
+                "addr:": "addr", "inet": "addr",
+                "bcast:": "bcast", "broadcast": "bcast",
+                "mask:": "mask", "netmask": "mask"
+            }
+            for origfield, field in ifconfigfields.items():
                 target = "%s%s" % (field, fieldpost)
                 if devs[curdev].get(target, ""):
                     continue
-                if toks[i] == "%s:" % field:
+                if toks[i] == "%s" % origfield:
                     try:
                         devs[curdev][target] = toks[i + 1]
                     except IndexError:
                         pass
-                elif toks[i].startswith("%s:" % field):
+                elif toks[i].startswith("%s" % origfield):
                     devs[curdev][target] = toks[i][len(field) + 1:]
 
     if empty != "":
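The ifconfigfields map keys both spellings of each field ("addr:" from Linux ifconfig, "inet" from FreeBSD) to one canonical name, so the token scan stays format-agnostic. A compact demonstration of the exact-match arm on a canned FreeBSD-style line (the sample line is hypothetical, and this sketch omits the startswith fallback):

    ifconfigfields = {
        "addr:": "addr", "inet": "addr",
        "bcast:": "bcast", "broadcast": "bcast",
        "mask:": "mask", "netmask": "mask",
    }

    def scan(tokens):
        found = {}
        for i, tok in enumerate(tokens):
            for origfield, field in ifconfigfields.items():
                if field not in found and tok == origfield and i + 1 < len(tokens):
                    found[field] = tokens[i + 1]
        return found

    print(scan("inet 10.0.0.5 netmask 255.255.252.0 broadcast 10.0.0.255".split()))
    # {'addr': '10.0.0.5', 'mask': '255.255.252.0', 'bcast': '10.0.0.255'}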
@@ -73,15 +87,32 @@ def netdev_info(empty=""):
 
 
 def route_info():
-    (route_out, _err) = util.subp(["route", "-n"])
+    (route_out, _err) = util.subp(["netstat", "-rn"])
     routes = []
     entries = route_out.splitlines()[1:]
     for line in entries:
         if not line:
             continue
         toks = line.split()
-        if len(toks) < 8 or toks[0] == "Kernel" or toks[0] == "Destination":
+
+        # FreeBSD shows 6 items in the routing table:
+        #   Destination  Gateway    Flags  Refs   Use   Netif  Expire
+        #   default      10.65.0.1  UGS    0      34920 vtnet0
+        #
+        # Linux netstat shows 2 more:
+        #   Destination  Gateway    Genmask  Flags  MSS  Window  irtt  Iface
+        #   0.0.0.0      10.65.0.1  0.0.0.0  UG     0    0       0     eth0
+        if (len(toks) < 6 or toks[0] == "Kernel" or
+                toks[0] == "Destination" or toks[0] == "Internet" or
+                toks[0] == "Internet6" or toks[0] == "Routing"):
             continue
+
+        if len(toks) < 8:
+            toks.append("-")
+            toks.append("-")
+            toks[7] = toks[5]
+            toks[5] = "-"
+
         entry = {
             'destination': toks[0],
             'gateway': toks[1],
|
|||||||
'use': toks[6],
|
'use': toks[6],
|
||||||
'iface': toks[7],
|
'iface': toks[7],
|
||||||
}
|
}
|
||||||
|
|
||||||
routes.append(entry)
|
routes.append(entry)
|
||||||
return routes
|
return routes
|
||||||
|
|
||||||
|
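Switching from `route -n` to `netstat -rn` lets one parser serve both kernels; FreeBSD's 6-column rows are then padded out to the 8-column Linux shape so the entry dict can index columns uniformly. A sketch of that padding on a canned FreeBSD row (the sample line is hypothetical):

    # Normalize a 6-column FreeBSD netstat row to the 8-column Linux layout.
    line = "default 10.65.0.1 UGS 0 34920 vtnet0"
    toks = line.split()
    if len(toks) < 8:
        toks.append("-")
        toks.append("-")
        toks[7] = toks[5]   # move the interface name into the Linux 'Iface' slot
        toks[5] = "-"
    entry = {'destination': toks[0], 'gateway': toks[1], 'iface': toks[7]}
    print(entry)  # {'destination': 'default', 'gateway': '10.65.0.1', 'iface': 'vtnet0'}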
@@ -284,8 +284,10 @@ def find_candidate_devs():
     # followed by fstype items, but with dupes removed
     combined = (by_label + [d for d in by_fstype if d not in by_label])
 
-    # We are looking for block device (sda, not sda1), ignore partitions
-    combined = [d for d in combined if not util.is_partition(d)]
+    # We are looking for a block device or partition with necessary label or
+    # an unpartitioned block device.
+    combined = [d for d in combined
+                if d in by_label or not util.is_partition(d)]
 
     return combined
 
@@ -50,40 +50,47 @@ class DataSourceNoCloud(sources.DataSource):
         }
 
         found = []
-        md = {}
-        ud = ""
+        mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}
 
         try:
             # Parse the kernel command line, getting data passed in
+            md = {}
             if parse_cmdline_data(self.cmdline_id, md):
                 found.append("cmdline")
+                mydata.update(md)
         except:
             util.logexc(LOG, "Unable to parse command line data")
             return False
 
         # Check to see if the seed dir has data.
-        seedret = {}
-        if util.read_optional_seed(seedret, base=self.seed_dir + "/"):
-            md = util.mergemanydict([md, seedret['meta-data']])
-            ud = seedret['user-data']
+        pp2d_kwargs = {'required': ['user-data', 'meta-data'],
+                       'optional': ['vendor-data']}
+
+        try:
+            seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs)
             found.append(self.seed_dir)
-            LOG.debug("Using seeded cache data from %s", self.seed_dir)
+            LOG.debug("Using seeded data from %s", self.seed_dir)
+        except ValueError as e:
+            pass
+
+        if self.seed_dir in found:
+            mydata = _merge_new_seed(mydata, seeded)
 
         # If the datasource config had a 'seedfrom' entry, then that takes
         # precedence over a 'seedfrom' that was found in a filesystem
         # but not over external media
-        if 'seedfrom' in self.ds_cfg and self.ds_cfg['seedfrom']:
-            found.append("ds_config")
-            md["seedfrom"] = self.ds_cfg['seedfrom']
+        if self.ds_cfg.get('seedfrom'):
+            found.append("ds_config_seedfrom")
+            mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']
 
-        # if ds_cfg has 'user-data' and 'meta-data'
+        # fields appropriately named can also just come from the datasource
+        # config (ie, 'user-data', 'meta-data', 'vendor-data' there)
         if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
-            if self.ds_cfg['user-data']:
-                ud = self.ds_cfg['user-data']
-            if self.ds_cfg['meta-data'] is not False:
-                md = util.mergemanydict([md, self.ds_cfg['meta-data']])
-            if 'ds_config' not in found:
-                found.append("ds_config")
+            mydata = _merge_new_seed(mydata, self.ds_cfg)
+            found.append("ds_config")
+
+        def _pp2d_callback(mp, data):
+            util.pathprefix2dict(mp, **data)
 
         label = self.ds_cfg.get('fs_label', "cidata")
         if label is not None:
@@ -102,15 +109,21 @@ class DataSourceNoCloud(sources.DataSource):
                 try:
                     LOG.debug("Attempting to use data from %s", dev)
 
-                    (newmd, newud) = util.mount_cb(dev, util.read_seeded)
-                    md = util.mergemanydict([newmd, md])
-                    ud = newud
+                    try:
+                        seeded = util.mount_cb(dev, _pp2d_callback)
+                    except ValueError as e:
+                        if dev in label_list:
+                            LOG.warn("device %s with label=%s not a"
+                                     "valid seed.", dev, label)
+                        continue
+
+                    mydata = _merge_new_seed(mydata, seeded)
 
                     # For seed from a device, the default mode is 'net'.
                     # that is more likely to be what is desired. If they want
                     # dsmode of local, then they must specify that.
-                    if 'dsmode' not in md:
-                        md['dsmode'] = "net"
+                    if 'dsmode' not in mydata['meta-data']:
+                        mydata['meta-data'] = "net"
 
                     LOG.debug("Using data from %s", dev)
                     found.append(dev)
@@ -133,8 +146,8 @@ class DataSourceNoCloud(sources.DataSource):
         # attempt to seed the userdata / metadata from its value
         # its primarily value is in allowing the user to type less
         # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
-        if "seedfrom" in md:
-            seedfrom = md["seedfrom"]
+        if "seedfrom" in mydata['meta-data']:
+            seedfrom = mydata['meta-data']["seedfrom"]
             seedfound = False
             for proto in self.supported_seed_starts:
                 if seedfrom.startswith(proto):
@@ -144,7 +157,7 @@ class DataSourceNoCloud(sources.DataSource):
             LOG.debug("Seed from %s not supported by %s", seedfrom, self)
             return False
 
-        if 'network-interfaces' in md:
+        if 'network-interfaces' in mydata['meta-data']:
             seeded_interfaces = self.dsmode
 
         # This could throw errors, but the user told us to do it
@@ -153,25 +166,30 @@ class DataSourceNoCloud(sources.DataSource):
             LOG.debug("Using seeded cache data from %s", seedfrom)
 
             # Values in the command line override those from the seed
-            md = util.mergemanydict([md, md_seed])
+            mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
+                                                      md_seed])
+            mydata['user-data'] = ud
             found.append(seedfrom)
 
         # Now that we have exhausted any other places merge in the defaults
-        md = util.mergemanydict([md, defaults])
+        mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
+                                                  defaults])
 
         # Update the network-interfaces if metadata had 'network-interfaces'
         # entry and this is the local datasource, or 'seedfrom' was used
         # and the source of the seed was self.dsmode
         # ('local' for NoCloud, 'net' for NoCloudNet')
-        if ('network-interfaces' in md and
+        if ('network-interfaces' in mydata['meta-data'] and
             (self.dsmode in ("local", seeded_interfaces))):
             LOG.debug("Updating network interfaces from %s", self)
-            self.distro.apply_network(md['network-interfaces'])
+            self.distro.apply_network(
+                mydata['meta-data']['network-interfaces'])
 
-        if md['dsmode'] == self.dsmode:
+        if mydata['meta-data']['dsmode'] == self.dsmode:
             self.seed = ",".join(found)
-            self.metadata = md
-            self.userdata_raw = ud
+            self.metadata = mydata['meta-data']
+            self.userdata_raw = mydata['user-data']
+            self.vendordata = mydata['vendor-data']
             return True
 
         LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
@@ -222,6 +240,16 @@ def parse_cmdline_data(ds_id, fill, cmdline=None):
     return True
 
 
+def _merge_new_seed(cur, seeded):
+    ret = cur.copy()
+    ret['meta-data'] = util.mergemanydict([cur['meta-data'],
+                                          util.load_yaml(seeded['meta-data'])])
+    ret['user-data'] = seeded['user-data']
+    if 'vendor-data' in seeded:
+        ret['vendor-data'] = seeded['vendor-data']
+    return ret
+
+
 class DataSourceNoCloudNet(DataSourceNoCloud):
     def __init__(self, sys_cfg, distro, paths):
         DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
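_merge_new_seed treats the seeded 'meta-data' as YAML text and merges it over the current dict, while 'user-data' and 'vendor-data' simply replace. A standalone sketch of that merge shape; util.mergemanydict and util.load_yaml are swapped for simple stand-ins (PyYAML and a shallow dict update), so this only approximates the deep merge:

    import yaml  # assumes PyYAML; stands in for util.load_yaml

    def merge_new_seed(cur, seeded):
        ret = cur.copy()
        md = dict(cur['meta-data'])
        md.update(yaml.safe_load(seeded['meta-data']) or {})
        ret['meta-data'] = md
        ret['user-data'] = seeded['user-data']
        if 'vendor-data' in seeded:
            ret['vendor-data'] = seeded['vendor-data']
        return ret

    cur = {'meta-data': {'dsmode': 'local'}, 'user-data': '', 'vendor-data': ''}
    seeded = {'meta-data': "instance-id: iid-123\n", 'user-data': '#cloud-config\n'}
    print(merge_new_seed(cur, seeded)['meta-data'])
    # {'dsmode': 'local', 'instance-id': 'iid-123'}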
@@ -25,7 +25,9 @@
 #   requests on the console. For example, to get the hostname, you
 #   would send "GET hostname" on /dev/ttyS1.
 #
+#   Certain behavior is defined by the DataDictionary
+#       http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
+#   Comments with "@datadictionary" are snippets of the definition
 
 import base64
 from cloudinit import log as logging
@@ -43,10 +45,11 @@ SMARTOS_ATTRIB_MAP = {
     'local-hostname': ('hostname', True),
     'public-keys': ('root_authorized_keys', True),
     'user-script': ('user-script', False),
-    'user-data': ('user-data', False),
+    'legacy-user-data': ('user-data', False),
+    'user-data': ('cloud-init:user-data', False),
     'iptables_disable': ('iptables_disable', True),
     'motd_sys_info': ('motd_sys_info', True),
-    'availability_zone': ('datacenter_name', True),
+    'availability_zone': ('sdc:datacenter_name', True),
     'vendordata': ('sdc:operator-script', False),
 }
@@ -71,7 +74,11 @@ BUILTIN_DS_CONFIG = {
     'seed_timeout': 60,
     'no_base64_decode': ['root_authorized_keys',
                          'motd_sys_info',
-                         'iptables_disable'],
+                         'iptables_disable',
+                         'user-data',
+                         'user-script',
+                         'sdc:datacenter_name',
+                         ],
     'base64_keys': [],
     'base64_all': False,
     'disk_aliases': {'ephemeral0': '/dev/vdb'},
|
|||||||
'device': 'ephemeral0'}],
|
'device': 'ephemeral0'}],
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# @datadictionary: this is legacy path for placing files from metadata
|
||||||
|
# per the SmartOS location. It is not preferable, but is done for
|
||||||
|
# legacy reasons
|
||||||
|
LEGACY_USER_D = "/var/db"
|
||||||
|
|
||||||
|
|
||||||
class DataSourceSmartOS(sources.DataSource):
|
class DataSourceSmartOS(sources.DataSource):
|
||||||
def __init__(self, sys_cfg, distro, paths):
|
def __init__(self, sys_cfg, distro, paths):
|
||||||
@ -107,6 +119,9 @@ class DataSourceSmartOS(sources.DataSource):
|
|||||||
self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
|
self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
|
||||||
self.b64_keys = self.ds_cfg.get('base64_keys')
|
self.b64_keys = self.ds_cfg.get('base64_keys')
|
||||||
self.b64_all = self.ds_cfg.get('base64_all')
|
self.b64_all = self.ds_cfg.get('base64_all')
|
||||||
|
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
|
||||||
|
self.user_script_d = os.path.join(self.paths.get_cpath("scripts"),
|
||||||
|
'per-boot')
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
root = sources.DataSource.__str__(self)
|
root = sources.DataSource.__str__(self)
|
||||||
@ -144,14 +159,32 @@ class DataSourceSmartOS(sources.DataSource):
|
|||||||
smartos_noun, strip = attribute
|
smartos_noun, strip = attribute
|
||||||
md[ci_noun] = self.query(smartos_noun, strip=strip)
|
md[ci_noun] = self.query(smartos_noun, strip=strip)
|
||||||
|
|
||||||
|
# @datadictionary: This key may contain a program that is written
|
||||||
|
# to a file in the filesystem of the guest on each boot and then
|
||||||
|
# executed. It may be of any format that would be considered
|
||||||
|
# executable in the guest instance.
|
||||||
|
u_script = md.get('user-script')
|
||||||
|
u_script_f = "%s/99_user_script" % self.user_script_d
|
||||||
|
u_script_l = "%s/user-script" % LEGACY_USER_D
|
||||||
|
write_boot_content(u_script, u_script_f, link=u_script_l, shebang=True,
|
||||||
|
mode=0700)
|
||||||
|
|
||||||
|
# @datadictionary: This key has no defined format, but its value
|
||||||
|
# is written to the file /var/db/mdata-user-data on each boot prior
|
||||||
|
# to the phase that runs user-script. This file is not to be executed.
|
||||||
|
# This allows a configuration file of some kind to be injected into
|
||||||
|
# the machine to be consumed by the user-script when it runs.
|
||||||
|
u_data = md.get('legacy-user-data')
|
||||||
|
u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
|
||||||
|
write_boot_content(u_data, u_data_f)
|
||||||
|
|
||||||
|
# Handle the cloud-init regular meta
|
||||||
if not md['local-hostname']:
|
if not md['local-hostname']:
|
||||||
md['local-hostname'] = system_uuid
|
md['local-hostname'] = system_uuid
|
||||||
|
|
||||||
ud = None
|
ud = None
|
||||||
if md['user-data']:
|
if md['user-data']:
|
||||||
ud = md['user-data']
|
ud = md['user-data']
|
||||||
elif md['user-script']:
|
|
||||||
ud = md['user-script']
|
|
||||||
|
|
||||||
self.metadata = util.mergemanydict([md, self.metadata])
|
self.metadata = util.mergemanydict([md, self.metadata])
|
||||||
self.userdata_raw = ud
|
self.userdata_raw = ud
|
||||||
@@ -279,6 +312,62 @@ def dmi_data():
     return (sys_uuid.lower().strip(), sys_type.strip())
 
 
+def write_boot_content(content, content_f, link=None, shebang=False,
+                       mode=0400):
+    """
+    Write the content to content_f. Under the following rules:
+        1. If no content, remove the file
+        2. Write the content
+        3. If executable and no file magic, add it
+        4. If there is a link, create it
+
+    @param content: what to write
+    @param content_f: the file name
+    @param backup_d: the directory to save the backup at
+    @param link: if defined, location to create a symlink to
+    @param shebang: if no file magic, set shebang
+    @param mode: file mode
+
+    Because of the way that Cloud-init executes scripts (no shell),
+    a script will fail to execute if it does not have a magic bit
+    (shebang) set for the file. If shebang=True, the script is checked
+    for file magic and, when none is found, the SmartOS default of
+    bash is assumed.
+    """
+
+    if not content and os.path.exists(content_f):
+        os.unlink(content_f)
+    if link and os.path.islink(link):
+        os.unlink(link)
+    if not content:
+        return
+
+    util.write_file(content_f, content, mode=mode)
+
+    if shebang and not content.startswith("#!"):
+        try:
+            cmd = ["file", "--brief", "--mime-type", content_f]
+            (f_type, _err) = util.subp(cmd)
+            LOG.debug("script %s mime type is %s", content_f, f_type)
+            if f_type.strip() == "text/plain":
+                new_content = "\n".join(["#!/bin/bash", content])
+                util.write_file(content_f, new_content, mode=mode)
+                LOG.debug("added shebang to file %s", content_f)
+
+        except Exception as e:
+            util.logexc(LOG, ("Failed to identify script type for %s" %
+                              content_f, e))
+
+    if link:
+        try:
+            if os.path.islink(link):
+                os.unlink(link)
+            if content and os.path.exists(content_f):
+                util.ensure_dir(os.path.dirname(link))
+                os.symlink(content_f, link)
+        except IOError as e:
+            util.logexc(LOG, "failed establishing content link", e)
+
+
 # Used to match classes to dependencies
 datasources = [
     (DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
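The shebang defaulting and the remove-on-empty behavior are easiest to see
from the caller's side. A sketch of the call pattern (paths shortened; the
import works once the cloudinit tree is on sys.path):

    from cloudinit.sources.DataSourceSmartOS import write_boot_content

    # no "#!" and file(1) reports text/plain -> "#!/bin/bash" is prepended
    write_boot_content("/bin/true\n",
                       "/var/lib/cloud/scripts/per-boot/99_user_script",
                       link="/var/db/user-script", shebang=True, mode=0700)

    # no content -> the file and any previous link are removed
    write_boot_content(None,
                       "/var/lib/cloud/scripts/per-boot/99_user_script",
                       link="/var/db/user-script", shebang=True, mode=0700)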
@@ -129,7 +129,7 @@ class DataSource(object):
         # when the kernel named them 'vda' or 'xvda'
         # we want to return the correct value for what will actually
         # exist in this instance
-        mappings = {"sd": ("vd", "xvd")}
+        mappings = {"sd": ("vd", "xvd", "vtb")}
        for (nfrom, tlist) in mappings.iteritems():
             if not short_name.startswith(nfrom):
                 continue
@@ -20,6 +20,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import httplib
 import time
 
 import requests
@@ -32,6 +33,8 @@ from cloudinit import version
 
 LOG = logging.getLogger(__name__)
 
+NOT_FOUND = httplib.NOT_FOUND
+
 # Check if requests has ssl support (added in requests >= 0.8.8)
 SSL_ENABLED = False
 CONFIG_ENABLED = False  # This was added in 0.7 (but taken out in >=1.0)
@@ -58,6 +61,31 @@ def _cleanurl(url):
     return urlunparse(parsed_url)
 
 
+# Made to have same accessors as UrlResponse so that the
+# read_file_or_url can return this or that object and the
+# 'user' of those objects will not need to know the difference.
+class StringResponse(object):
+    def __init__(self, contents, code=200):
+        self.code = code
+        self.headers = {}
+        self.contents = contents
+        self.url = None
+
+    def ok(self, *args, **kwargs):  # pylint: disable=W0613
+        if self.code != 200:
+            return False
+        return True
+
+    def __str__(self):
+        return self.contents
+
+
+class FileResponse(StringResponse):
+    def __init__(self, path, contents, code=200):
+        StringResponse.__init__(self, contents, code=code)
+        self.url = path
+
+
 class UrlResponse(object):
     def __init__(self, response):
         self._response = response
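The wrapper exists so that callers of read_file_or_url can handle a local
read and an HTTP fetch through one interface. A small sketch of that
uniformity:

    from cloudinit import url_helper

    resp = url_helper.StringResponse("#cloud-config\n")
    print resp.ok()        # True: code defaults to 200
    print str(resp)        # the contents, regardless of origin

    fresp = url_helper.FileResponse("/etc/hostname", "myhost\n")
    print fresp.url        # for files, the path stands in for the url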
@@ -103,7 +131,7 @@ class UrlError(IOError):
 
 def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             headers=None, headers_cb=None, ssl_details=None,
-            check_status=True, allow_redirects=True):
+            check_status=True, allow_redirects=True, exception_cb=None):
     url = _cleanurl(url)
     req_args = {
         'url': url,
@@ -163,14 +191,13 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
     # Handle retrying ourselves since the built-in support
     # doesn't handle sleeping between tries...
     for i in range(0, manual_tries):
+        req_args['headers'] = headers_cb(url)
+        filtered_req_args = {}
+        for (k, v) in req_args.items():
+            if k == 'data':
+                continue
+            filtered_req_args[k] = v
         try:
-            req_args['headers'] = headers_cb(url)
-            filtered_req_args = {}
-            for (k, v) in req_args.items():
-                if k == 'data':
-                    continue
-                filtered_req_args[k] = v
-
             LOG.debug("[%s/%s] open '%s' with %s configuration", i,
                       manual_tries, url, filtered_req_args)
 
@@ -196,6 +223,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
                 # ssl exceptions are not going to get fixed by waiting a
                 # few seconds
                 break
+            if exception_cb and not exception_cb(filtered_req_args, excps[-1]):
+                break
             if i + 1 < manual_tries and sec_between > 0:
                 LOG.debug("Please wait %s seconds while we wait to try again",
                           sec_between)
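The new exception_cb hook lets a caller cut the retry loop short. The
callback receives the filtered request arguments and the most recent
exception; returning False stops further tries. A sketch of a callback that
gives up as soon as the failure is a definitive 404:

    from cloudinit import url_helper

    def stop_on_404(req_args, excp):
        # keep retrying unless the server answered 404
        if isinstance(excp, url_helper.UrlError) and excp.code == 404:
            return False
        return True

    resp = url_helper.readurl("http://169.254.169.254/latest/user-data",
                              retries=5, sec_between=1,
                              exception_cb=stop_on_404)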
@@ -311,7 +311,8 @@ class UserDataProcessor(object):
     def _attach_part(self, outer_msg, part):
         """
         Attach a message to an outer message. outermsg must be a MIMEMultipart.
-        Modifies a header in the outer message to keep track of number of attachments.
+        Modifies a header in the outer message to keep track of number of
+        attachments.
         """
         part_count = self._multi_part_count(outer_msg)
         self._process_before_attach(part, part_count + 1)
@@ -26,6 +26,7 @@ from StringIO import StringIO
 
 import contextlib
 import copy as obj_copy
+import ctypes
 import errno
 import glob
 import grp
@@ -36,6 +37,7 @@ import os.path
 import platform
 import pwd
 import random
+import re
 import shutil
 import socket
 import stat
@@ -72,31 +74,6 @@ FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
 CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
 
 
-# Made to have same accessors as UrlResponse so that the
-# read_file_or_url can return this or that object and the
-# 'user' of those objects will not need to know the difference.
-class StringResponse(object):
-    def __init__(self, contents, code=200):
-        self.code = code
-        self.headers = {}
-        self.contents = contents
-        self.url = None
-
-    def ok(self, *args, **kwargs):  # pylint: disable=W0613
-        if self.code != 200:
-            return False
-        return True
-
-    def __str__(self):
-        return self.contents
-
-
-class FileResponse(StringResponse):
-    def __init__(self, path, contents, code=200):
-        StringResponse.__init__(self, contents, code=code)
-        self.url = path
-
-
 class ProcessExecutionError(IOError):
 
     MESSAGE_TMPL = ('%(description)s\n'
@@ -392,11 +369,11 @@ def is_ipv4(instr):
         return False
 
     try:
-        toks = [x for x in toks if (int(x) < 256 and int(x) > 0)]
+        toks = [x for x in toks if int(x) < 256 and int(x) >= 0]
     except:
         return False
 
-    return (len(toks) == 4)
+    return len(toks) == 4
 
 
 def get_cfg_option_bool(yobj, key, default=False):
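The changed comparison is the whole of the '0 octet' fix: the old filter
dropped any octet equal to zero, so an address like 10.0.0.1 kept only two
tokens and failed the final length check. In isolation:

    # old predicate: zero octets are filtered out
    toks = [x for x in "10.0.0.1".split(".") if int(x) < 256 and int(x) > 0]
    print len(toks) == 4    # False -> address wrongly rejected

    # new predicate: zero octets survive
    toks = [x for x in "10.0.0.1".split(".") if int(x) < 256 and int(x) >= 0]
    print len(toks) == 4    # True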
@@ -649,8 +626,8 @@ def read_optional_seed(fill, base="", ext="", timeout=5):
         fill['user-data'] = ud
         fill['meta-data'] = md
         return True
-    except IOError as e:
-        if e.errno == errno.ENOENT:
+    except url_helper.UrlError as e:
+        if e.code == url_helper.NOT_FOUND:
             return False
         raise
 
@@ -689,7 +666,7 @@ def fetch_ssl_details(paths=None):
 
 def read_file_or_url(url, timeout=5, retries=10,
                      headers=None, data=None, sec_between=1, ssl_details=None,
-                     headers_cb=None):
+                     headers_cb=None, exception_cb=None):
     url = url.lstrip()
     if url.startswith("/"):
         url = "file://%s" % url
@@ -697,7 +674,14 @@ def read_file_or_url(url, timeout=5, retries=10,
         if data:
             LOG.warn("Unable to post data to file resource %s", url)
         file_path = url[len("file://"):]
-        return FileResponse(file_path, contents=load_file(file_path))
+        try:
+            contents = load_file(file_path)
+        except IOError as e:
+            code = e.errno
+            if e.errno == errno.ENOENT:
+                code = url_helper.NOT_FOUND
+            raise url_helper.UrlError(cause=e, code=code, headers=None)
+        return url_helper.FileResponse(file_path, contents=contents)
     else:
         return url_helper.readurl(url,
                                   timeout=timeout,
@@ -706,7 +690,8 @@ def read_file_or_url(url, timeout=5, retries=10,
                                   headers_cb=headers_cb,
                                   data=data,
                                   sec_between=sec_between,
-                                  ssl_details=ssl_details)
+                                  ssl_details=ssl_details,
+                                  exception_cb=exception_cb)
 
 
 def load_yaml(blob, default=None, allowed=(dict,)):
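With this change a missing local file surfaces exactly like an HTTP 404, so
callers need only one error path for both transports. A sketch:

    from cloudinit import url_helper, util

    try:
        resp = util.read_file_or_url("file:///does/not/exist")
    except url_helper.UrlError as e:
        if e.code == url_helper.NOT_FOUND:
            pass          # same branch handles http 404 and ENOENT
        else:
            raise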
@@ -875,8 +860,8 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
     IP_address canonical_hostname [aliases...]
 
     Fields of the entry are separated by any number of blanks and/or tab
     characters. Text from a "#" character until the end of the line is a
     comment, and is ignored. Host names may contain only alphanumeric
     characters, minus signs ("-"), and periods ("."). They must begin with
     an alphabetic character and end with an alphanumeric character.
     Optional aliases provide for name changes, alternate spellings, shorter
@@ -960,7 +945,7 @@ def is_resolvable(name):
             pass
         _DNS_REDIRECT_IP = badips
         if badresults:
-            LOG.debug("detected dns redirection: %s" % badresults)
+            LOG.debug("detected dns redirection: %s", badresults)
 
     try:
         result = socket.getaddrinfo(name, None)
@@ -987,7 +972,7 @@ def gethostbyaddr(ip):
 
 def is_resolvable_url(url):
     """determine if this url is resolvable (existing or ip)."""
-    return (is_resolvable(urlparse.urlparse(url).hostname))
+    return is_resolvable(urlparse.urlparse(url).hostname)
 
 
 def search_for_mirror(candidates):
@@ -1312,11 +1297,26 @@ def mounts():
     mounted = {}
     try:
         # Go through mounts to see what is already mounted
-        mount_locs = load_file("/proc/mounts").splitlines()
+        if os.path.exists("/proc/mounts"):
+            mount_locs = load_file("/proc/mounts").splitlines()
+            method = 'proc'
+        else:
+            (mountoutput, _err) = subp("mount")
+            mount_locs = mountoutput.splitlines()
+            method = 'mount'
+        mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
         for mpline in mount_locs:
-            # Format at: man fstab
+            # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
+            # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
             try:
-                (dev, mp, fstype, opts, _freq, _passno) = mpline.split()
+                if method == 'proc':
+                    (dev, mp, fstype, opts, _freq, _passno) = mpline.split()
+                else:
+                    m = re.search(mountre, mpline)
+                    dev = m.group(1)
+                    mp = m.group(2)
+                    fstype = m.group(3)
+                    opts = m.group(4)
             except:
                 continue
             # If the name of the mount point contains spaces these
@@ -1327,9 +1327,9 @@ def mounts():
                 'mountpoint': mp,
                 'opts': opts,
             }
-        LOG.debug("Fetched %s mounts from %s", mounted, "/proc/mounts")
+        LOG.debug("Fetched %s mounts from %s", mounted, method)
     except (IOError, OSError):
-        logexc(LOG, "Failed fetching mount points from /proc/mounts")
+        logexc(LOG, "Failed fetching mount points")
     return mounted
 
 
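The comment shows both line shapes the loop now has to handle; the regex
only covers the BSD form, since /proc/mounts lines just split on
whitespace. Checking the pattern against the example line:

    import re

    mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
    line = "/dev/vtbd0p2 on / (ufs, local, journaled soft-updates)"
    m = re.search(mountre, line)
    print m.group(1), m.group(2), m.group(3), m.group(4)
    # -> /dev/vtbd0p2 / ufs journaled soft-updates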
@@ -1386,7 +1386,7 @@ def get_builtin_cfg():
 
 
 def sym_link(source, link):
-    LOG.debug("Creating symbolic link from %r => %r" % (link, source))
+    LOG.debug("Creating symbolic link from %r => %r", link, source)
     os.symlink(source, link)
 
 
@@ -1414,12 +1414,27 @@ def time_rfc2822():
 
 def uptime():
     uptime_str = '??'
+    method = 'unknown'
     try:
-        contents = load_file("/proc/uptime").strip()
-        if contents:
-            uptime_str = contents.split()[0]
+        if os.path.exists("/proc/uptime"):
+            method = '/proc/uptime'
+            contents = load_file("/proc/uptime").strip()
+            if contents:
+                uptime_str = contents.split()[0]
+        else:
+            method = 'ctypes'
+            libc = ctypes.CDLL('/lib/libc.so.7')
+            size = ctypes.c_size_t()
+            buf = ctypes.c_int()
+            size.value = ctypes.sizeof(buf)
+            libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
+                              ctypes.byref(size), None, 0)
+            now = time.time()
+            bootup = buf.value
+            uptime_str = now - bootup
+
     except:
-        logexc(LOG, "Unable to read uptime from /proc/uptime")
+        logexc(LOG, "Unable to read uptime using method: %s" % method)
     return uptime_str
 
 
@@ -1758,6 +1773,19 @@ def parse_mtab(path):
     return None
 
 
+def parse_mount(path):
+    (mountoutput, _err) = subp("mount")
+    mount_locs = mountoutput.splitlines()
+    for line in mount_locs:
+        m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
+        devpth = m.group(1)
+        mount_point = m.group(2)
+        fs_type = m.group(3)
+        if mount_point == path:
+            return devpth, fs_type, mount_point
+    return None
+
+
 def get_mount_info(path, log=LOG):
     # Use /proc/$$/mountinfo to find the device where path is mounted.
     # This is done because with a btrfs filesystem using os.stat(path)
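parse_mount is the third fallback for get_mount_info below: it shells out
to mount(8) and matches each line against the same BSD-format regex, so it
is only meaningful where mount prints that format. A usage sketch:

    from cloudinit import util

    # returns (devpth, fs_type, mount_point) or None
    info = util.parse_mount("/")
    if info:
        devpth, fs_type, mount_point = info
        print devpth, fs_type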
@@ -1791,8 +1819,10 @@ def get_mount_info(path, log=LOG):
     if os.path.exists(mountinfo_path):
         lines = load_file(mountinfo_path).splitlines()
         return parse_mount_info(path, lines, log)
-    else:
+    elif os.path.exists("/etc/mtab"):
         return parse_mtab(path)
+    else:
+        return parse_mount(path)
 
 
 def which(program):
@@ -1805,7 +1835,7 @@ def which(program):
         if is_exe(program):
             return program
     else:
-        for path in os.environ["PATH"].split(os.pathsep):
+        for path in os.environ.get("PATH", "").split(os.pathsep):
             path = path.strip('"')
             exe_file = os.path.join(path, program)
             if is_exe(exe_file):
@@ -1859,3 +1889,28 @@ def expand_dotted_devname(dotted):
         return toks
     else:
         return (dotted, None)
+
+
+def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
+    # return a dictionary populated with keys in 'required' and 'optional'
+    # by reading files in prefix + delim + entry
+    if required is None:
+        required = []
+    if optional is None:
+        optional = []
+
+    missing = []
+    ret = {}
+    for f in required + optional:
+        try:
+            ret[f] = load_file(base + delim + f, quiet=False)
+        except IOError as e:
+            if e.errno != errno.ENOENT:
+                raise
+            if f in required:
+                missing.append(f)
+
+    if len(missing):
+        raise ValueError("Missing required files: %s", ','.join(missing))
+
+    return ret
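A usage sketch of the new helper, reading a seed-style directory where two
files must exist and a third may (the /tmp path is only an example):

    from cloudinit import util

    # given /tmp/seed/user-data and /tmp/seed/meta-data on disk:
    seed = util.pathprefix2dict("/tmp/seed",
                                required=['user-data', 'meta-data'],
                                optional=['vendor-data'])
    print seed['meta-data']
    # a missing required file raises ValueError for the absent names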
@@ -1,4 +1,5 @@
-import sys, os
+import os
+import sys
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -16,11 +16,35 @@ responds with the status and if "SUCCESS" returns until a single ".\n".
 
 New versions of the SmartOS tooling will include support for base64 encoded data.
 
-Userdata
---------
+Meta-data channels
+------------------
 
-In SmartOS parlance, user-data is actually meta-data. This userdata can be
-provided as key-value pairs.
+Cloud-init supports three modes of delivering user/meta-data via the flexible
+channels of SmartOS.
+
+* user-data is written to /var/db/user-data
+  - per the spec, user-data is for consumption by the end-user, not provisioning
+    tools
+  - cloud-init entirely ignores this channel other than writing it to disk
+  - removal of the meta-data key means that /var/db/user-data gets removed
+  - a backup of previous meta-data is maintained as /var/db/user-data.<timestamp>
+  - <timestamp> is the epoch time when cloud-init ran
+
+* user-script is written to /var/lib/cloud/scripts/per-boot/99_user_script
+  - this is executed each boot
+  - a link is created to /var/db/user-script
+  - previous versions of the user-script are written to
+    /var/lib/cloud/scripts/per-boot.backup/99_user_script.<timestamp>.
+  - <timestamp> is the epoch time when cloud-init ran.
+  - when the 'user-script' meta-data key goes missing, the user-script is
+    removed from the file system, although a backup is maintained.
+  - if the script does not start with a shebang (i.e. #!<executable>),
+    cloud-init will add a shebang of "#!/bin/bash"
+
+* cloud-init:user-data is treated like on other Clouds.
+  - this channel is used for delivering _all_ cloud-init instructions
+  - scripts delivered over this channel must be well formed (i.e. must have
+    a shebang)
 
 Cloud-init supports reading the traditional meta-data fields supported by the
 SmartOS tools. These are:
@@ -32,19 +56,49 @@ SmartOS tools. These are:
 Note: At this time iptables_disable and enable_motd_sys_info are read but
     are not actioned.
 
-user-script
------------
+disabling user-script
+---------------------
 
-SmartOS traditionally supports sending over a user-script for execution at the
-rc.local level. Cloud-init supports running user-scripts as if they were
-cloud-init user-data. In this sense, anything with a shell interpreter
-directive will run.
+Cloud-init uses the per-boot script functionality to handle the execution
+of the user-script. If you want to prevent this, use a cloud-config of:
 
-user-data and user-script
--------------------------
+#cloud-config
+cloud_final_modules:
+ - scripts-per-once
+ - scripts-per-instance
+ - scripts-user
+ - ssh-authkey-fingerprints
+ - keys-to-console
+ - phone-home
+ - final-message
+ - power-state-change
 
-In the event that a user defines the meta-data key of "user-data" it will
-always supersede any user-script data. This is for consistency.
+Alternatively you can use the json patch method:
+#cloud-config-jsonp
+[
+ { "op": "replace",
+   "path": "/cloud_final_modules",
+   "value": ["scripts-per-once",
+             "scripts-per-instance",
+             "scripts-user",
+             "ssh-authkey-fingerprints",
+             "keys-to-console",
+             "phone-home",
+             "final-message",
+             "power-state-change"]
+ }
+]
+
+The default cloud-config includes "scripts-per-boot". When you disable the
+per-boot script handling, cloud-init will still ingest and write the
+user-data, but will not execute it.
+
+Note: Unless you have an explicit use-case, it is recommended that you not
+      disable the per-boot script execution, especially if you are using
+      any of the life-cycle management features of SmartOS.
+
+The cloud-config needs to be delivered over the cloud-init:user-data channel
+in order for cloud-init to ingest it.
 
 base64
 ------
@@ -54,6 +108,8 @@ are provided by SmartOS:
 * root_authorized_keys
 * enable_motd_sys_info
 * iptables_disable
+* user-data
+* user-script
 
 This list can be changed through system config of variable 'no_base64_decode'.
 
setup.py (2 lines changed)
@@ -136,7 +136,7 @@ setuptools.setup(name='cloud-init',
                  [f for f in glob('doc/examples/seed/*') if is_f(f)]),
     ],
     install_requires=read_requires(),
-    cmdclass = {
+    cmdclass={
         # Use a subclass for install that handles
         # adding on the right init system configuration files
         'install': InitsysInstallData,
sysvinit/freebsd/cloudconfig (new executable file, 34 lines)
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+# PROVIDE: cloudconfig
+# REQUIRE: cloudinit cloudinitlocal
+# BEFORE: cloudfinal
+
+. /etc/rc.subr
+
+name="cloudconfig"
+command="/usr/bin/cloud-init"
+start_cmd="cloudconfig_start"
+stop_cmd=":"
+rcvar="cloudinit_enable"
+start_precmd="cloudinit_override"
+start_cmd="cloudconfig_start"
+
+: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
+
+cloudinit_override()
+{
+    # If there exist sysconfig/default variable override files use it...
+    if [ -f /etc/default/cloud-init ]; then
+        . /etc/default/cloud-init
+    fi
+}
+
+cloudconfig_start()
+{
+    echo "${command} starting"
+    ${command} ${cloudinit_config} modules --mode config
+}
+
+load_rc_config $name
+run_rc_command "$1"
sysvinit/freebsd/cloudfinal (new executable file, 34 lines)
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+# PROVIDE: cloudfinal
+# REQUIRE: LOGIN cloudinit cloudconfig cloudinitlocal
+# REQUIRE: cron mail sshd swaplate
+
+. /etc/rc.subr
+
+name="cloudfinal"
+command="/usr/bin/cloud_init"
+start_cmd="cloudfinal_start"
+stop_cmd=":"
+rcvar="cloudinit_enable"
+start_precmd="cloudinit_override"
+start_cmd="cloudfinal_start"
+
+: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
+
+cloudinit_override()
+{
+    # If there exist sysconfig/default variable override files use it...
+    if [ -f /etc/default/cloud-init ]; then
+        . /etc/default/cloud-init
+    fi
+}
+
+cloudfinal_start()
+{
+    echo -n "${command} starting"
+    ${command} ${cloudinit_config} modules --mode final
+}
+
+load_rc_config $name
+run_rc_command "$1"
sysvinit/freebsd/cloudinit (new executable file, 34 lines)
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+# PROVIDE: cloudinit
+# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal
+# BEFORE: cloudconfig cloudfinal
+
+. /etc/rc.subr
+
+name="cloudinit"
+command="/usr/bin/cloud_init"
+start_cmd="cloudinit_start"
+stop_cmd=":"
+rcvar="cloudinit_enable"
+start_precmd="cloudinit_override"
+start_cmd="cloudinit_start"
+
+: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
+
+cloudinit_override()
+{
+    # If there exist sysconfig/default variable override files use it...
+    if [ -f /etc/default/cloud-init ]; then
+        . /etc/default/cloud-init
+    fi
+}
+
+cloudinit_start()
+{
+    echo -n "${command} starting"
+    ${command} ${cloudinit_config} init
+}
+
+load_rc_config $name
+run_rc_command "$1"
sysvinit/freebsd/cloudinitlocal (new executable file, 34 lines)
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+# PROVIDE: cloudinitlocal
+# REQUIRE: mountcritlocal
+# BEFORE: NETWORKING FILESYSTEMS cloudinit cloudconfig cloudfinal
+
+. /etc/rc.subr
+
+name="cloudinitlocal"
+command="/usr/bin/cloud-init"
+start_cmd="cloudlocal_start"
+stop_cmd=":"
+rcvar="cloudinit_enable"
+start_precmd="cloudinit_override"
+start_cmd="cloudlocal_start"
+
+: ${cloudinit_config:="/etc/cloud/cloud.cfg"}
+
+cloudinit_override()
+{
+    # If there exist sysconfig/default variable override files use it...
+    if [ -f /etc/default/cloud-init ]; then
+        . /etc/default/cloud-init
+    fi
+}
+
+cloudlocal_start()
+{
+    echo -n "${command} starting"
+    ${command} ${cloudinit_config} init --local
+}
+
+load_rc_config $name
+run_rc_command "$1"
@@ -187,7 +187,8 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
 
 
 def populate_dir(path, files):
-    os.makedirs(path)
+    if not os.path.exists(path):
+        os.makedirs(path)
     for (name, content) in files.iteritems():
         with open(os.path.join(path, name), "w") as fp:
             fp.write(content)
@@ -196,7 +196,7 @@ class TestCmdlineUrl(MockerTestCase):
         mock_readurl = self.mocker.replace(url_helper.readurl,
                                            passthrough=False)
         mock_readurl(url, ARGS, KWARGS)
-        self.mocker.result(util.StringResponse(payload))
+        self.mocker.result(url_helper.StringResponse(payload))
         self.mocker.replay()
 
         self.assertEqual((key, url, None),
@@ -212,7 +212,7 @@ class TestCmdlineUrl(MockerTestCase):
         mock_readurl = self.mocker.replace(url_helper.readurl,
                                            passthrough=False)
         mock_readurl(url, ARGS, KWARGS)
-        self.mocker.result(util.StringResponse(payload))
+        self.mocker.result(url_helper.StringResponse(payload))
         self.mocker.replay()
 
         self.assertEqual((key, url, payload),
@@ -225,7 +225,7 @@ class TestCmdlineUrl(MockerTestCase):
         cmdline = "ro %s=%s bar=1" % (key, url)
 
         self.mocker.replace(url_helper.readurl, passthrough=False)
-        self.mocker.result(util.StringResponse(""))
+        self.mocker.result(url_helper.StringResponse(""))
         self.mocker.replay()
 
         self.assertEqual((None, None, None),
@@ -285,10 +285,11 @@ class TestConfigDriveDataSource(MockerTestCase):
             self.assertEqual(["/dev/vdb", "/dev/zdd"],
                              ds.find_candidate_devs())
 
-            # verify that partitions are not considered
+            # verify that partitions with the correct label are considered
             devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
                                  "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]}
-            self.assertEqual([], ds.find_candidate_devs())
+            self.assertEqual(["/dev/vdb3"],
+                             ds.find_candidate_devs())
 
         finally:
             util.find_devs_with = orig_find_devs_with
@@ -119,9 +119,10 @@ class TestMAASDataSource(mocker.MockerTestCase):
             mock_request(url, headers=None, timeout=mocker.ANY,
                          data=mocker.ANY, sec_between=mocker.ANY,
                          ssl_details=mocker.ANY, retries=mocker.ANY,
-                         headers_cb=my_headers_cb)
+                         headers_cb=my_headers_cb,
+                         exception_cb=mocker.ANY)
             resp = valid.get(key)
-            self.mocker.result(util.StringResponse(resp))
+            self.mocker.result(url_helper.StringResponse(resp))
         self.mocker.replay()
 
         (userdata, metadata) = DataSourceMAAS.read_maas_seed_url(my_seed,
@@ -97,6 +97,41 @@ class TestNoCloudDataSource(MockerTestCase):
         self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
         self.assertTrue(ret)
 
+    def test_nocloud_seed_with_vendordata(self):
+        md = {'instance-id': 'IID', 'dsmode': 'local'}
+        ud = "USER_DATA_HERE"
+        vd = "THIS IS MY VENDOR_DATA"
+
+        populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
+                     {'user-data': ud, 'meta-data': yaml.safe_dump(md),
+                      'vendor-data': vd})
+
+        sys_cfg = {
+            'datasource': {'NoCloud': {'fs_label': None}}
+        }
+
+        ds = DataSourceNoCloud.DataSourceNoCloud
+
+        dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+        ret = dsrc.get_data()
+        self.assertEqual(dsrc.userdata_raw, ud)
+        self.assertEqual(dsrc.metadata, md)
+        self.assertEqual(dsrc.vendordata, vd)
+        self.assertTrue(ret)
+
+    def test_nocloud_no_vendordata(self):
+        populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
+                     {'user-data': "ud", 'meta-data': "instance-id: IID\n"})
+
+        sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+
+        ds = DataSourceNoCloud.DataSourceNoCloud
+
+        dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+        ret = dsrc.get_data()
+        self.assertEqual(dsrc.userdata_raw, "ud")
+        self.assertFalse(dsrc.vendordata)
+        self.assertTrue(ret)
+
 
 class TestParseCommandLineData(MockerTestCase):
 
@@ -27,6 +27,10 @@ from cloudinit import helpers
 from cloudinit.sources import DataSourceSmartOS
 
 from mocker import MockerTestCase
+import os
+import os.path
+import re
+import stat
 import uuid
 
 MOCK_RETURNS = {
@@ -35,7 +39,11 @@ MOCK_RETURNS = {
     'disable_iptables_flag': None,
     'enable_motd_sys_info': None,
     'test-var1': 'some data',
-    'user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
+    'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
+    'sdc:datacenter_name': 'somewhere2',
+    'sdc:operator-script': '\n'.join(['bin/true', '']),
+    'user-data': '\n'.join(['something', '']),
+    'user-script': '\n'.join(['/bin/true', '']),
 }
 
 DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc')
@@ -101,6 +109,7 @@ class TestSmartOSDataSource(MockerTestCase):
     def setUp(self):
         # makeDir comes from MockerTestCase
         self.tmp = self.makeDir()
+        self.legacy_user_d = self.makeDir()
 
         # patch cloud_dir, so our 'seed_dir' is guaranteed empty
         self.paths = helpers.Paths({'cloud_dir': self.tmp})
@@ -138,6 +147,7 @@ class TestSmartOSDataSource(MockerTestCase):
         sys_cfg['datasource'] = sys_cfg.get('datasource', {})
         sys_cfg['datasource']['SmartOS'] = ds_cfg
 
+        self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)])
        self.apply_patches([(mod, 'get_serial', _get_serial)])
         self.apply_patches([(mod, 'dmi_data', _dmi_data)])
         dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None,
@@ -194,7 +204,7 @@ class TestSmartOSDataSource(MockerTestCase):
         # metadata provided base64_all of true
         my_returns = MOCK_RETURNS.copy()
         my_returns['base64_all'] = "true"
-        for k in ('hostname', 'user-data'):
+        for k in ('hostname', 'cloud-init:user-data'):
             my_returns[k] = base64.b64encode(my_returns[k])
 
         dsrc = self._get_ds(mockdata=my_returns)
@@ -202,7 +212,7 @@ class TestSmartOSDataSource(MockerTestCase):
         self.assertTrue(ret)
         self.assertEquals(MOCK_RETURNS['hostname'],
                           dsrc.metadata['local-hostname'])
-        self.assertEquals(MOCK_RETURNS['user-data'],
+        self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
                           dsrc.userdata_raw)
         self.assertEquals(MOCK_RETURNS['root_authorized_keys'],
                           dsrc.metadata['public-keys'])
@@ -213,9 +223,9 @@ class TestSmartOSDataSource(MockerTestCase):
 
     def test_b64_userdata(self):
         my_returns = MOCK_RETURNS.copy()
-        my_returns['b64-user-data'] = "true"
+        my_returns['b64-cloud-init:user-data'] = "true"
         my_returns['b64-hostname'] = "true"
-        for k in ('hostname', 'user-data'):
+        for k in ('hostname', 'cloud-init:user-data'):
             my_returns[k] = base64.b64encode(my_returns[k])
 
         dsrc = self._get_ds(mockdata=my_returns)
@@ -223,7 +233,8 @@ class TestSmartOSDataSource(MockerTestCase):
         self.assertTrue(ret)
         self.assertEquals(MOCK_RETURNS['hostname'],
                           dsrc.metadata['local-hostname'])
-        self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
+        self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
+                          dsrc.userdata_raw)
         self.assertEquals(MOCK_RETURNS['root_authorized_keys'],
                           dsrc.metadata['public-keys'])
 
@@ -238,13 +249,131 @@ class TestSmartOSDataSource(MockerTestCase):
         self.assertTrue(ret)
         self.assertEquals(MOCK_RETURNS['hostname'],
                           dsrc.metadata['local-hostname'])
-        self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
+        self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
+                          dsrc.userdata_raw)
 
     def test_userdata(self):
         dsrc = self._get_ds(mockdata=MOCK_RETURNS)
         ret = dsrc.get_data()
         self.assertTrue(ret)
-        self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw)
+        self.assertEquals(MOCK_RETURNS['user-data'],
+                          dsrc.metadata['legacy-user-data'])
+        self.assertEquals(MOCK_RETURNS['cloud-init:user-data'],
+                          dsrc.userdata_raw)
+
+    def test_sdc_scripts(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEquals(MOCK_RETURNS['user-script'],
+                          dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+        self.assertEquals(user_script_perm, '700')
+
+    def test_scripts_shebanged(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEquals(MOCK_RETURNS['user-script'],
+                          dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        shebang = None
+        with open(legacy_script_f, 'r') as f:
+            shebang = f.readlines()[0].strip()
+        self.assertEquals(shebang, "#!/bin/bash")
+        user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+        self.assertEquals(user_script_perm, '700')
+
+    def test_scripts_shebang_not_added(self):
+        """
+        Test the SmartOS requirement that plain text scripts end up
+        executable: a script that already carries file magic (a shebang)
+        must be left as-is, and only plain text scripts without one have
+        a shebang added by cloud-init.
+        """
+
+        my_returns = MOCK_RETURNS.copy()
+        my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
+                                               'print("hi")', ''])
+
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEquals(my_returns['user-script'],
+                          dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        shebang = None
+        with open(legacy_script_f, 'r') as f:
+            shebang = f.readlines()[0].strip()
+        self.assertEquals(shebang, "#!/usr/bin/perl")
+
+    def test_scripts_removed(self):
+        """
+        Since SmartOS requires that the user script is fetched
+        each boot, we want to make sure that the information
+        is backed-up for user-review later.
+
+        This tests the behavior of when a script is removed. It makes
+        sure that 1) the previous script is backed-up; and 2) that
+        there is no script remaining.
+        """
+
+        script_d = os.path.join(self.tmp, "scripts", "per-boot")
+        os.makedirs(script_d)
+
+        test_script_f = "%s/99_user_script" % script_d
+        with open(test_script_f, 'w') as f:
+            f.write("TEST DATA")
+
+        my_returns = MOCK_RETURNS.copy()
+        del my_returns['user-script']
+
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertFalse(dsrc.metadata['user-script'])
+        self.assertFalse(os.path.exists(test_script_f))
+
+    def test_userdata_removed(self):
+        """
+        User-data in the SmartOS world is supposed to be written to a file
+        each and every boot. This tests to make sure that in the event the
+        legacy user-data is removed, the existing user-data is backed-up
+        and there is no /var/db/user-data left.
+        """
+
+        user_data_f = "%s/mdata-user-data" % self.legacy_user_d
+        with open(user_data_f, 'w') as f:
+            f.write("PREVIOUS")
+
+        my_returns = MOCK_RETURNS.copy()
+        del my_returns['user-data']
+
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertFalse(dsrc.metadata.get('legacy-user-data'))
+
+        found_new = False
+        for root, _dirs, files in os.walk(self.legacy_user_d):
+            for name in files:
+                name_f = os.path.join(root, name)
+                permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:]
+                if re.match(r'.*\/mdata-user-data$', name_f):
+                    found_new = True
+                    print name_f
+                    self.assertEquals(permissions, '400')
+
+        self.assertFalse(found_new)
+
     def test_disable_iptables_flag(self):
         dsrc = self._get_ds(mockdata=MOCK_RETURNS)
@@ -33,6 +33,14 @@ class TestEc2Util(helpers.TestCase):
         userdata = eu.get_instance_userdata(self.VERSION, retries=0)
         self.assertEquals('', userdata)
 
+    @hp.activate
+    def test_userdata_fetch_fail_server_not_found(self):
+        hp.register_uri(hp.GET,
+                        'http://169.254.169.254/%s/user-data' % (self.VERSION),
+                        status=404)
+        userdata = eu.get_instance_userdata(self.VERSION)
+        self.assertEquals('', userdata)
+
     @hp.activate
     def test_metadata_fetch_no_keys(self):
         base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)
tests/unittests/test_pathprefix2dict.py (new file, 40 lines)
@@ -0,0 +1,40 @@
+from cloudinit import util
+
+from mocker import MockerTestCase
+from tests.unittests.helpers import populate_dir
+
+
+class TestPathPrefix2Dict(MockerTestCase):
+
+    def setUp(self):
+        self.tmp = self.makeDir()
+
+    def test_required_only(self):
+        dirdata = {'f1': 'f1content', 'f2': 'f2content'}
+        populate_dir(self.tmp, dirdata)
+
+        ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2'])
+        self.assertEqual(dirdata, ret)
+
+    def test_required_missing(self):
+        dirdata = {'f1': 'f1content'}
+        populate_dir(self.tmp, dirdata)
+        kwargs = {'required': ['f1', 'f2']}
+        self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs)
+
+    def test_no_required_and_optional(self):
+        dirdata = {'f1': 'f1c', 'f2': 'f2c'}
+        populate_dir(self.tmp, dirdata)
+
+        ret = util.pathprefix2dict(self.tmp, required=None,
+                                   optional=['f1', 'f2'])
+        self.assertEqual(dirdata, ret)
+
+    def test_required_and_optional(self):
+        dirdata = {'f1': 'f1c', 'f2': 'f2c'}
+        populate_dir(self.tmp, dirdata)
+
+        ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2'])
+        self.assertEqual(dirdata, ret)
+
+# vi: ts=4 expandtab
@@ -1,32 +1,23 @@
-#!/bin/sh
+#!/usr/bin/env python
 
-set -e
+import os
+import sys
 
-find_root() {
-    local topd
-    if [ -z "${CLOUD_INIT_TOP_D}" ]; then
-        topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
-    else
-        topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
-    fi
-    [ $? -eq 0 -a -f "${topd}/setup.py" ] || return
-    ROOT_DIR="$topd"
-}
-fail() { echo "$0:" "$@" 1>&2; exit 1; }
+if 'CLOUD_INIT_TOP_D' in os.environ:
+    topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
+else:
+    topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 
-if ! find_root; then
-    fail "Unable to locate 'setup.py' file that should " \
-         "exist in the cloud-init root directory."
-fi
+for fname in ("setup.py", "requirements.txt"):
+    if not os.path.isfile(os.path.join(topd, fname)):
+        sys.stderr.write("Unable to locate '%s' file that should "
+                         "exist in cloud-init root directory." % fname)
+        sys.exit(1)
 
-REQUIRES="$ROOT_DIR/requirements.txt"
+with open(os.path.join(topd, "requirements.txt"), "r") as fp:
+    for line in fp:
+        if not line.strip() or line.startswith("#"):
+            continue
+        sys.stdout.write(line)
 
-if [ ! -e "$REQUIRES" ]; then
-    fail "Unable to find 'requirements.txt' file located at '$REQUIRES'"
-fi
-
-# Filter out comments and empty lines
-DEPS=$(sed -n -e 's,#.*,,' -e '/./p' "$REQUIRES") &&
-   [ -n "$DEPS" ] ||
-   fail "failed to read deps from '${REQUIRES}'"
-echo "$DEPS" | sort -d -f
+sys.exit(0)
@@ -1,32 +1,26 @@
-#!/bin/sh
+#!/usr/bin/env python
 
-set -e
+import os
+import re
+import sys
 
-find_root() {
-    local topd
-    if [ -z "${CLOUD_INIT_TOP_D}" ]; then
-        topd=$(cd "$(dirname "${0}")" && cd .. && pwd)
-    else
-        topd=$(cd "${CLOUD_INIT_TOP_D}" && pwd)
-    fi
-    [ $? -eq 0 -a -f "${topd}/setup.py" ] || return
-    ROOT_DIR="$topd"
-}
-fail() { echo "$0:" "$@" 1>&2; exit 1; }
+if 'CLOUD_INIT_TOP_D' in os.environ:
+    topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
+else:
+    topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 
-if ! find_root; then
-    fail "Unable to locate 'setup.py' file that should " \
-         "exist in the cloud-init root directory."
-fi
+for fname in ("setup.py", "ChangeLog"):
+    if not os.path.isfile(os.path.join(topd, fname)):
+        sys.stderr.write("Unable to locate '%s' file that should "
+                         "exist in cloud-init root directory." % fname)
+        sys.exit(1)
 
-CHNG_LOG="$ROOT_DIR/ChangeLog"
+vermatch = re.compile(r"^[0-9]+[.][0-9]+[.][0-9]+:$")
 
-if [ ! -e "$CHNG_LOG" ]; then
-    fail "Unable to find 'ChangeLog' file located at '$CHNG_LOG'"
-fi
+with open(os.path.join(topd, "ChangeLog"), "r") as fp:
+    for line in fp:
+        if vermatch.match(line):
+            sys.stdout.write(line.strip()[:-1] + "\n")
+            break
 
-VERSION=$(sed -n '/^[0-9]\+[.][0-9]\+[.][0-9]\+:/ {s/://; p; :a;n; ba; }' \
-   "$CHNG_LOG") &&
-   [ -n "$VERSION" ] ||
-   fail "failed to get version from '$CHNG_LOG'"
-echo "$VERSION"
+sys.exit(0)
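The version match only fires on ChangeLog entries shaped like "X.Y.Z:" at
the start of a line, and the rewrite prints the first one with its trailing
colon stripped. In isolation:

    import re

    vermatch = re.compile(r"^[0-9]+[.][0-9]+[.][0-9]+:$")
    for line in ["0.7.5:", " - some change", "0.7.4:"]:
        if vermatch.match(line):
            print line.strip()[:-1]    # -> 0.7.5
            break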
@@ -1,15 +1,7 @@
 #!/bin/bash
 
-ci_files='cloudinit/*.py cloudinit/config/*.py'
-test_files=$(find tests -name "*.py")
-def_files="$ci_files $test_files"
-
 if [ $# -eq 0 ]; then
-   files=( )
-   for f in $def_files; do
-      [ -f "$f" ] || { echo "failed, $f not a file" 1>&2; exit 1; }
-      files[${#files[@]}]=${f}
-   done
+   files=( bin/cloud-init $(find * -name "*.py" -type f) )
 else
    files=( "$@" );
 fi
@@ -44,4 +36,3 @@ cmd=(
 echo -e "\nRunning 'cloudinit' pep8:"
 echo "${cmd[@]}"
 "${cmd[@]}"
-
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 if [ $# -eq 0 ]; then
-   files=( $(find * -name "*.py" -type f) )
+   files=( bin/cloud-init $(find * -name "*.py" -type f) )
 else
    files=( "$@" );
 fi
@@ -16,6 +16,7 @@ cmd=(
    --rcfile=$RC_FILE
    --disable=R
    --disable=I
+   --dummy-variables-rgx="_"
    "${files[@]}"
 )
 