merge from trunk

commit b93ee71176
@@ -10,6 +10,10 @@
  - support individual MIME segments to be gzip compressed (LP: #1203203)
  - always finalize handlers even if processing failed (LP: #1203368)
  - support merging into cloud-config via jsonp. (LP: #1200476)
+ - add datasource 'SmartOS' for Joyent Cloud.  Adds a dependency on serial.
+ - add 'log_time' helper to util for timing how long things take
+   which also reads from uptime. uptime is useful as clock may change during
+   boot due to ntp.
 0.7.2:
  - add a debian watch file
  - add 'sudo' entry to ubuntu's default user (LP: #1080717)
Requires
@@ -10,6 +10,10 @@ PrettyTable
 # datasource is removed, this is no longer needed
 oauth
 
+# This one is currently used only by the SmartOS datasource. If that
+# datasource is removed, this is no longer needed
+pyserial
+
 # This is only needed for places where we need to support configs in a manner
 # that the built-in config parser is not sufficent (ie
 # when we need to preserve comments, or do not have a top-level
@@ -502,7 +502,9 @@ def main():
     signal_handler.attach_handlers()
 
     (name, functor) = args.action
-    return functor(name, args)
+    return util.log_time(logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
+                         get_uptime=True, func=functor, args=(name, args))
 
 
 if __name__ == '__main__':
@@ -264,7 +264,8 @@ def handle(_name, cfg, _cloud, log, _args):
             raise e
         return
 
-    resized = resize_devices(resizer, devices)
+    resized = util.log_time(logfunc=log.debug, msg="resize_devices",
+                            func=resize_devices, args=(resizer, devices))
    for (entry, action, msg) in resized:
        if action == RESIZE.CHANGED:
            log.info("'%s' resized: %s" % (entry, msg))
@@ -21,7 +21,6 @@
 import errno
 import os
 import stat
-import time
 
 from cloudinit.settings import PER_ALWAYS
 from cloudinit import util
@@ -120,9 +119,12 @@ def handle(name, cfg, _cloud, log, args):
     if resize_root == NOBLOCK:
         # Fork to a child that will run
         # the resize command
-        util.fork_cb(do_resize, resize_cmd, log)
+        util.fork_cb(
+            util.log_time(logfunc=log.debug, msg="backgrounded Resizing",
+                func=do_resize, args=(resize_cmd, log)))
     else:
-        do_resize(resize_cmd, log)
+        util.log_time(logfunc=log.debug, msg="Resizing",
+            func=do_resize, args=(resize_cmd, log))
 
     action = 'Resized'
     if resize_root == NOBLOCK:
@@ -132,13 +134,10 @@ def handle(name, cfg, _cloud, log, args):
 
 
 def do_resize(resize_cmd, log):
-    start = time.time()
     try:
         util.subp(resize_cmd)
     except util.ProcessExecutionError:
         util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
         raise
-    tot_time = time.time() - start
-    log.debug("Resizing took %.3f seconds", tot_time)
     # TODO(harlowja): Should we add a fsck check after this to make
     # sure we didn't corrupt anything?
@@ -31,9 +31,21 @@ LOG = logging.getLogger(__name__)
 DS_NAME = 'Azure'
 DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
 AGENT_START = ['service', 'walinuxagent', 'start']
-BUILTIN_DS_CONFIG = {'datasource': {DS_NAME: {
+BOUNCE_COMMAND = ['sh', '-xc',
+    "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
+
+BUILTIN_DS_CONFIG = {
     'agent_command': AGENT_START,
-    'data_dir': "/var/lib/waagent"}}}
+    'data_dir': "/var/lib/waagent",
+    'set_hostname': True,
+    'hostname_bounce': {
+        'interface': 'eth0',
+        'policy': True,
+        'command': BOUNCE_COMMAND,
+        'hostname_command': 'hostname',
+    }
+}
+DS_CFG_PATH = ['datasource', DS_NAME]
 
 
 class DataSourceAzureNet(sources.DataSource):
@@ -42,19 +54,19 @@ class DataSourceAzureNet(sources.DataSource):
         self.seed_dir = os.path.join(paths.seed_dir, 'azure')
         self.cfg = {}
         self.seed = None
+        self.ds_cfg = util.mergemanydict([
+            util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
+            BUILTIN_DS_CONFIG])
 
     def __str__(self):
         root = sources.DataSource.__str__(self)
         return "%s [seed=%s]" % (root, self.seed)
 
     def get_data(self):
-        ddir_cfgpath = ['datasource', DS_NAME, 'data_dir']
         # azure removes/ejects the cdrom containing the ovf-env.xml
         # file on reboot. So, in order to successfully reboot we
         # need to look in the datadir and consider that valid
-        ddir = util.get_cfg_by_path(self.sys_cfg, ddir_cfgpath)
-        if ddir is None:
-            ddir = util.get_cfg_by_path(BUILTIN_DS_CONFIG, ddir_cfgpath)
+        ddir = self.ds_cfg['data_dir']
 
         candidates = [self.seed_dir]
         candidates.extend(list_possible_azure_ds_devs())
@@ -91,44 +103,46 @@ class DataSourceAzureNet(sources.DataSource):
             return False
 
         if found == ddir:
-            LOG.debug("using cached datasource in %s", ddir)
-
-        fields = [('cmd', ['datasource', DS_NAME, 'agent_command']),
-                  ('datadir', ddir_cfgpath)]
-        mycfg = {}
-        for cfg in (self.cfg, self.sys_cfg, BUILTIN_DS_CONFIG):
-            for name, path in fields:
-                if name in mycfg:
-                    continue
-                value = util.get_cfg_by_path(cfg, keyp=path)
-                if value is not None:
-                    mycfg[name] = value
+            LOG.debug("using files cached in %s", ddir)
+
+        # now update ds_cfg to reflect contents pass in config
+        usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
+        self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg])
+        mycfg = self.ds_cfg
 
         # walinux agent writes files world readable, but expects
         # the directory to be protected.
-        write_files(mycfg['datadir'], files, dirmode=0700)
+        write_files(mycfg['data_dir'], files, dirmode=0700)
+
+        # handle the hostname 'publishing'
+        try:
+            handle_set_hostname(mycfg.get('set_hostname'),
+                                self.metadata.get('local-hostname'),
+                                mycfg['hostname_bounce'])
+        except Exception as e:
+            LOG.warn("Failed publishing hostname: %s" % e)
+            util.logexc(LOG, "handling set_hostname failed")
 
         try:
-            invoke_agent(mycfg['cmd'])
+            invoke_agent(mycfg['agent_command'])
         except util.ProcessExecutionError:
             # claim the datasource even if the command failed
-            util.logexc(LOG, "agent command '%s' failed.", mycfg['cmd'])
+            util.logexc(LOG, "agent command '%s' failed.",
+                        mycfg['agent_command'])
 
-        shcfgxml = os.path.join(mycfg['datadir'], "SharedConfig.xml")
+        shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml")
         wait_for = [shcfgxml]
 
         fp_files = []
         for pk in self.cfg.get('_pubkeys', []):
             bname = pk['fingerprint'] + ".crt"
-            fp_files += [os.path.join(mycfg['datadir'], bname)]
+            fp_files += [os.path.join(mycfg['data_dir'], bname)]
 
-        start = time.time()
-        missing = wait_for_files(wait_for + fp_files)
+        missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+                                func=wait_for_files,
+                                args=(wait_for + fp_files,))
         if len(missing):
             LOG.warn("Did not find files, but going on: %s", missing)
-        else:
-            LOG.debug("waited %.3f seconds for %d files to appear",
-                      time.time() - start, len(wait_for))
 
         if shcfgxml in missing:
             LOG.warn("SharedConfig.xml missing, using static instance-id")
@@ -148,6 +162,56 @@ class DataSourceAzureNet(sources.DataSource):
         return self.cfg
 
 
+def handle_set_hostname(enabled, hostname, cfg):
+    if not util.is_true(enabled):
+        return
+
+    if not hostname:
+        LOG.warn("set_hostname was true but no local-hostname")
+        return
+
+    apply_hostname_bounce(hostname=hostname, policy=cfg['policy'],
+                          interface=cfg['interface'],
+                          command=cfg['command'],
+                          hostname_command=cfg['hostname_command'])
+
+
+def apply_hostname_bounce(hostname, policy, interface, command,
+                          hostname_command="hostname"):
+    # set the hostname to 'hostname' if it is not already set to that.
+    # then, if policy is not off, bounce the interface using command
+    prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
+
+    util.subp([hostname_command, hostname])
+
+    msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
+           (prev_hostname, hostname, policy, interface))
+
+    if util.is_false(policy):
+        LOG.debug("pubhname: policy false, skipping [%s]", msg)
+        return
+
+    if prev_hostname == hostname and policy != "force":
+        LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
+        return
+
+    env = os.environ.copy()
+    env['interface'] = interface
+    env['hostname'] = hostname
+    env['old_hostname'] = prev_hostname
+
+    if command == "builtin":
+        command = BOUNCE_COMMAND
+
+    LOG.debug("pubhname: publishing hostname [%s]", msg)
+    shell = not isinstance(command, (list, tuple))
+    # capture=False, see comments in bug 1202758 and bug 1206164.
+    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
+                  get_uptime=True, func=util.subp,
+                  kwargs={'command': command, 'shell': shell, 'capture': False,
+                          'env': env})
+
+
 def crtfile_to_pubkey(fname):
     pipeline = ('openssl x509 -noout -pubkey < "$0" |'
                 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
@@ -319,15 +383,21 @@ def read_azure_ovf(contents):
         name = child.localName.lower()
 
         simple = False
+        value = ""
         if (len(child.childNodes) == 1 and
                 child.childNodes[0].nodeType == dom.TEXT_NODE):
             simple = True
             value = child.childNodes[0].wholeText
 
+        attrs = {k: v for k, v in child.attributes.items()}
+
         # we accept either UserData or CustomData. If both are present
         # then behavior is undefined.
         if (name == "userdata" or name == "customdata"):
-            ud = base64.b64decode(''.join(value.split()))
+            if attrs.get('encoding') in (None, "base64"):
+                ud = base64.b64decode(''.join(value.split()))
+            else:
+                ud = value
         elif name == "username":
             username = value
         elif name == "userpassword":
@@ -335,7 +405,11 @@ def read_azure_ovf(contents):
         elif name == "hostname":
             md['local-hostname'] = value
         elif name == "dscfg":
-            cfg['datasource'] = {DS_NAME: util.load_yaml(value, default={})}
+            if attrs.get('encoding') in (None, "base64"):
+                dscfg = base64.b64decode(''.join(value.split()))
+            else:
+                dscfg = value
+            cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
         elif name == "ssh":
             cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
         elif name == "disablesshpasswordauthentication":
@@ -1770,3 +1770,38 @@ def which(program):
             return exe_file
 
     return None
+
+
+def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
+    if args is None:
+        args = []
+    if kwargs is None:
+        kwargs = {}
+
+    start = time.time()
+
+    ustart = None
+    if get_uptime:
+        try:
+            ustart = float(uptime())
+        except ValueError:
+            pass
+
+    try:
+        ret = func(*args, **kwargs)
+    finally:
+        delta = time.time() - start
+        if ustart is not None:
+            try:
+                udelta = float(uptime()) - ustart
+            except ValueError:
+                udelta = "N/A"
+
+        tmsg = " took %0.3f seconds" % delta
+        if get_uptime:
+            tmsg += "(%0.2f)" % udelta
+        try:
+            logfunc(msg + tmsg)
+        except:
+            pass
+    return ret
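The new helper runs an arbitrary callable, logs "<msg> took N.NNN seconds" through the supplied logfunc (appending an uptime-based delta when get_uptime=True), and returns the callable's result. A minimal usage sketch, mirroring the call sites added elsewhere in this commit; the wrapped function below is purely illustrative and not part of the change:

    import logging
    from cloudinit import util

    LOG = logging.getLogger(__name__)

    def scan_devices(devices):
        # stand-in for whatever work is being timed
        return [d for d in devices if d.startswith('/dev/')]

    found = util.log_time(logfunc=LOG.debug, msg="scanning devices",
                          get_uptime=True, func=scan_devices,
                          args=(['/dev/sda', 'tmpfs'],))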
@@ -45,6 +45,11 @@ datasource:
 
   Azure:
    agent_command: [service, walinuxagent, start]
+   set_hostname: True
+   hostname_bounce:
+     interface: eth0
+     policy: on # [can be 'on', 'off' or 'force']
+   }
 
   SmartOS:
    # Smart OS datasource works over a serial console interacting with
doc/sources/azure/README.rst  (new file, 134 lines)
@@ -0,0 +1,134 @@
================
Azure Datasource
================

This datasource finds metadata and user-data from the Azure cloud platform.

Azure Platform
--------------
The azure cloud-platform provides initial data to an instance via an attached
CD formatted in UDF.  That CD contains an 'ovf-env.xml' file that provides some
information.  Additional information is obtained via interaction with the
"endpoint".  The IP address of the endpoint is advertised to the instance
inside of dhcp option 245.  On ubuntu, that can be seen in
/var/lib/dhcp/dhclient.eth0.leases as a colon delimited hex value (example:
``option unknown-245 64:41:60:82;`` is 100.65.96.130).
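The colon-delimited hex value maps one byte per octet onto the endpoint's IPv4
address.  A small conversion sketch (illustrative only, not part of this
commit):

.. code:: python

   value = "64:41:60:82"
   endpoint = ".".join(str(int(octet, 16)) for octet in value.split(":"))
   # endpoint == "100.65.96.130"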
walinuxagent
------------
In order to operate correctly, cloud-init needs walinuxagent to provide much
of the interaction with azure.  In addition to "provisioning" code,
walinuxagent is a long running daemon that handles the following things:
- generate a x509 certificate and send that to the endpoint

waagent.conf config
~~~~~~~~~~~~~~~~~~~
In order to use waagent.conf with cloud-init, the following settings are
recommended.  Other values can be changed or set to the defaults.

::

  # disabling provisioning turns off all 'Provisioning.*' function
  Provisioning.Enabled=n
  # this is currently not handled by cloud-init, so let walinuxagent do it.
  ResourceDisk.Format=y
  ResourceDisk.MountPoint=/mnt


Userdata
--------
Userdata is provided to cloud-init inside the ovf-env.xml file.  Cloud-init
expects that user-data will be provided as a base64 encoded value inside the
text child of an element named ``UserData`` or ``CustomData``, which is a
direct child of the ``LinuxProvisioningConfigurationSet`` (a sibling to
``UserName``).  If both ``UserData`` and ``CustomData`` are provided, behavior
is undefined on which will be selected.

In the example below, the user-data provided is 'this is my userdata', and the
datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``.
That agent command will take effect as if it were specified in system config.

Example:

.. code::

 <wa:ProvisioningSection>
  <wa:Version>1.0</wa:Version>
  <LinuxProvisioningConfigurationSet
     xmlns="http://schemas.microsoft.com/windowsazure"
     xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
   <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
   <HostName>myHost</HostName>
   <UserName>myuser</UserName>
   <UserPassword/>
   <CustomData>dGhpcyBpcyBteSB1c2VyZGF0YQ===</CustomData>
   <dscfg>eyJhZ2VudF9jb21tYW5kIjogWyJzdGFydCIsICJ3YWxpbnV4YWdlbnQiXX0=</dscfg>
   <DisableSshPasswordAuthentication>true</DisableSshPasswordAuthentication>
   <SSH>
    <PublicKeys>
     <PublicKey>
      <Fingerprint>6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7</Fingerprint>
      <Path>this-value-unused</Path>
     </PublicKey>
    </PublicKeys>
   </SSH>
  </LinuxProvisioningConfigurationSet>
 </wa:ProvisioningSection>

Configuration
-------------
Configuration for the datasource can be read from the system config or set
via the `dscfg` entry in the `LinuxProvisioningConfigurationSet`.  Content in
the dscfg node is expected to be base64 encoded yaml content, and it will be
merged into the 'datasource: Azure' entry.

The '``hostname_bounce: command``' entry can be either the literal string
'builtin' or a command to execute.  The command will be invoked after the
hostname is set, and will have the 'interface' in its environment.  If
``set_hostname`` is not true, then ``hostname_bounce`` will be ignored.

An example might be:
  command: ["sh", "-c", "killall dhclient; dhclient $interface"]

.. code::

  datasource:
   agent_command
   Azure:
    agent_command: [service, walinuxagent, start]
    set_hostname: True
    hostname_bounce:
     # the name of the interface to bounce
     interface: eth0
     # policy can be 'on', 'off' or 'force'
     policy: on
     # the method 'bounce' command.
     command: "builtin"
     hostname_command: "hostname"
   }
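The base64 ``dscfg`` payload can be produced the same way the unit tests in
this commit build theirs (``base64.b64encode(yaml.dump(cfg))``).  A short
illustrative sketch, assuming the Python 2 environment of this era where
``b64encode`` accepts a str:

.. code:: python

   import base64
   import yaml

   cfg = {'agent_command': ['start', 'walinuxagent']}
   dscfg = base64.b64encode(yaml.dump(cfg))
   # place the resulting string in the <dscfg> element of the
   # LinuxProvisioningConfigurationSet shown above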
hostname
--------
When the user launches an instance, they provide a hostname for that instance.
The hostname is provided to the instance in the ovf-env.xml file as
``HostName``.

Whatever value the instance provides in its dhcp request will resolve in the
domain returned in the 'search' request.

The interesting issue is that a generic image will already have a hostname
configured.  The ubuntu cloud images have 'ubuntu' as the hostname of the
system, and the initial dhcp request on eth0 is not guaranteed to occur after
the datasource code has been run.  So, on first boot, that initial value will
be sent in the dhcp request and *that* value will resolve.

In order to make the ``HostName`` provided in the ovf-env.xml resolve, a
dhcp request must be made with the new value.  Walinuxagent (in its current
version) handles this by polling the state of hostname and bouncing ('``ifdown
eth0; ifup eth0``') the network interface if it sees that a change has been
made.

cloud-init handles this by setting the hostname in the DataSource's 'get_data'
method via '``hostname $HostName``', and then bouncing the interface.  This
behavior can be configured or disabled in the datasource config.  See
'Configuration' above.
@@ -32,11 +32,12 @@ PKG_MP = {
     'boto': 'python-boto',
     'cheetah': 'python-cheetah',
     'configobj': 'python-configobj',
+    'jsonpatch': 'python-json-patch',
     'oauth': 'python-oauth',
     'prettytable': 'python-prettytable',
+    'pyserial': 'python-serial',
     'pyyaml': 'python-yaml',
     'requests': 'python-requests',
-    'jsonpatch': 'python-json-patch',
 }
 DEBUILD_ARGS = ["-us", "-S", "-uc", "-d"]
 
@@ -95,12 +96,20 @@ def main():
                         default=False,
                         action='store_true')
 
+    parser.add_argument("--init-system", dest="init_system",
+                        help=("build deb with INIT_SYSTEM=xxx"
+                              " (default: %(default)s"),
+                        default=os.environ.get("INIT_SYSTEM", "upstart"))
+
+
     for ent in DEBUILD_ARGS:
         parser.add_argument(ent, dest="debuild_args", action='append_const',
                             const=ent, help=("pass through '%s' to debuild" % ent))
 
     args = parser.parse_args()
 
+    os.environ['INIT_SYSTEM'] = args.init_system
+
     capture = True
     if args.verbose:
         capture = False
@@ -42,6 +42,7 @@ PKG_MP = {
         'jsonpatch': 'python-jsonpatch',
         'oauth': 'python-oauth',
         'prettytable': 'python-prettytable',
+        'pyserial': 'pyserial',
         'pyyaml': 'PyYAML',
         'requests': 'python-requests',
     },
@@ -53,6 +54,7 @@ PKG_MP = {
         'jsonpatch': 'python-jsonpatch',
         'oauth': 'python-oauth',
         'prettytable': 'python-prettytable',
+        'pyserial': 'python-pyserial',
         'pyyaml': 'python-yaml',
         'requests': 'python-requests',
     }
setup.py
@@ -37,8 +37,8 @@ def is_f(p):
 
 
 INITSYS_FILES = {
-    'sysvinit': [f for f in glob('sysvinit/*') if is_f(f)],
-    'sysvinit_deb': [f for f in glob('sysvinit/*') if is_f(f)],
+    'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
+    'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],
     'systemd': [f for f in glob('systemd/*') if is_f(f)],
     'upstart': [f for f in glob('upstart/*') if is_f(f)],
 }
sysvinit/debian/cloud-config  (new file, 64 lines)
@@ -0,0 +1,64 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides:          cloud-config
# Required-Start:    cloud-init cloud-init-local
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Cloud init modules --mode config
# Description:       Cloud configuration initialization
### END INIT INFO

# Authors: Julien Danjou <acid@debian.org>
#          Juerg Haefliger <juerg.haefliger@hp.com>
#          Thomas Goirand <zigo@debian.org>

PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Cloud service"
NAME=cloud-init
DAEMON=/usr/bin/$NAME
DAEMON_ARGS="modules --mode config"
SCRIPTNAME=/etc/init.d/$NAME

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

if init_is_upstart; then
    case "$1" in
    stop)
        exit 0
        ;;
    *)
        exit 1
        ;;
    esac
fi

case "$1" in
start)
    log_daemon_msg "Starting $DESC" "$NAME"
    $DAEMON ${DAEMON_ARGS}
    case "$?" in
        0|1) log_end_msg 0 ;;
        2) log_end_msg 1 ;;
    esac
    ;;
stop|restart|force-reload)
    echo "Error: argument '$1' not supported" >&2
    exit 3
    ;;
*)
    echo "Usage: $SCRIPTNAME {start}" >&2
    exit 3
    ;;
esac

:
sysvinit/debian/cloud-final  (new file, 66 lines)
@@ -0,0 +1,66 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides:          cloud-final
# Required-Start:    $all cloud-config
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Cloud init modules final jobs
# Description:       This runs the cloud configuration initialization "final" jobs
#                    and can be seen as the traditional "rc.local" time for the cloud.
#                    It runs after all cloud-config jobs are run
### END INIT INFO

# Authors: Julien Danjou <acid@debian.org>
#          Juerg Haefliger <juerg.haefliger@hp.com>
#          Thomas Goirand <zigo@debian.org>

PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Cloud service"
NAME=cloud-init
DAEMON=/usr/bin/$NAME
DAEMON_ARGS="modules --mode final"
SCRIPTNAME=/etc/init.d/$NAME

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

if init_is_upstart; then
    case "$1" in
    stop)
        exit 0
        ;;
    *)
        exit 1
        ;;
    esac
fi

case "$1" in
start)
    log_daemon_msg "Starting $DESC" "$NAME"
    $DAEMON ${DAEMON_ARGS}
    case "$?" in
        0|1) log_end_msg 0 ;;
        2) log_end_msg 1 ;;
    esac
    ;;
stop|restart|force-reload)
    echo "Error: argument '$1' not supported" >&2
    exit 3
    ;;
*)
    echo "Usage: $SCRIPTNAME {start}" >&2
    exit 3
    ;;
esac

:
sysvinit/debian/cloud-init  (new executable file, 64 lines)
@@ -0,0 +1,64 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides:          cloud-init
# Required-Start:    $local_fs $remote_fs $syslog $network cloud-init-local
# Required-Stop:     $remote_fs
# X-Start-Before:    sshd
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Cloud init
# Description:       Cloud configuration initialization
### END INIT INFO

# Authors: Julien Danjou <acid@debian.org>
#          Thomas Goirand <zigo@debian.org>

PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Cloud service"
NAME=cloud-init
DAEMON=/usr/bin/$NAME
DAEMON_ARGS="init"
SCRIPTNAME=/etc/init.d/$NAME

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

if init_is_upstart; then
    case "$1" in
    stop)
        exit 0
        ;;
    *)
        exit 1
        ;;
    esac
fi

case "$1" in
start)
    log_daemon_msg "Starting $DESC" "$NAME"
    $DAEMON ${DAEMON_ARGS}
    case "$?" in
        0|1) log_end_msg 0 ;;
        2) log_end_msg 1 ;;
    esac
    ;;
stop|restart|force-reload)
    echo "Error: argument '$1' not supported" >&2
    exit 3
    ;;
*)
    echo "Usage: $SCRIPTNAME {start}" >&2
    exit 3
    ;;
esac

:
sysvinit/debian/cloud-init-local  (new file, 63 lines)
@@ -0,0 +1,63 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides:          cloud-init-local
# Required-Start:    $local_fs $remote_fs
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Cloud init local
# Description:       Cloud configuration initialization
### END INIT INFO

# Authors: Julien Danjou <acid@debian.org>
#          Juerg Haefliger <juerg.haefliger@hp.com>

PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Cloud service"
NAME=cloud-init
DAEMON=/usr/bin/$NAME
DAEMON_ARGS="init --local"
SCRIPTNAME=/etc/init.d/$NAME

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

if init_is_upstart; then
    case "$1" in
    stop)
        exit 0
        ;;
    *)
        exit 1
        ;;
    esac
fi

case "$1" in
start)
    log_daemon_msg "Starting $DESC" "$NAME"
    $DAEMON ${DAEMON_ARGS}
    case "$?" in
        0|1) log_end_msg 0 ;;
        2) log_end_msg 1 ;;
    esac
    ;;
stop|restart|force-reload)
    echo "Error: argument '$1' not supported" >&2
    exit 3
    ;;
*)
    echo "Usage: $SCRIPTNAME {start}" >&2
    exit 3
    ;;
esac

:
@@ -26,8 +26,15 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
     xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
     <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
     """
-    for key, val in data.items():
-        content += "<%s>%s</%s>\n" % (key, val, key)
+    for key, dval in data.items():
+        if isinstance(dval, dict):
+            val = dval.get('text')
+            attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v in dval.items()
+                                    if k != 'text'])
+        else:
+            val = dval
+            attrs = ""
+        content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
 
     if userdata:
         content += "<UserData>%s</UserData>\n" % (base64.b64encode(userdata))
@@ -103,6 +110,9 @@ class TestAzureDataSource(MockerTestCase):
             data['iid_from_shared_cfg'] = path
             return 'i-my-azure-id'
 
+        def _apply_hostname_bounce(**kwargs):
+            data['apply_hostname_bounce'] = kwargs
+
         if data.get('ovfcontent') is not None:
             populate_dir(os.path.join(self.paths.seed_dir, "azure"),
                          {'ovf-env.xml': data['ovfcontent']})
@@ -118,7 +128,9 @@ class TestAzureDataSource(MockerTestCase):
                             (mod, 'pubkeys_from_crt_files',
                              _pubkeys_from_crt_files),
                             (mod, 'iid_from_shared_config',
-                             _iid_from_shared_config), ])
+                             _iid_from_shared_config),
+                            (mod, 'apply_hostname_bounce',
+                             _apply_hostname_bounce), ])
 
         dsrc = mod.DataSourceAzureNet(
             data.get('sys_cfg', {}), distro=None, paths=self.paths)
@@ -139,10 +151,24 @@ class TestAzureDataSource(MockerTestCase):
         self.assertEqual(0700, data['datadir_mode'])
         self.assertEqual(dsrc.metadata['instance-id'], 'i-my-azure-id')
 
-    def test_user_cfg_set_agent_command(self):
+    def test_user_cfg_set_agent_command_plain(self):
+        # set dscfg in via plaintext
         cfg = {'agent_command': "my_command"}
         odata = {'HostName': "myhost", 'UserName': "myuser",
-                 'dscfg': yaml.dump(cfg)}
+                 'dscfg': {'text': yaml.dump(cfg), 'encoding': 'plain'}}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(data['agent_invoked'], cfg['agent_command'])
+
+    def test_user_cfg_set_agent_command(self):
+        # set dscfg in via base64 encoded yaml
+        cfg = {'agent_command': "my_command"}
+        odata = {'HostName': "myhost", 'UserName': "myuser",
+                 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)),
+                           'encoding': 'base64'}}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
 
         dsrc = self._get_ds(data)
@@ -218,6 +244,48 @@ class TestAzureDataSource(MockerTestCase):
         for mypk in mypklist:
             self.assertIn(mypk, dsrc.cfg['_pubkeys'])
 
+    def test_disabled_bounce(self):
+        pass
+
+    def test_apply_bounce_call_1(self):
+        # hostname needs to get through to apply_hostname_bounce
+        mydata = "FOOBAR"
+        odata = {'HostName': 'my-random-hostname'}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        self._get_ds(data).get_data()
+        self.assertIn('hostname', data['apply_hostname_bounce'])
+        self.assertEqual(data['apply_hostname_bounce']['hostname'],
+                         odata['HostName'])
+
+    def test_apply_bounce_call_configurable(self):
+        # hostname_bounce should be configurable in datasource cfg
+        cfg = {'hostname_bounce': {'interface': 'eth1', 'policy': 'off',
+                                   'command': 'my-bounce-command',
+                                   'hostname_command': 'my-hostname-command'}}
+        odata = {'HostName': "xhost",
+                 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)),
+                           'encoding': 'base64'}}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+        self._get_ds(data).get_data()
+
+        for k in cfg['hostname_bounce']:
+            self.assertIn(k, data['apply_hostname_bounce'])
+
+        for k, v in cfg['hostname_bounce'].items():
+            self.assertEqual(data['apply_hostname_bounce'][k], v)
+
+    def test_set_hostname_disabled(self):
+        # config specifying set_hostname off should not bounce
+        cfg = {'set_hostname': False}
+        odata = {'HostName': "xhost",
+                 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)),
+                           'encoding': 'base64'}}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+        self._get_ds(data).get_data()
+
+        self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A")
+
+
 class TestReadAzureOvf(MockerTestCase):
     def test_invalid_xml_raises_non_azure_ds(self):