Rebasing on trunk

commit c8d4265b08

@@ -223,7 +223,8 @@ def resize_devices(resizer, devices):
                          "stat of '%s' failed: %s" % (blockdev, e),))
             continue

-        if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
+        if (not stat.S_ISBLK(statret.st_mode) and
+                not stat.S_ISCHR(statret.st_mode)):
             info.append((devent, RESIZE.SKIPPED,
                          "device '%s' not a block device" % blockdev,))
             continue

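The wrapped condition above is purely a line-length fix; the logic still skips any entry whose stat result is neither a block nor a character device. A minimal standalone sketch of that check (the helper name `is_resizable_device` is illustrative, not part of the patch):

```python
import os
import stat

def is_resizable_device(blockdev):
    """Return True if blockdev looks like a block or character device."""
    try:
        statret = os.stat(blockdev)
    except OSError:
        return False
    # Same predicate the patch re-wraps across two lines.
    return stat.S_ISBLK(statret.st_mode) or stat.S_ISCHR(statret.st_mode)

if __name__ == '__main__':
    for dev in ('/dev/sda', '/dev/null', '/etc/hostname'):
        print(dev, is_resizable_device(dev))
```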
@@ -16,15 +16,14 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

 import re

 from cloudinit import distros
 from cloudinit import helpers
 from cloudinit import log as logging
 from cloudinit import netinfo
 from cloudinit import ssh_util
 from cloudinit import util

 from cloudinit.settings import PER_INSTANCE

 LOG = logging.getLogger(__name__)

@@ -39,26 +38,27 @@ class Distro(distros.Distro):

     # Updates a key in /etc/rc.conf.
     def updatercconf(self, key, value):
-        LOG.debug("updatercconf: %s => %s" % (key, value))
+        LOG.debug("updatercconf: %s => %s", key, value)
         conf = self.loadrcconf()
         configchanged = False
         for item in conf:
             if item == key and conf[item] != value:
                 conf[item] = value
-                LOG.debug("[rc.conf]: Value %s for key %s needs to be changed" % (value, key))
+                LOG.debug("[rc.conf]: Value %s for key %s needs to be changed",
+                          value, key)
                 configchanged = True

         if configchanged:
             LOG.debug("Writing new /etc/rc.conf file")
-            with open('/etc/rc.conf', 'w') as file:
+            with open('/etc/rc.conf', 'w') as fp:
                 for keyval in conf.items():
-                    file.write("%s=%s\n" % keyval)
+                    fp.write("%s=%s\n" % keyval)

     # Load the contents of /etc/rc.conf and store all keys in a dict.
     def loadrcconf(self):
         conf = {}
-        with open("/etc/rc.conf") as file:
-            for line in file:
+        with open("/etc/rc.conf") as fp:
+            for line in fp:
                 tok = line.split('=')
                 conf[tok[0]] = tok[1].rstrip()
         return conf
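updatercconf() and loadrcconf() treat /etc/rc.conf as flat key=value lines and only rewrite the file when a value actually changes. A rough sketch of the same round-trip with the file path passed in explicitly (the helper names and the path argument are illustrative, not part of the patch):

```python
def load_rcconf(path):
    """Parse key=value lines from an rc.conf-style file into a dict."""
    conf = {}
    with open(path) as fp:
        for line in fp:
            key, _, value = line.partition('=')
            if key.strip():
                conf[key.strip()] = value.rstrip('\n')
    return conf

def update_rcconf(path, key, value):
    """Rewrite the file only when the key's value actually changes."""
    conf = load_rcconf(path)
    if conf.get(key) == value:
        return False
    conf[key] = value
    with open(path, 'w') as fp:
        for item in sorted(conf.items()):
            fp.write("%s=%s\n" % item)
    return True
```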
@@ -75,7 +75,7 @@ class Distro(distros.Distro):
         sys_hostname = self._read_hostname()
         return ('rc.conf', sys_hostname)

-    def _read_hostname(self, default=None):
+    def _read_hostname(self, filename, default=None):
         hostname = None
         try:
             hostname = self.readrcconf('hostname')
@@ -90,17 +90,17 @@ class Distro(distros.Distro):
             return fqdn
         return hostname

-    def _write_hostname(self, your_hostname, out_fn):
-        self.updatercconf('hostname', your_hostname)
+    def _write_hostname(self, hostname, filename):
+        self.updatercconf('hostname', hostname)

     def create_group(self, name, members):
         group_add_cmd = ['pw', '-n', name]
         if util.is_group(name):
-            LOG.warn("Skipping creation of existing group '%s'" % name)
+            LOG.warn("Skipping creation of existing group '%s'", name)
         else:
             try:
                 util.subp(group_add_cmd)
-                LOG.info("Created new group %s" % name)
+                LOG.info("Created new group %s", name)
             except Exception:
                 util.logexc("Failed to create group %s", name)

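Most of the logging changes in this file follow the same pattern: replace eager %-interpolation with logging's deferred formatting, so the message is only built if a handler actually emits the record. A small sketch of the difference, assuming a logger configured at INFO:

```python
import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)

name = "wheel"

# Eager: the string is formatted even though DEBUG records are dropped.
LOG.debug("Skipping creation of existing group '%s'" % name)

# Deferred: logging interpolates the arguments only when the record is
# actually handled, which is also what pylint's logging checks expect.
LOG.debug("Skipping creation of existing group '%s'", name)
```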
@@ -111,11 +111,11 @@ class Distro(distros.Distro):
                          "; user does not exist.", member, name)
                 continue
             util.subp(['pw', 'usermod', '-n', name, '-G', member])
-            LOG.info("Added user '%s' to group '%s'" % (member, name))
+            LOG.info("Added user '%s' to group '%s'", member, name)

     def add_user(self, name, **kwargs):
         if util.is_user(name):
-            LOG.info("User %s already exists, skipping." % name)
+            LOG.info("User %s already exists, skipping.", name)
             return False

         adduser_cmd = ['pw', 'useradd', '-n', name]
@@ -170,7 +170,7 @@ class Distro(distros.Distro):
         raise e

     # TODO:
-    def set_passwd(self, name, **kwargs):
+    def set_passwd(self, user, passwd, hashed=False):
         return False

     def lock_passwd(self, name):
@@ -182,7 +182,7 @@ class Distro(distros.Distro):

     # TODO:
     def write_sudo_rules(self, name, rules, sudo_file=None):
-        LOG.debug("[write_sudo_rules] Name: %s" % name)
+        LOG.debug("[write_sudo_rules] Name: %s", name)

     def create_user(self, name, **kwargs):
         self.add_user(name, **kwargs)
@@ -217,7 +217,8 @@ class Distro(distros.Distro):
         origconf = open(loginconf, 'r')

         for line in origconf:
-            newconf.write(re.sub('^default:', r'default:lang=%s:' % locale, line))
+            newconf.write(re.sub(r'^default:',
+                                 r'default:lang=%s:' % locale, line))
         newconf.close()
         origconf.close()
         # Make a backup of login.conf.
@@ -233,14 +234,14 @@ class Distro(distros.Distro):
             util.logexc("Failed to apply locale %s", locale)
             copyfile(backupconf, loginconf)

-    def install_packages():
+    def install_packages(self, pkglist):
         return

-    def package_command():
+    def package_command(self, cmd, args=None, pkgs=None):
         return

-    def set_timezone():
+    def set_timezone(self, tz):
         return

-    def update_package_sources():
+    def update_package_sources(self):
         return

@@ -16,6 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

+import httplib
 from urlparse import (urlparse, urlunparse)

 import functools
@@ -23,9 +24,11 @@ import json
 import urllib

 from cloudinit import log as logging
+from cloudinit import url_helper
 from cloudinit import util

 LOG = logging.getLogger(__name__)
+SKIP_USERDATA_CODES = frozenset([httplib.NOT_FOUND])


 def maybe_json_object(text):
@@ -138,20 +141,38 @@ class MetadataMaterializer(object):
         return joined


+def _skip_retry_on_codes(status_codes, _request_args, cause):
+    """Returns if a request should retry based on a given set of codes that
+    case retrying to be stopped/skipped.
+    """
+    if cause.code in status_codes:
+        return False
+    return True
+
+
 def get_instance_userdata(api_version='latest',
                           metadata_address='http://169.254.169.254',
                           ssl_details=None, timeout=5, retries=5):
     ud_url = combine_url(metadata_address, api_version)
     ud_url = combine_url(ud_url, 'user-data')
+    user_data = ''
     try:
+        # It is ok for userdata to not exist (thats why we are stopping if
+        # NOT_FOUND occurs) and just in that case returning an empty string.
+        exception_cb = functools.partial(_skip_retry_on_codes,
+                                         SKIP_USERDATA_CODES)
         response = util.read_file_or_url(ud_url,
                                          ssl_details=ssl_details,
                                          timeout=timeout,
-                                         retries=retries)
-        return str(response)
+                                         retries=retries,
+                                         exception_cb=exception_cb)
+        user_data = str(response)
+    except url_helper.UrlError as e:
+        if e.code not in SKIP_USERDATA_CODES:
+            util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
     except Exception:
         util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
-        return ''
+    return user_data


 def get_instance_metadata(api_version='latest',

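_skip_retry_on_codes is bound to SKIP_USERDATA_CODES with functools.partial and handed to read_file_or_url: when the fetch fails with a 404, the callback returns False and the retry loop gives up immediately, since missing user-data is an expected condition rather than a transient error. A standalone sketch of the same callback shape (FakeUrlError and the constants here are illustrative, not cloud-init classes):

```python
import functools

SKIP_CODES = frozenset([404])

def skip_retry_on_codes(status_codes, _request_args, cause):
    """Return False to stop retrying when the failure code is expected."""
    return cause.code not in status_codes

class FakeUrlError(Exception):
    def __init__(self, code):
        super(FakeUrlError, self).__init__("HTTP %s" % code)
        self.code = code

exception_cb = functools.partial(skip_retry_on_codes, SKIP_CODES)

print(exception_cb({}, FakeUrlError(404)))  # False -> stop retrying
print(exception_cb({}, FakeUrlError(500)))  # True  -> keep retrying
```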
@@ -44,7 +44,7 @@ def netdev_info(empty=""):
         # If the output of ifconfig doesn't contain the required info in the
         # obvious place, use a regex filter to be sure.
         elif len(toks) > 1:
-            if re.search("flags=\d+<up,", toks[1]):
+            if re.search(r"flags=\d+<up,", toks[1]):
                 devs[curdev]['up'] = True

         fieldpost = ""
@@ -58,11 +58,8 @@ def netdev_info(empty=""):
             except IndexError:
                 pass

-        """
-        Couple the different items we're interested in with the correct field
-        since FreeBSD/CentOS/Fedora differ in the output.
-        """
-
+        # Couple the different items we're interested in with the correct
+        # field since FreeBSD/CentOS/Fedora differ in the output.
         ifconfigfields = {
             "addr:": "addr", "inet": "addr",
             "bcast:": "bcast", "broadcast": "bcast",
@@ -98,17 +95,16 @@ def route_info():
             continue
         toks = line.split()

-        """
-        FreeBSD shows 6 items in the routing table:
-        Destination Gateway Flags Refs Use Netif Expire
-        default 10.65.0.1 UGS 0 34920 vtnet0
-
-        Linux netstat shows 2 more:
-        Destination Gateway Genmask Flags MSS Window irtt Iface
-        0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
-        """
-
-        if len(toks) < 6 or toks[0] == "Kernel" or toks[0] == "Destination" or toks[0] == "Internet" or toks[0] == "Internet6" or toks[0] == "Routing":
+        # FreeBSD shows 6 items in the routing table:
+        # Destination Gateway Flags Refs Use Netif Expire
+        # default 10.65.0.1 UGS 0 34920 vtnet0
+        #
+        # Linux netstat shows 2 more:
+        # Destination Gateway Genmask Flags MSS Window irtt Iface
+        # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
+        if (len(toks) < 6 or toks[0] == "Kernel" or
+                toks[0] == "Destination" or toks[0] == "Internet" or
+                toks[0] == "Internet6" or toks[0] == "Routing"):
             continue

         if len(toks) < 8:

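The re-wrapped filter only needs to skip the banner and column-header lines that netstat -r prints on both Linux and FreeBSD before the data rows. A standalone sketch of the same filtering, using made-up sample output (the sample text and helper name are illustrative):

```python
SAMPLE = """\
Routing tables

Internet:
Destination        Gateway            Flags    Refs      Use  Netif Expire
default            10.65.0.1          UGS         0    34920 vtnet0
10.65.0.0/24       link#1             U           0      120 vtnet0
"""

HEADER_TOKENS = ("Kernel", "Destination", "Internet", "Internet6", "Routing")

def data_rows(text):
    for line in text.splitlines():
        toks = line.split()
        # Skip blank lines, section banners and column headers, mirroring
        # the condition the patch re-wraps.
        if len(toks) < 6 or toks[0] in HEADER_TOKENS:
            continue
        yield toks

for row in data_rows(SAMPLE):
    print(row[0], row[1])
```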
@@ -103,7 +103,7 @@ class UrlError(IOError):

 def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             headers=None, headers_cb=None, ssl_details=None,
-            check_status=True, allow_redirects=True):
+            check_status=True, allow_redirects=True, exception_cb=None):
     url = _cleanurl(url)
     req_args = {
         'url': url,
|
||||
# Handle retrying ourselves since the built-in support
|
||||
# doesn't handle sleeping between tries...
|
||||
for i in range(0, manual_tries):
|
||||
req_args['headers'] = headers_cb(url)
|
||||
filtered_req_args = {}
|
||||
for (k, v) in req_args.items():
|
||||
if k == 'data':
|
||||
continue
|
||||
filtered_req_args[k] = v
|
||||
try:
|
||||
req_args['headers'] = headers_cb(url)
|
||||
filtered_req_args = {}
|
||||
for (k, v) in req_args.items():
|
||||
if k == 'data':
|
||||
continue
|
||||
filtered_req_args[k] = v
|
||||
|
||||
LOG.debug("[%s/%s] open '%s' with %s configuration", i,
|
||||
manual_tries, url, filtered_req_args)
|
||||
|
||||
@@ -196,6 +195,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
                 # ssl exceptions are not going to get fixed by waiting a
                 # few seconds
                 break
+            if exception_cb and not exception_cb(filtered_req_args, excps[-1]):
+                break
             if i + 1 < manual_tries and sec_between > 0:
                 LOG.debug("Please wait %s seconds while we wait to try again",
                           sec_between)
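Inside readurl the new callback is consulted after each failed attempt: returning False breaks out of the manual retry loop, while returning True (or passing no callback) keeps the existing sleep-and-retry behaviour. A stripped-down sketch of that control flow, not the real readurl (the function name and arguments here are illustrative):

```python
import time

def fetch_with_retries(do_fetch, tries=3, sec_between=0.1, exception_cb=None):
    """Retry do_fetch(), letting a callback veto further attempts."""
    excps = []
    for i in range(tries):
        try:
            return do_fetch()
        except Exception as e:
            excps.append(e)
            if exception_cb and not exception_cb({}, excps[-1]):
                break  # the caller knows this failure will not improve
            if i + 1 < tries:
                time.sleep(sec_between)
    raise excps[-1]
```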
|
@ -311,7 +311,8 @@ class UserDataProcessor(object):
|
||||
def _attach_part(self, outer_msg, part):
|
||||
"""
|
||||
Attach a message to an outer message. outermsg must be a MIMEMultipart.
|
||||
Modifies a header in the outer message to keep track of number of attachments.
|
||||
Modifies a header in the outer message to keep track of number of
|
||||
attachments.
|
||||
"""
|
||||
part_count = self._multi_part_count(outer_msg)
|
||||
self._process_before_attach(part, part_count + 1)
|
||||
|
@@ -691,7 +691,7 @@ def fetch_ssl_details(paths=None):

 def read_file_or_url(url, timeout=5, retries=10,
                      headers=None, data=None, sec_between=1, ssl_details=None,
-                     headers_cb=None):
+                     headers_cb=None, exception_cb=None):
     url = url.lstrip()
     if url.startswith("/"):
         url = "file://%s" % url
@@ -708,7 +708,8 @@ def read_file_or_url(url, timeout=5, retries=10,
                            headers_cb=headers_cb,
                            data=data,
                            sec_between=sec_between,
-                           ssl_details=ssl_details)
+                           ssl_details=ssl_details,
+                           exception_cb=exception_cb)


 def load_yaml(blob, default=None, allowed=(dict,)):
@@ -962,7 +963,7 @@ def is_resolvable(name):
             pass
         _DNS_REDIRECT_IP = badips
         if badresults:
-            LOG.debug("detected dns redirection: %s" % badresults)
+            LOG.debug("detected dns redirection: %s", badresults)

     try:
         result = socket.getaddrinfo(name, None)
@@ -1321,6 +1322,7 @@ def mounts():
         (mountoutput, _err) = subp("mount")
         mount_locs = mountoutput.splitlines()
         method = 'mount'
+    mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
     for mpline in mount_locs:
         # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
         # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
@@ -1328,7 +1330,7 @@ def mounts():
         if method == 'proc':
             (dev, mp, fstype, opts, _freq, _passno) = mpline.split()
         else:
-            m = re.search('^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', mpline)
+            m = re.search(mountre, mpline)
             dev = m.group(1)
             mp = m.group(2)
             fstype = m.group(3)
|
||||
|
||||
|
||||
def sym_link(source, link):
|
||||
LOG.debug("Creating symbolic link from %r => %r" % (link, source))
|
||||
LOG.debug("Creating symbolic link from %r => %r", link, source)
|
||||
os.symlink(source, link)
|
||||
|
||||
|
||||
@@ -1443,7 +1445,8 @@ def uptime():
             size = ctypes.c_size_t()
             buf = ctypes.c_int()
             size.value = ctypes.sizeof(buf)
-            libc.sysctlbyname("kern.boottime", ctypes.byref(buf), ctypes.byref(size), None, 0)
+            libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
+                              ctypes.byref(size), None, 0)
             now = time.time()
            bootup = buf.value
            uptime_str = now - bootup
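The uptime() change only re-wraps the sysctlbyname call; the patch itself reads the value into a plain c_int. A hedged sketch of the same ctypes call that instead decodes kern.boottime as a struct timeval, assuming a 64-bit FreeBSD layout (the field sizes and helper name are assumptions, not taken from the patch):

```python
import ctypes
import ctypes.util
import time

class _Timeval(ctypes.Structure):
    # Assumed layout of struct timeval on 64-bit FreeBSD.
    _fields_ = [("tv_sec", ctypes.c_int64),
                ("tv_usec", ctypes.c_int64)]

def freebsd_uptime():
    """Best-effort uptime in seconds; only meaningful where libc has sysctlbyname."""
    libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
    boottime = _Timeval()
    size = ctypes.c_size_t(ctypes.sizeof(boottime))
    ret = libc.sysctlbyname(b"kern.boottime", ctypes.byref(boottime),
                            ctypes.byref(size), None, 0)
    if ret != 0:
        raise OSError(ctypes.get_errno(), "sysctlbyname(kern.boottime) failed")
    return time.time() - boottime.tv_sec
```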
@@ -1792,7 +1795,7 @@ def parse_mount(path):
     (mountoutput, _err) = subp("mount")
     mount_locs = mountoutput.splitlines()
     for line in mount_locs:
-        m = re.search('^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
+        m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
         devpth = m.group(1)
         mount_point = m.group(2)
         fs_type = m.group(3)

@@ -1,4 +1,5 @@
-import sys, os
+import os
+import sys

 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the

setup.py
@@ -63,7 +63,7 @@ def tiny_p(cmd, capture=True):
     (out, err) = sp.communicate()
     ret = sp.returncode  # pylint: disable=E1101
     if ret not in [0]:
-        raise RuntimeError("Failed running %s [rc=%s] (%s, %s)"
+        raise RuntimeError("Failed running %s [rc=%s] (%s, %s)"
                            % (cmd, ret, out, err))
     return (out, err)

@@ -102,7 +102,7 @@ class InitsysInstallData(install):
                 " specifying a init system!") % (", ".join(INITSYS_TYPES)))
         elif self.init_system:
             self.distribution.data_files.append(
-                (INITSYS_ROOTS[self.init_system],
+                (INITSYS_ROOTS[self.init_system],
                  INITSYS_FILES[self.init_system]))
             # Force that command to reinitalize (with new file list)
             self.distribution.reinitialize_command('install_data', True)
@@ -134,7 +134,7 @@ setuptools.setup(name='cloud-init',
                    [f for f in glob('doc/examples/seed/*') if is_f(f)]),
       ],
       install_requires=read_requires(),
-      cmdclass = {
+      cmdclass={
           # Use a subclass for install that handles
           # adding on the right init system configuration files
           'install': InitsysInstallData,
@@ -285,7 +285,7 @@ class TestConfigDriveDataSource(MockerTestCase):
         self.assertEqual(["/dev/vdb", "/dev/zdd"],
                          ds.find_candidate_devs())

-        # verify that partitions are considered, but only if they have a label.
+        # verify that partitions are considered, that have correct label.
         devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
                              "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]}
         self.assertEqual(["/dev/vdb3"],
@@ -119,7 +119,8 @@ class TestMAASDataSource(mocker.MockerTestCase):
             mock_request(url, headers=None, timeout=mocker.ANY,
                          data=mocker.ANY, sec_between=mocker.ANY,
                          ssl_details=mocker.ANY, retries=mocker.ANY,
-                         headers_cb=my_headers_cb)
+                         headers_cb=my_headers_cb,
+                         exception_cb=mocker.ANY)
             resp = valid.get(key)
             self.mocker.result(util.StringResponse(resp))
         self.mocker.replay()
@@ -33,6 +33,14 @@ class TestEc2Util(helpers.TestCase):
         userdata = eu.get_instance_userdata(self.VERSION, retries=0)
         self.assertEquals('', userdata)

+    @hp.activate
+    def test_userdata_fetch_fail_server_not_found(self):
+        hp.register_uri(hp.GET,
+                        'http://169.254.169.254/%s/user-data' % (self.VERSION),
+                        status=404)
+        userdata = eu.get_instance_userdata(self.VERSION)
+        self.assertEquals('', userdata)
+
     @hp.activate
     def test_metadata_fetch_no_keys(self):
         base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION)

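The new test registers a 404 for the user-data URL with httpretty and asserts that the helper degrades to an empty string rather than raising. The same pattern outside the cloud-init test harness, assuming httpretty and requests are installed (the function name and flow are illustrative):

```python
import httpretty
import requests

@httpretty.activate
def fetch_userdata_returns_empty_on_404():
    url = 'http://169.254.169.254/latest/user-data'
    httpretty.register_uri(httpretty.GET, url, status=404)
    resp = requests.get(url)
    # A missing user-data document is expected; treat it as "no user data".
    return '' if resp.status_code == 404 else resp.text

assert fetch_userdata_returns_empty_on_404() == ''
```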
@@ -1,15 +1,7 @@
 #!/bin/bash

-ci_files='cloudinit/*.py cloudinit/config/*.py'
-test_files=$(find tests -name "*.py")
-def_files="$ci_files $test_files"
-
 if [ $# -eq 0 ]; then
-   files=( )
-   for f in $def_files; do
-      [ -f "$f" ] || { echo "failed, $f not a file" 1>&2; exit 1; }
-      files[${#files[@]}]=${f}
-   done
+   files=( bin/cloud-init $(find * -name "*.py" -type f) )
 else
    files=( "$@" );
 fi
@@ -44,4 +36,3 @@ cmd=(

 echo -e "\nRunning 'cloudinit' pep8:"
 echo "${cmd[@]}"
 "${cmd[@]}"

@@ -1,7 +1,7 @@
 #!/bin/bash

 if [ $# -eq 0 ]; then
-   files=( $(find * -name "*.py" -type f) )
+   files=( bin/cloud-init $(find * -name "*.py" -type f) )
 else
    files=( "$@" );
 fi
@@ -16,6 +16,7 @@ cmd=(
    --rcfile=$RC_FILE
    --disable=R
    --disable=I
+   --dummy-variables-rgx="_"
    "${files[@]}"
 )

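The added --dummy-variables-rgx="_" option tells pylint to treat underscore placeholder names, such as the _err in "(out, _err) = sp.communicate()" earlier in this commit, as intentionally unused. A tiny illustration of the pattern it allows:

```python
# With --dummy-variables-rgx="_", pylint treats underscore placeholders
# like _err as intentionally unused and does not warn about them.
(out, _err) = ("stdout text", "stderr text")
print(out)
```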