Copied devstack from designate.

Pino de Candia 2018-01-26 14:41:34 -06:00
parent b1ed741394
commit 6a546abc52
19 changed files with 2065 additions and 31 deletions

devstack/README.rst Normal file

@ -0,0 +1,20 @@
====================
Enabling in Devstack
====================
**WARNING**: the stack.sh script must be run in a disposable VM, which is
not created automatically; see the README.md file in the "devstack"
repository. See contrib/vagrant to create a Vagrant VM.
1. Download DevStack::

    git clone https://git.openstack.org/openstack-dev/devstack.git
    cd devstack

2. Add this repo as an external repository::

    > cat local.conf
    [[local|localrc]]
    enable_plugin designate https://git.openstack.org/openstack/designate

3. run ``stack.sh``
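
For reference, a slightly fuller ``local.conf`` might also pick a backend and
a non-default DNS port, e.g. (the values are illustrative; the plugin's
settings file enables the individual services for you)::

    [[local|localrc]]
    enable_plugin designate https://git.openstack.org/openstack/designate
    DESIGNATE_BACKEND_DRIVER=bind9
    DESIGNATE_SERVICE_PORT_DNS=5322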

devstack/exercise.sh Normal file

@ -0,0 +1,318 @@
#!/usr/bin/env bash
# **designate.sh**
# Simple Tests to verify designate is running
echo "*********************************************************************"
echo "Begin DevStack Exercise: $0"
echo "*********************************************************************"
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Keep track of the current directory
SCRIPT_DIR=$(cd $(dirname "$0") && pwd)
DEVSTACK_DIR=$(cd $SCRIPT_DIR/../..; pwd)/devstack
if [ -x "$HOME/devstack/stack.sh" ]; then
DEVSTACK_DIR=$HOME/devstack/
fi
# Import common functions
source $DEVSTACK_DIR/functions
# Import configuration
source $DEVSTACK_DIR/openrc admin admin
# Import exercise configuration
source $DEVSTACK_DIR/exerciserc
# Skip if designate is not enabled
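# (in DevStack exercises, exit code 55 conventionally means "skipped" rather
# than "failed")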
is_service_enabled designate || exit 55
# Import settings + designate library
source $SCRIPT_DIR/plugin.sh
# Settings
# ========
source $SCRIPT_DIR/settings
# Used with dig to look up in DNS
DIG_TIMEOUT=30
if [ "$DESIGNATE_BACKEND_DRIVER" == "akamai" ]; then
# Akamai can be slow to propagate changes out
DIG_TIMEOUT=300
fi
# used with dig to look up in DNS
DIG_FLAGS="-p $DESIGNATE_SERVICE_PORT_DNS @$DESIGNATE_SERVICE_HOST"
# used with dig to do an AXFR against MDNS
DIG_AXFR_FLAGS="-p $DESIGNATE_SERVICE_PORT_MDNS @$DESIGNATE_SERVICE_HOST AXFR +tcp +nocmd"
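# For example, with the default settings (DESIGNATE_SERVICE_PORT_DNS=53 and
# DESIGNATE_SERVICE_PORT_MDNS=5354) these flags expand to commands of the form:
#   dig -p 53 @$DESIGNATE_SERVICE_HOST <record_name> <record_type>
#   dig -p 5354 @$DESIGNATE_SERVICE_HOST <zone_name> AXFR +tcp +nocmd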
# Functions
# =========
function cleanup {
# Try to cleanup any domains, this is important for backends like
# Akamai/Dyn, where state is not fully reset between test runs.
source $DEVSTACK_DIR/openrc admin admin
designate --all-tenants domain-list -f csv | awk 'BEGIN { FS = "," } ; {print $1}' | \
tail -n+2 | xargs --no-run-if-empty -n1 designate --all-tenants domain-delete
}
trap cleanup EXIT
function ensure_record_present {
local record_name=$1
local record_type=$2
local record_value=$3
if [ "$DESIGNATE_BACKEND_DRIVER" = "fake" ] ; then
# if the backend is fake, there will be no actual DNS records
return 0
fi
if ! timeout $DIG_TIMEOUT sh -c "while ! dig +short $DIG_FLAGS $record_name $record_type | grep \"$record_value\"; do sleep 1; done"; then
die $LINENO "Error: record $record_name ($record_type) not found in DNS"
fi
# Display for debugging
dig $DIG_FLAGS $record_name $record_type
return 0
}
function ensure_record_absent {
local record_name=$1
local record_type=$2
local record_value=$3
if [ "$DESIGNATE_BACKEND_DRIVER" = "fake" ] ; then
# if the backend is fake, there will be no actual DNS records
return 0
fi
if ! timeout $DIG_TIMEOUT sh -c "while dig +short $DIG_FLAGS $record_name $record_type | grep \"$record_value\"; do sleep 1; done"; then
# Display for debugging
dig $DIG_FLAGS $record_name $record_type
die $LINENO "Error: record $record_name ($record_type) found in DNS, should be absent"
fi
return 0
}
# do an AXFR request to MDNS
# if it does not match the expected value, give an error
function verify_axfr_in_mdns {
# Display for debugging
dig $DIG_AXFR_FLAGS "$1"
if dig $DIG_AXFR_FLAGS "$1"; then
if [ -n "$2" ] ; then
local axfr_records=$(dig $DIG_AXFR_FLAGS "$1" | grep "$1" | wc -l)
if [ "$axfr_records" = "$2" ] ; then
return 0
else
die $LINENO "Error: AXFR to MDNS did not return the expected number of records"
fi
fi
return 0
else
die $LINENO "Error: AXFR to MDNS did not return a correct response"
fi
}
# get the domain id (uuid) given the domain name
# if REQUIRED is set, die with an error if name not found
function get_domain_id {
local domain_name=$1
local required=$2
local domain_id=$(designate domain-list | egrep " $domain_name " | get_field 1)
if [ "$required" = "1" ] ; then
die_if_not_set $LINENO domain_id "Failure retrieving DOMAIN_ID"
fi
echo "$domain_id"
}
# get the domain_name given the id
function get_domain_name {
designate domain-list | grep "$1" | get_field 2
}
# if the given domain does not exist, it will be created
# the domain_id of the domain will be returned
function get_or_create_domain_id {
local domainid=$(get_domain_id "$1")
if [[ -z "$domainid" ]]; then
designate domain-create --name $1 --email admin@devstack.org --ttl 86400 --description "domain $1" 1>&2
domainid=$(designate domain-list | grep "$1" | get_field 1)
fi
echo $domainid
}
# get the record id (uuid) given the record name and domain id
# if REQUIRED is set, die with an error if name not found
function get_record_id {
local domain_id=$1
local record_name=$2
local record_type=$3
local required=$4
local record_id=$(designate record-list $domain_id | egrep " $record_name " | egrep " $record_type " | get_field 1)
if [ "$required" = "1" ] ; then
die_if_not_set $LINENO record_id "Failure retrieving RECORD_ID"
fi
echo "$record_id"
}
# Testing Servers
# ===============
designate server-list
# NUMBER_OF_RECORDS keeps track of the records we need to get for AXFR
# We start with the number of NS lines returned from server list
# (Header line makes up for SOA + Number of NS record lines)
NUMBER_OF_RECORDS=$(designate server-list -f csv | wc -l)
# Add 1 extra to account for the additional SOA at the end of the AXFR
((NUMBER_OF_RECORDS+=1))
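# Worked example: with a single pool nameserver, `designate server-list -f csv`
# prints two lines (the CSV header plus one NS row), so the count starts at 2
# (the header standing in for the SOA); the +1 above accounts for the SOA that
# closes the AXFR, giving 3, and every record created below then increments it.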
# Testing Domains
# ===============
# List domains
designate domain-list
# Create random domain name
DOMAIN_NAME="exercise-$(openssl rand -hex 4).com."
# Create the domain
designate domain-create --name $DOMAIN_NAME --email devstack@example.org
DOMAIN_ID=$(get_domain_id $DOMAIN_NAME 1)
# Fetch the domain
designate domain-get $DOMAIN_ID
# List the nameservers hosting the domain
designate domain-servers-list $DOMAIN_ID
# Testing Records
# ===============
# Create random record name
A_RECORD_NAME="$(openssl rand -hex 4).${DOMAIN_NAME}"
# Create an A record
designate record-create $DOMAIN_ID --name $A_RECORD_NAME --type A --data 127.0.0.1
((NUMBER_OF_RECORDS++))
A_RECORD_ID=$(get_record_id $DOMAIN_ID $A_RECORD_NAME A)
# Fetch the record
designate record-get $DOMAIN_ID $A_RECORD_ID
# Verify the record is published in DNS
ensure_record_present $A_RECORD_NAME A 127.0.0.1
# -----
# Create random record name
AAAA_RECORD_NAME="$(openssl rand -hex 4).${DOMAIN_NAME}"
# Create an AAAA record
designate record-create $DOMAIN_ID --name $AAAA_RECORD_NAME --type AAAA --data "2607:f0d0:1002:51::4"
((NUMBER_OF_RECORDS++))
AAAA_RECORD_ID=$(get_record_id $DOMAIN_ID $AAAA_RECORD_NAME AAAA)
# Fetch the record
designate record-get $DOMAIN_ID $AAAA_RECORD_ID
# Verify the record is published in DNS
ensure_record_present $AAAA_RECORD_NAME AAAA 2607:f0d0:1002:51::4
# -----
# Create a MX record
designate record-create $DOMAIN_ID --name $DOMAIN_NAME --type MX --priority 5 --data "mail.example.com."
((NUMBER_OF_RECORDS++))
MX_RECORD_ID=$(get_record_id $DOMAIN_ID $DOMAIN_NAME MX)
# Fetch the record
designate record-get $DOMAIN_ID $MX_RECORD_ID
# Verify the record is published in DNS
ensure_record_present $DOMAIN_NAME MX "5 mail.example.com."
# -----
# Create a SRV record
designate record-create $DOMAIN_ID --name _sip._tcp.$DOMAIN_NAME --type SRV --priority 10 --data "5 5060 sip.example.com."
((NUMBER_OF_RECORDS++))
SRV_RECORD_ID=$(get_record_id $DOMAIN_ID _sip._tcp.$DOMAIN_NAME SRV)
# Fetch the record
designate record-get $DOMAIN_ID $SRV_RECORD_ID
# Verify the record is published in DNS
ensure_record_present _sip._tcp.$DOMAIN_NAME SRV "10 5 5060 sip.example.com."
# -----
# Create random record name
CNAME_RECORD_NAME="$(openssl rand -hex 4).${DOMAIN_NAME}"
# Create a CNAME record
designate record-create $DOMAIN_ID --name $CNAME_RECORD_NAME --type CNAME --data $DOMAIN_NAME
((NUMBER_OF_RECORDS++))
CNAME_RECORD_ID=$(get_record_id $DOMAIN_ID $CNAME_RECORD_NAME CNAME)
# Fetch the record
designate record-get $DOMAIN_ID $CNAME_RECORD_ID
# Verify the record is published in DNS
ensure_record_present $CNAME_RECORD_NAME CNAME $DOMAIN_NAME
# -----
# List Records
designate record-list $DOMAIN_ID
# Send an AXFR to MDNS and check for the records returned
verify_axfr_in_mdns $DOMAIN_NAME $NUMBER_OF_RECORDS
# -----
# Delete a Record
designate record-delete $DOMAIN_ID $CNAME_RECORD_ID
# List Records
designate record-list $DOMAIN_ID
# Fetch the record - should be gone
designate record-get $DOMAIN_ID $CNAME_RECORD_ID || echo "good - record was removed"
# verify not in DNS anymore
ensure_record_absent $CNAME_RECORD_NAME CNAME $DOMAIN_NAME
# Testing Domains Delete
# ======================
# Delete the domain
designate domain-delete $DOMAIN_ID
# Fetch the domain - should be gone
designate domain-get $DOMAIN_ID || echo "good - domain was removed"
set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End DevStack Exercise: $0"
echo "*********************************************************************"


@ -0,0 +1,24 @@
#!/bin/bash
set -ex
pushd $BASE/new/devstack
DEVSTACK_GATE_DESIGNATE_DRIVER=${DEVSTACK_GATE_DESIGNATE_DRIVER:-powerdns}
export KEEP_LOCALRC=1
export ENABLED_SERVICES=designate,designate-api,designate-central,designate-sink,designate-mdns,designate-pool-manager,designate-zone-manager
echo "DESIGNATE_SERVICE_PORT_DNS=5322" >> $BASE/new/devstack/localrc
echo "DESIGNATE_BACKEND_DRIVER=$DEVSTACK_GATE_DESIGNATE_DRIVER" >> $BASE/new/devstack/localrc
echo "DESIGNATE_PERIODIC_RECOVERY_INTERVAL=20" >> $BASE/new/devstack/localrc
echo "DESIGNATE_PERIODIC_SYNC_INTERVAL=20" >> $BASE/new/devstack/localrc
# Pass through any DESIGNATE_ env vars to the localrc file
env | grep -E "^DESIGNATE_" >> $BASE/new/devstack/localrc || :
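# With the defaults above, localrc ends up containing (illustrative):
#   DESIGNATE_SERVICE_PORT_DNS=5322
#   DESIGNATE_BACKEND_DRIVER=powerdns
#   DESIGNATE_PERIODIC_RECOVERY_INTERVAL=20
#   DESIGNATE_PERIODIC_SYNC_INTERVAL=20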
popd
# Run DevStack Gate
$BASE/new/devstack-gate/devstack-vm-gate.sh


@ -0,0 +1,50 @@
#!/bin/bash
set -ex
# Run the Designate DevStack exercises
$BASE/new/designate/devstack/exercise.sh
# Import functions needed for the below workaround
source $BASE/new/devstack/functions
# Workaround for Tempest architectural changes
# See bugs:
# 1) https://bugs.launchpad.net/manila/+bug/1531049
# 2) https://bugs.launchpad.net/tempest/+bug/1524717
TEMPEST_CONFIG=$BASE/new/tempest/etc/tempest.conf
ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-"admin"}
ADMIN_PASSWORD=${ADMIN_PASSWORD:-"secretadmin"}
sudo chown -R $USER:stack $BASE/new/tempest
sudo chown -R $USER:stack $BASE/data/tempest
iniset $TEMPEST_CONFIG auth admin_username ${ADMIN_USERNAME:-"admin"}
iniset $TEMPEST_CONFIG auth admin_password $ADMIN_PASSWORD
iniset $TEMPEST_CONFIG auth admin_tenant_name $ADMIN_TENANT_NAME
iniset $TEMPEST_CONFIG auth admin_domain_name ${ADMIN_DOMAIN_NAME:-"Default"}
iniset $TEMPEST_CONFIG identity username ${TEMPEST_USERNAME:-"demo"}
iniset $TEMPEST_CONFIG identity password $ADMIN_PASSWORD
iniset $TEMPEST_CONFIG identity tenant_name ${TEMPEST_TENANT_NAME:-"demo"}
iniset $TEMPEST_CONFIG identity alt_username ${ALT_USERNAME:-"alt_demo"}
iniset $TEMPEST_CONFIG identity alt_password $ADMIN_PASSWORD
iniset $TEMPEST_CONFIG identity alt_tenant_name ${ALT_TENANT_NAME:-"alt_demo"}
iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4
iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT
iniset $TEMPEST_CONFIG validation network_for_ssh ${PRIVATE_NETWORK_NAME:-"private"}
# Run the Designate Tempest tests
sudo BASE=$BASE ./run_tempest_tests.sh
# TODO(pglass) - update cli tests to look in the [auth] section for admin creds
iniset $TEMPEST_CONFIG identity admin_username ${ADMIN_USERNAME:-"admin"}
iniset $TEMPEST_CONFIG identity admin_password $ADMIN_PASSWORD
iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME
iniset $TEMPEST_CONFIG identity admin_domain_name ${ADMIN_DOMAIN_NAME:-"Default"}
# must match the dir where `openstack` is installed
DESIGNATE_CLI_DIR=${DESIGNATE_CLI_DIR:-"$BASE/new/python-designateclient"}
iniset $TEMPEST_CONFIG designateclient directory "$DESIGNATE_CLI_DIR/.venv/bin"
# Run the python-designateclient functional tests
sudo BASE=$BASE ./run_cli_tests.sh


@ -0,0 +1,28 @@
#!/bin/bash -e
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DESIGNATE_CLI_DIR=${DESIGNATE_CLI_DIR:-"$BASE/new/python-designateclient"}
TEMPEST_DIR=${TEMPEST_DIR:-"$BASE/new/tempest"}
export TEMPEST_CONFIG=$TEMPEST_DIR/etc/tempest.conf
pushd $DESIGNATE_CLI_DIR
# we need the actual openstack executable which is not installed by tox
virtualenv "$DESIGNATE_CLI_DIR/.venv"
source "$DESIGNATE_CLI_DIR/.venv/bin/activate"
pip install python-openstackclient
pip install .
tox -e functional -- --concurrency 4
popd


@ -0,0 +1,32 @@
#!/bin/bash -e
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# How many seconds to wait for the API to be responding before giving up
API_RESPONDING_TIMEOUT=20
if ! timeout ${API_RESPONDING_TIMEOUT} sh -c "while ! curl -s http://127.0.0.1:9001/ 2>/dev/null | grep -q 'v2' ; do sleep 1; done"; then
echo "The Designate API failed to respond within ${API_RESPONDING_TIMEOUT} seconds"
exit 1
fi
echo "Successfully contacted the Designate API"
# Where Designate and Tempest code lives
DESIGNATE_DIR=${DESIGNATE_DIR:-"$BASE/new/designate"}
TEMPEST_DIR=${TEMPEST_DIR:-"$BASE/new/tempest"}
pushd $DESIGNATE_DIR
export TEMPEST_CONFIG=$TEMPEST_DIR/etc/tempest.conf
tox -e functional -- --concurrency 4
popd

devstack/networking_test.py Normal file

@ -0,0 +1,664 @@
#!/usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Author: Federico Ceratto <federico.ceratto@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Network simulator
~~~~~~~~~~~~~~~~~
Perform end-to-end stress tests on Designate over a simulated network
that exhibits high latency and packet loss (much like a real one)
WARNING: this script is to be run on a disposable devstack VM
It requires sudo and it will configure /sbin/tc
Usage:
cd <designate_repo>/contrib/vagrant
./setup_ubuntu_devstack
vagrant ssh ubuntu
source ~/devstack/openrc
/opt/stack/designate/devstack/networking_test.py
Monitor the logfiles
"""
from argparse import ArgumentParser
from collections import OrderedDict
from itertools import product
from subprocess import check_output
from subprocess import CalledProcessError
from tempfile import NamedTemporaryFile
from threading import Thread
import json
import logging
import os
import random
import string
import time
import sys
import dns
import dns.resolver
log = logging.getLogger()
tc_path = '/sbin/tc'
sudo_path = '/usr/bin/sudo'
iptables_restore_path = '/sbin/iptables-restore'
designate_cli_path = '/usr/local/bin/designate'
openstack_cli = 'openstack'
def gen_random_name(l):
return "".join(
random.choice(string.ascii_lowercase + string.digits)
for n in range(l)
)
def parse_args():
ap = ArgumentParser()
ap.add_argument('-d', '--debug', action='store_true')
return ap.parse_args()
def run_shell(cmd, env=None):
log.debug(" running %s" % cmd)
out = check_output(cmd, env=env, shell=True, executable='/bin/bash')
return [line.rstrip() for line in out.splitlines()]
class DesignateCLI(object):
"""Designate CLI runner
"""
def __init__(self):
"""Setup CLI handler"""
self._cli_env = {}
for k, v in sorted(os.environ.items()):
if k.startswith('OS_'):
log.debug("%s: %s", k, v)
self._cli_env[k] = v
def setup_quota(self, quota):
"""Setup quota
"""
user_id = self.run_json("token issue")["user_id"]
cmd = """quota-update
--domains %(quota)d
--domain-recordsets %(quota)d
--recordset-records %(quota)d
--domain-records %(quota)d
%(user_id)s """
cmd = ' '.join(cmd.split())
quotas = self.run_designate_cli_table(cmd % dict(quota=quota,
user_id=user_id))
assert quotas['domain_records'] == str(quota)
def run(self, cmd):
"""Run a openstack client command
"""
return run_shell("%s %s" % (openstack_cli, cmd),
env=self._cli_env)
def run_json(self, cmd):
"""Run a openstack client command using JSON output
:returns: dict
:raises CalledProcessError:
"""
cmd = "%s %s -f json" % (openstack_cli, cmd)
log.debug(" running %s" % cmd)
out = check_output(cmd, env=self._cli_env, shell=True,
executable='/bin/bash')
return json.loads(out)
def runcsv(self, cmd):
"""Run a command using the -f csv flag, parse the output
and return a list of dicts
"""
cmdout = self.run(cmd + " -f csv")
header = [item.strip('"') for item in cmdout[0].split(',')]
output_rows = []
for line in cmdout[1:]:
rawvalues = line.split(',')
d = OrderedDict()
for k, v in zip(header, rawvalues):
if v.startswith('"') or v.endswith('"'):
v = v.strip('"')
else:
try:
v = int(v)
except ValueError:
v = float(v)
d[k] = v
output_rows.append(d)
return output_rows
def run_designate_cli_table(self, cmd):
"""Run a command in the designate cli expecting a table to be
returned and parse it into a dict
"""
cmdout = run_shell("%s %s" % (designate_cli_path, cmd),
env=self._cli_env)
out = {}
try:
for line in cmdout:
if not line.startswith('| '):
continue
if not line.endswith(' |'):
continue
k = line.split('|')[1].strip()
v = line.split('|')[2].strip()
out[k] = v
except Exception:
log.error("Unable to parse output into a dict:")
for line in cmdout:
log.error(line)
log.error("-----------------------------------")
raise
return out
class TrafficControl(object):
"""Configure Linux Traffic Control to simulate a real network
"""
protocol_marks = dict(
mysql=1,
dns_udp=2,
dns_tcp=3,
)
def run_tc(self, cmd):
return run_shell("%s %s %s" % (sudo_path, tc_path, cmd))
def _apply_iptables_conf(self, ipt_conf):
tf = NamedTemporaryFile()
tf.file.write(ipt_conf)
tf.file.flush()
run_shell("%s %s %s" % (sudo_path, iptables_restore_path, tf.name))
tf.file.close()
def cleanup_iptables_marking(self):
# Currently unneeded
ipt_conf = """
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
COMMIT
*mangle
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
COMMIT
"""
self._apply_iptables_conf(ipt_conf)
def setup_iptables_marking(self):
# Currently unneeded
ipt_conf = """
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
COMMIT
*mangle
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
-A PREROUTING -i lo -p tcp -m tcp --dport 3306 -j MARK --set-xmark %(mysql)s
-A PREROUTING -i lo -p tcp -m tcp --sport 3306 -j MARK --set-xmark %(mysql)s
-A PREROUTING -i lo -p tcp -m tcp --dport 53 -j MARK --set-xmark %(dns_tcp)s
-A PREROUTING -i lo -p tcp -m tcp --sport 53 -j MARK --set-xmark %(dns_tcp)s
-A PREROUTING -i lo -p udp -m udp --dport 53 -j MARK --set-xmark %(dns_udp)s
-A PREROUTING -i lo -p udp -m udp --sport 53 -j MARK --set-xmark %(dns_udp)s
COMMIT
"""
marks = dict((k, "0x%d/0xffffffff" % v)
for k, v in self.protocol_marks.items())
ipt_conf = ipt_conf % marks
self._apply_iptables_conf(ipt_conf)
def cleanup_tc(self):
"""Clean up tc conf
"""
out = self.run_tc('qdisc show dev lo')
if out:
log.debug("Cleaning up tc conf")
self.run_tc('qdisc del dev lo root')
else:
log.debug("No tc conf to be cleaned up")
def setup_tc(self, dns_latency_ms=0, dns_packet_loss_perc=0,
db_latency_ms=1, db_packet_loss_perc=1):
"""Setup traffic control
"""
self.cleanup_tc()
# Create HTB at the root
self.run_tc("qdisc add dev lo handle 1: root htb")
self.run_tc("class add dev lo parent 1: classid 1:5 htb rate 1000Mbps")
self.run_tc("class add dev lo parent 1: classid 1:7 htb rate 1000Mbps")
# TCP DNS
self._setup_tc_block('1:8', 'tcp', 53, dns_latency_ms,
dns_packet_loss_perc)
# UDP DNS
self._setup_tc_block('1:9', 'udp', 53, dns_latency_ms,
dns_packet_loss_perc)
# TCP mDNS
self._setup_tc_block('1:10', 'tcp', 5354, dns_latency_ms,
dns_packet_loss_perc)
# UDP mDNS
self._setup_tc_block('1:11', 'udp', 5354, dns_latency_ms,
dns_packet_loss_perc)
# MySQL
self._setup_tc_block('1:12', 'tcp', 3306, 1, 1)
# RabbitMQ port: 5672
self._setup_tc_block('1:13', 'tcp', 5672, 1, 1)
# MemcacheD
self._setup_tc_block('1:14', 'tcp', 11211, 1, 1)
def _setup_tc_block(self, class_id, proto, port, latency_ms,
packet_loss_perc):
"""Setup tc htb entry, netem and filter"""
assert proto in ('tcp', 'udp')
cmd = "class add dev lo parent 1: classid %s htb rate 1000Mbps" % \
class_id
self.run_tc(cmd)
self._setup_netem(class_id, latency_ms, latency_ms, packet_loss_perc)
self._setup_filter(proto, 'sport %d' % port, class_id)
self._setup_filter(proto, 'dport %d' % port, class_id)
def _setup_netem(self, classid, latency1, latency2, loss_perc):
"""Setup tc netem
"""
# This could be done with the FireQOS tool instead:
# https://firehol.org/tutorial/fireqos-new-user/
cmd = ("qdisc add dev lo parent {cid} netem"
" corrupt 0.1%"
" delay {lat1}ms {lat2}ms distribution normal"
" duplicate 0.1%"
" loss {packet_loss_perc}%"
" reorder 25% 50%")
cmd = cmd.format(cid=classid, lat1=latency1, lat2=latency2,
packet_loss_perc=loss_perc)
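# e.g. for class 1:9 with 100ms latency and 15% packet loss this runs:
#   tc qdisc add dev lo parent 1:9 netem corrupt 0.1% delay 100ms 100ms distribution normal duplicate 0.1% loss 15% reorder 25% 50%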
self.run_tc(cmd)
def _setup_filter(self, protocol, filter, flowid):
"""Setup tc filter
"""
protocol_nums = dict(tcp=6, udp=17)
pnum = protocol_nums[protocol]
cmd = "filter add dev lo protocol ip prio 1 u32 match ip protocol " \
"%(pnum)d 0xff match ip %(filter)s 0xffff flowid %(flowid)s"
self.run_tc(cmd % dict(pnum=pnum, filter=filter, flowid=flowid))
class Digger(object):
def __init__(self):
self.ns_ipaddr = self.get_nameserver_ipaddr()
self._setup_resolver()
self.max_probes_per_second = 30
self.reset_goals()
@property
def prober_is_running(self):
try:
return self._prober_thread.is_alive()
except AttributeError:
return False
def _setup_resolver(self, timeout=1):
resolver = dns.resolver.Resolver(configure=False)
resolver.timeout = timeout
resolver.lifetime = timeout
resolver.nameservers = [self.ns_ipaddr]
self.resolver = resolver
def get_nameserver_ipaddr(self):
# FIXME: find a better way to do this
out = run_shell('sudo netstat -nlpt | grep pdns_server')
ipaddr = out[0].split()[3]
ipaddr = ipaddr.split(':', 1)[0]
log.debug("Resolver ipaddr: %s" % ipaddr)
return ipaddr
def query_a_record(self, record_name, timeout=3):
try:
answer = self.resolver.query(record_name, 'A')
if answer.rrset:
return answer.rrset[0].address
except Exception:
return None
def query_soa(self, zone_name, timeout=3):
try:
soa_answer = self.resolver.query(zone_name, 'SOA')
soa_serial = soa_answer[0].serial
return soa_serial
except Exception:
return None
def reset_goals(self):
assert not self.prober_is_running
self.goals = set()
self.summary = dict(
success_cnt=0,
total_time_to_success=0,
)
def add_goal(self, goal):
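# a goal is a tuple such as ('record_a', record_name, ipaddr) or
# ('zone_serial_ge', zone_name, serial); the creation timestamp is
# appended here so the prober can report time-to-success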
self.goals.add(goal + (time.time(), ))
def _print_summary(self, final=True):
"""Log out a summary of the current run
"""
remaining = len(self.goals)
success_cnt = self.summary['success_cnt']
try:
avg_t = (self.summary['total_time_to_success'] / success_cnt)
avg_t = ", avg time to success: %2.3fs" % avg_t
except ZeroDivisionError:
avg_t = ''
logf = log.info if final else log.debug
logf(" test summary: success %3d, remaining %3d %s" % (
success_cnt, remaining, avg_t))
def _probe_resolver(self):
"""Probe the local resolver, report achieved goals
"""
log.debug("Starting prober")
assert self.prober_is_running is True
self._progress_report_time = 0
now = time.time()
while (self.goals or not self.prober_can_stop) and \
now < self.prober_timeout_time:
for goal in tuple(self.goals):
goal_type = goal[0]
if goal_type == 'zone_serial_ge':
goal_type, zone_name, serial, t0 = goal
actual_serial = self.query_soa(zone_name)
if actual_serial and actual_serial >= serial:
deltat = time.time() - t0
log.debug(" reached %s in %.3fs" % (repr(goal),
deltat))
self.goals.discard(goal)
self.summary['success_cnt'] += 1
self.summary['total_time_to_success'] += deltat
elif goal_type == 'record_a':
goal_type, record_name, ipaddr, t0 = goal
actual_ipaddr = self.query_a_record(record_name)
if actual_ipaddr == ipaddr:
deltat = time.time() - t0
log.debug(" reached %s in %.3fs" % (repr(goal),
deltat))
self.goals.discard(goal)
self.summary['success_cnt'] += 1
self.summary['total_time_to_success'] += deltat
else:
log.error("Unknown goal %r" % goal)
if time.time() < self.prober_timeout_time:
time.sleep(1.0 / self.max_probes_per_second)
else:
break
if time.time() > self._progress_report_time:
self._print_summary(final=False)
self._progress_report_time = time.time() + 10
time.sleep(1.0 / self.max_probes_per_second)
now = time.time()
if now > self.prober_timeout_time:
log.info("prober timed out after %d s" % (
now - self.prober_start_time))
self._print_summary()
def probe_resolver(self, timeout=600):
"""Probe the local resolver in a dedicated thread until all
goals have been achieved or a timeout occurs
"""
assert not self.prober_is_running
self.prober_can_stop = False
self.prober_start_time = time.time()
self.prober_timeout_time = self.prober_start_time + timeout
self._prober_thread = Thread(target=self._probe_resolver)
self._prober_thread.daemon = True
self._prober_thread.start()
def stop_prober(self):
self.prober_can_stop = True
self.prober_timeout_time = 0
def wait_on_prober(self):
self.prober_can_stop = True
self._prober_thread.join()
assert self.prober_is_running is False
def list_zones(cli):
zones = [z["name"] for z in cli.run_json('zone list')]
log.debug("Found zones: %r", zones)
return zones
def delete_zone_by_name(cli, zn, ignore_missing=False):
if ignore_missing:
# Return if the zone is not present
zones = list_zones(cli)
if zn not in zones:
return
cli.run('zone delete %s' % zn)
def create_and_probe_a_record(cli, digger, zone_id, record_name, ipaddr):
cli.run_json('recordset create %s %s --type A --records %s' %
(zone_id, record_name, ipaddr))
digger.add_goal(('record_a', record_name, ipaddr))
def delete_all_zones(cli):
zones = list_zones(cli)
log.info("%d zones to be deleted" % len(zones))
for zone in zones:
log.info("Deleting %s", zone)
delete_zone_by_name(cli, zone)
def create_zone_with_retry_on_duplicate(cli, digger, zn, timeout=300,
dig=False):
"""Create a zone, retry when a duplicate is found,
optionally monitor for propagation
:returns: dict
"""
t0 = time.time()
timeout_time = timeout + t0
created = False
while time.time() < timeout_time:
try:
output = cli.run_json(
"zone create %s --email devstack@example.org" % zn)
created = True
log.debug(" zone created after %f" % (time.time() - t0))
break
except CalledProcessError as e:
if e.output == 'Duplicate Zone':
# dup zone, sleep and retry
time.sleep(1)
pass
elif e.output == 'over_quota':
raise RuntimeError('over_quota')
else:
raise
assert output['serial']
if not created:
raise RuntimeError('timeout')
if dig:
digger.reset_goals()
digger.add_goal(('zone_serial_ge', zn, int(output['serial'])))
digger.probe_resolver(timeout=timeout)
digger.wait_on_prober()
return output
def test_create_list_delete_loop(cli, digger, cycles_num, zn='cld.org.'):
"""Create, list, delete a zone in a loop
Monitor for propagation time
"""
log.info("Test zone creation, list, deletion")
delete_zone_by_name(cli, zn, ignore_missing=True)
for cycle_cnt in range(cycles_num):
zone = create_zone_with_retry_on_duplicate(cli, digger, zn, dig=True)
zones = cli.runcsv('domain-list')
assert any(z['name'] == zn for z in zones), zones
cli.run('domain-delete %s' % zone['id'])
zones = cli.runcsv('domain-list')
assert not any(z['name'] == zn for z in zones), zones
log.info("done")
def test_one_big_zone(cli, digger, zone_size):
"""Create a zone with many records,
perform CRUD on records and monitor for propagation time
"""
t0 = time.time()
zn = 'bigzone-%s.org.' % gen_random_name(12)
delete_zone_by_name(cli, zn, ignore_missing=True)
zone = create_zone_with_retry_on_duplicate(cli, digger, zn, dig=True)
assert 'serial' in zone, zone
assert 'id' in zone, zone
try:
digger.reset_goals()
digger.add_goal(('zone_serial_ge', zn, int(zone['serial'])))
digger.probe_resolver(timeout=60)
record_creation_threads = []
for record_num in range(zone_size):
record_name = "rec%d" % record_num
ipaddr = "127.%d.%d.%d" % (
(record_num >> 16) % 256,
(record_num >> 8) % 256,
record_num % 256,
)
t = Thread(target=create_and_probe_a_record,
args=(cli, digger, zone['id'], record_name, ipaddr))
t.start()
record_creation_threads.append(t)
time.sleep(.5)
digger.wait_on_prober()
except KeyboardInterrupt:
log.info("Exiting on keyboard")
raise
finally:
digger.stop_prober()
delete_zone_by_name(cli, zone['name'])
log.info("Done in %ds" % (time.time() - t0))
def test_servers_are_configured(cli):
servers = cli.runcsv('server-list')
assert servers[0]['name'] == 'ns1.devstack.org.'
log.info("done")
def test_big_zone(args, cli, digger, tc):
log.info("Test creating many records in one big zone")
dns_latencies_ms = (1, 100)
dns_packet_losses = (1, 15)
zone_size = 20
for dns_latency_ms, dns_packet_loss_perc in product(dns_latencies_ms,
dns_packet_losses):
tc.cleanup_tc()
tc.setup_tc(dns_latency_ms=dns_latency_ms,
dns_packet_loss_perc=dns_packet_loss_perc)
log.info("Running test with DNS latency %dms packet loss %d%%" % (
dns_latency_ms, dns_packet_loss_perc))
test_one_big_zone(cli, digger, zone_size)
def run_tests(args, cli, digger, tc):
"""Run all integration tests
"""
# test_servers_are_configured(cli)
# test_create_list_delete_loop(cli, digger, 10)
test_big_zone(args, cli, digger, tc)
def main():
args = parse_args()
loglevel = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(
level=loglevel,
format='%(relativeCreated)8d %(levelname)s %(funcName)20s %(message)s',
)
cli = DesignateCLI()
cli.setup_quota(10000)
digger = Digger()
delete_all_zones(cli)
tc = TrafficControl()
tc.cleanup_tc()
try:
run_tests(args, cli, digger, tc)
finally:
tc.cleanup_tc()
if __name__ == '__main__':
sys.exit(main())


@ -0,0 +1,3 @@
#!/bin/bash
IF=lo
watch -n1 "tc -p -s -d qdisc show dev $IF; echo; tc class show dev $IF; echo; tc filter show dev $IF"


@ -1,50 +1,402 @@
# plugin.sh - DevStack plugin.sh dispatch script
# Install and start **Designate** service in Devstack
function install_tatu {
...
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Get backend configuration
# -------------------------
if is_service_enabled designate && [[ -r $DESIGNATE_PLUGINS/backend-$DESIGNATE_BACKEND_DRIVER ]]; then
# Load plugin
source $DESIGNATE_PLUGINS/backend-$DESIGNATE_BACKEND_DRIVER
fi
# Helper Functions
# ----------------
function setup_colorized_logging_designate {
local conf_file=$1
local conf_section=$2
local project_var=${3:-"project_name"}
local user_var=${4:-"user_name"}
setup_colorized_logging $conf_file $conf_section $project_var $user_var
# Override the logging_context_format_string value chosen by
# setup_colorized_logging.
iniset $conf_file $conf_section logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_identity)s%(color)s] %(instance)s%(color)s%(message)s"
}
function init_tatu {
...
# DevStack Plugin
# ---------------
# cleanup_designate - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_designate {
sudo rm -rf $DESIGNATE_STATE_PATH $DESIGNATE_AUTH_CACHE_DIR
cleanup_designate_backend
}
function configure_tatu {
...
# configure_designate - Set config files, create data dirs, etc
function configure_designate {
[ ! -d $DESIGNATE_CONF_DIR ] && sudo mkdir -m 755 -p $DESIGNATE_CONF_DIR
sudo chown $STACK_USER $DESIGNATE_CONF_DIR
[ ! -d $DESIGNATE_LOG_DIR ] && sudo mkdir -m 755 -p $DESIGNATE_LOG_DIR
sudo chown $STACK_USER $DESIGNATE_LOG_DIR
# (Re)create ``designate.conf``
rm -f $DESIGNATE_CONF
# General Configuration
iniset_rpc_backend designate $DESIGNATE_CONF DEFAULT
iniset $DESIGNATE_CONF DEFAULT rpc_response_timeout 5
iniset $DESIGNATE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $DESIGNATE_CONF DEFAULT state_path $DESIGNATE_STATE_PATH
iniset $DESIGNATE_CONF DEFAULT root-helper sudo designate-rootwrap $DESIGNATE_ROOTWRAP_CONF
iniset $DESIGNATE_CONF storage:sqlalchemy connection `database_connection_url designate`
# Quota Configuration
iniset $DESIGNATE_CONF DEFAULT quota_zones $DESIGNATE_QUOTA_ZONES
iniset $DESIGNATE_CONF DEFAULT quota_zone_recordsets $DESIGNATE_QUOTA_ZONE_RECORDSETS
iniset $DESIGNATE_CONF DEFAULT quota_zone_records $DESIGNATE_QUOTA_ZONE_RECORDS
iniset $DESIGNATE_CONF DEFAULT quota_recordset_records $DESIGNATE_QUOTA_RECORDSET_RECORDS
iniset $DESIGNATE_CONF DEFAULT quota_api_export_size $DESIGNATE_QUOTA_API_EXPORT_SIZE
# Coordination Configuration
if [[ -n "$DESIGNATE_COORDINATION_URL" ]]; then
iniset $DESIGNATE_CONF coordination backend_url $DESIGNATE_COORDINATION_URL
fi
if is_service_enabled designate-pool-manager; then
# Pool Manager Configuration
iniset $DESIGNATE_CONF service:pool_manager pool_id $DESIGNATE_POOL_ID
iniset $DESIGNATE_CONF service:pool_manager cache_driver $DESIGNATE_POOL_MANAGER_CACHE_DRIVER
iniset $DESIGNATE_CONF service:pool_manager periodic_recovery_interval $DESIGNATE_PERIODIC_RECOVERY_INTERVAL
iniset $DESIGNATE_CONF service:pool_manager periodic_sync_interval $DESIGNATE_PERIODIC_SYNC_INTERVAL
# Pool Manager Cache
if [ "$DESIGNATE_POOL_MANAGER_CACHE_DRIVER" == "sqlalchemy" ]; then
iniset $DESIGNATE_CONF pool_manager_cache:sqlalchemy connection `database_connection_url designate_pool_manager`
fi
fi
# API Configuration
sudo cp $DESIGNATE_DIR/etc/designate/api-paste.ini $DESIGNATE_APIPASTE_CONF
iniset $DESIGNATE_CONF service:api enabled_extensions_v2 $DESIGNATE_ENABLED_EXTENSIONS_V2
iniset $DESIGNATE_CONF service:api enabled_extensions_admin $DESIGNATE_ENABLED_EXTENSIONS_ADMIN
iniset $DESIGNATE_CONF service:api api_base_uri $DESIGNATE_SERVICE_PROTOCOL://$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT/
iniset $DESIGNATE_CONF service:api enable_api_v2 $DESIGNATE_ENABLE_API_V2
iniset $DESIGNATE_CONF service:api enable_api_admin $DESIGNATE_ENABLE_API_ADMIN
# mDNS Configuration
iniset $DESIGNATE_CONF service:mdns listen ${DESIGNATE_SERVICE_HOST}:${DESIGNATE_SERVICE_PORT_MDNS}
# Worker Configuration
if ! is_service_enabled designate-pool-manager; then
iniset $DESIGNATE_CONF service:worker enabled True
iniset $DESIGNATE_CONF service:worker notify True
iniset $DESIGNATE_CONF service:worker poll_max_retries $DESIGNATE_POLL_RETRIES
iniset $DESIGNATE_CONF service:worker poll_retry_interval $DESIGNATE_POLL_INTERVAL
fi
# Set up Notifications/Ceilometer Integration
iniset $DESIGNATE_CONF DEFAULT notification_driver "$DESIGNATE_NOTIFICATION_DRIVER"
iniset $DESIGNATE_CONF DEFAULT notification_topics "$DESIGNATE_NOTIFICATION_TOPICS"
# Root Wrap
sudo cp $DESIGNATE_DIR/etc/designate/rootwrap.conf.sample $DESIGNATE_ROOTWRAP_CONF
iniset $DESIGNATE_ROOTWRAP_CONF DEFAULT filters_path $DESIGNATE_DIR/etc/designate/rootwrap.d root-helper
# Oslo Concurrency
iniset $DESIGNATE_CONF oslo_concurrency lock_path "$DESIGNATE_STATE_PATH"
# Set up the rootwrap sudoers for designate
local rootwrap_sudoer_cmd="$DESIGNATE_BIN_DIR/designate-rootwrap $DESIGNATE_ROOTWRAP_CONF *"
local tempfile=`mktemp`
echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudoer_cmd" >$tempfile
chmod 0440 $tempfile
sudo chown root:root $tempfile
sudo mv $tempfile /etc/sudoers.d/designate-rootwrap
# TLS Proxy Configuration
if is_service_enabled tls-proxy; then
# Set the service port for a proxy to take the original
iniset $DESIGNATE_CONF service:api listen ${DESIGNATE_SERVICE_HOST}:${DESIGNATE_SERVICE_PORT_INT}
else
iniset $DESIGNATE_CONF service:api listen ${DESIGNATE_SERVICE_HOST}:${DESIGNATE_SERVICE_PORT}
fi
# Setup the Keystone Integration
if is_service_enabled keystone; then
iniset $DESIGNATE_CONF service:api auth_strategy keystone
configure_auth_token_middleware $DESIGNATE_CONF designate $DESIGNATE_AUTH_CACHE_DIR
fi
# Logging Configuration
if [ "$SYSLOG" != "False" ]; then
iniset $DESIGNATE_CONF DEFAULT use_syslog True
fi
# Format logging
if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
setup_colorized_logging_designate $DESIGNATE_CONF DEFAULT "tenant" "user"
fi
# Backend Plugin Configuation
configure_designate_backend
}
# check for service enabled
if is_service_enabled tatu; then
function configure_designatedashboard {
# Compile message catalogs
if [ -d ${DESIGNATEDASHBOARD_DIR}/designatedashboard/locale ]; then
(cd ${DESIGNATEDASHBOARD_DIR}/designatedashboard; DJANGO_SETTINGS_MODULE=openstack_dashboard.settings ../manage.py compilemessages)
fi
}
if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
# Set up system services
echo_summary "Configuring system services tatu"
# Configure the needed tempest options
function configure_designate_tempest() {
if is_service_enabled tempest; then
# Tell tempest we're available
iniset $TEMPEST_CONFIG service_available designate True
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
# Perform installation of service source
echo_summary "Installing tatu"
install_tatu
# Tell tempest which APIs are available
iniset $TEMPEST_CONFIG dns_feature_enabled api_v2 $DESIGNATE_ENABLE_API_V2
iniset $TEMPEST_CONFIG dns_feature_enabled api_admin $DESIGNATE_ENABLE_API_ADMIN
iniset $TEMPEST_CONFIG dns_feature_enabled api_v2_root_recordsets True
iniset $TEMPEST_CONFIG dns_feature_enabled api_v2_quotas True
iniset $TEMPEST_CONFIG dns_feature_enabled bug_1573141_fixed True
# Tell tempest where our nameservers are.
nameservers=$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT_DNS
# TODO(kiall): Remove hardcoded list of plugins
case $DESIGNATE_BACKEND_DRIVER in
bind9|powerdns)
nameservers="$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT_DNS"
;;
akamai)
nameservers="$DESIGNATE_AKAMAI_NAMESERVERS"
;;
dynect)
nameservers="$DESIGNATE_DYNECT_NAMESERVERS"
;;
esac
if [ ! -z "$DESIGNATE_NAMESERVERS" ]; then
nameservers=$DESIGNATE_NAMESERVERS
fi
iniset $TEMPEST_CONFIG dns nameservers $nameservers
# For legacy functional tests
iniset $TEMPEST_CONFIG designate nameservers $nameservers
fi
}
# create_designate_accounts - Set up common required designate accounts
# Tenant User Roles
# ------------------------------------------------------------------
# service designate admin # if enabled
function create_designate_accounts {
if is_service_enabled designate-api; then
create_service_user "designate"
get_or_create_service "designate" "dns" "Designate DNS Service"
get_or_create_endpoint "dns" \
"$REGION_NAME" \
"$DESIGNATE_SERVICE_PROTOCOL://$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT/"
fi
}
# create_designate_pool_configuration - Create Pool Configuration
function create_designate_pool_configuration {
# Sync Pools Config
designate-manage pool update --file $DESIGNATE_CONF_DIR/pools.yaml
# Allow Backends to do backend specific tasks
if function_exists create_designate_pool_configuration_backend; then
create_designate_pool_configuration_backend
fi
}
# init_designate - Initialize etc.
function init_designate {
# Create cache dir
sudo mkdir -p $DESIGNATE_AUTH_CACHE_DIR
sudo chown $STACK_USER $DESIGNATE_AUTH_CACHE_DIR
rm -f $DESIGNATE_AUTH_CACHE_DIR/*
# Some Designate Backends require mdns be bound to port 53, make that
# doable.
sudo setcap 'cap_net_bind_service=+ep' $(readlink -f /usr/bin/python)
# (Re)create designate database
recreate_database designate utf8
# Init and migrate designate database
designate-manage database sync
if [ "$DESIGNATE_POOL_MANAGER_CACHE_DRIVER" == "sqlalchemy" ]; then
# (Re)create designate_pool_manager cache
recreate_database designate_pool_manager utf8
# Init and migrate designate pool-manager-cache
designate-manage pool-manager-cache sync
fi
init_designate_backend
}
# install_designate - Collect source and prepare
function install_designate {
if is_ubuntu; then
install_package libcap2-bin
elif is_fedora; then
# bind-utils package provides `dig`
install_package libcap bind-utils
fi
git_clone $DESIGNATE_REPO $DESIGNATE_DIR $DESIGNATE_BRANCH
setup_develop $DESIGNATE_DIR
install_designate_backend
}
# install_designateclient - Collect source and prepare
function install_designateclient {
if use_library_from_git "python-designateclient"; then
git_clone_by_name "python-designateclient"
setup_dev_lib "python-designateclient"
else
pip_install_gr "python-designateclient"
fi
}
# install_designatedashboard - Collect source and prepare
function install_designatedashboard {
git_clone_by_name "designate-dashboard"
setup_dev_lib "designate-dashboard"
for panel in _1710_project_dns_panel_group.py \
_1720_project_dns_panel.py \
_1721_dns_zones_panel.py \
_1722_dns_reversedns_panel.py; do
ln -fs $DESIGNATEDASHBOARD_DIR/designatedashboard/enabled/$panel $HORIZON_DIR/openstack_dashboard/local/enabled/$panel
done
}
# install_designatetempest - Collect source and prepare
function install_designatetempest {
git_clone_by_name "designate-tempest-plugin"
setup_dev_lib "designate-tempest-plugin"
}
# start_designate - Start running processes
function start_designate {
start_designate_backend
run_process designate-central "$DESIGNATE_BIN_DIR/designate-central --config-file $DESIGNATE_CONF"
run_process designate-api "$DESIGNATE_BIN_DIR/designate-api --config-file $DESIGNATE_CONF"
run_process designate-mdns "$DESIGNATE_BIN_DIR/designate-mdns --config-file $DESIGNATE_CONF"
run_process designate-agent "$DESIGNATE_BIN_DIR/designate-agent --config-file $DESIGNATE_CONF"
run_process designate-sink "$DESIGNATE_BIN_DIR/designate-sink --config-file $DESIGNATE_CONF"
if is_service_enabled designate-pool-manager; then
run_process designate-pool-manager "$DESIGNATE_BIN_DIR/designate-pool-manager --config-file $DESIGNATE_CONF"
run_process designate-zone-manager "$DESIGNATE_BIN_DIR/designate-zone-manager --config-file $DESIGNATE_CONF"
else
run_process designate-worker "$DESIGNATE_BIN_DIR/designate-worker --config-file $DESIGNATE_CONF"
run_process designate-producer "$DESIGNATE_BIN_DIR/designate-producer --config-file $DESIGNATE_CONF"
fi
# Start proxies if enabled
if is_service_enabled designate-api && is_service_enabled tls-proxy; then
start_tls_proxy designate-api '*' $DESIGNATE_SERVICE_PORT $DESIGNATE_SERVICE_HOST $DESIGNATE_SERVICE_PORT_INT &
fi
if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $DESIGNATE_SERVICE_PROTOCOL://$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT; do sleep 1; done"; then
die $LINENO "Designate did not start"
fi
}
# stop_designate - Stop running processes
function stop_designate {
stop_process designate-central
stop_process designate-api
stop_process designate-pool-manager
stop_process designate-zone-manager
stop_process designate-mdns
stop_process designate-agent
stop_process designate-sink
stop_process designate-worker
stop_process designate-producer
stop_designate_backend
}
# This is the main for plugin.sh
if is_service_enabled designate; then
# Sanity check for agent backend
# ------------------------------
if ! is_service_enabled designate-agent && [ "$DESIGNATE_BACKEND_DRIVER" == "agent" ]; then
die $LINENO "To use the agent backend, you must enable the designate-agent service"
fi
if [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing Designate client"
install_designateclient
echo_summary "Installing Designate"
install_designate
if is_service_enabled horizon; then
echo_summary "Installing Designate dashboard"
install_designatedashboard
fi
if is_service_enabled tempest; then
echo_summary "Installing Designate Tempest Plugin"
install_designatetempest
fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
# Configure after the other layer 1 and 2 services have been configured
echo_summary "Configuring tatu"
configure_tatu
echo_summary "Configuring Designate"
configure_designate
if is_service_enabled horizon; then
echo_summary "Configuring Designate dashboard"
configure_designatedashboard
fi
if is_service_enabled keystone; then
echo_summary "Creating Designate Keystone accounts"
create_designate_accounts
fi
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
# Initialize and start the tatu service
echo_summary "Initializing tatu"
init_tatu
echo_summary "Initializing Designate"
init_designate
echo_summary "Starting Designate"
start_designate
echo_summary "Creating Pool Configuration"
create_designate_pool_configuration
elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
echo_summary "Configuring Tempest options for Designate"
configure_designate_tempest
fi
if [[ "$1" == "unstack" ]]; then
# Shut down tatu services
# no-op
:
stop_designate
fi
if [[ "$1" == "clean" ]]; then
# Remove state and transient data
# Remember clean.sh first calls unstack.sh
# no-op
:
echo_summary "Cleaning Designate"
cleanup_designate
fi
fi
fi
# Restore xtrace
$XTRACE


@ -1 +1,92 @@
enable_service tatu
define_plugin tatu
plugin_requires tatu designate
plugin_requires tatu barbican
plugin_requires tatu dragonflow # TODO(pino): remove this dependency
# Default options
TATU_USE_BARBICAN=${TATU_USE_BARBICAN:-"True"}
TATU_USE_PAT_BASTIONS=${TATU_USE_PAT_BASTIONS:-"True"}
TATU_TOTAL_PAT_BASTIONS=${TATU_TOTAL_PAT_BASTIONS:-2}
TATU_PAT_BASTIONS_PER_INSTANCE=${TATU_PAT_BASTIONS_PER_INSTANCE:-2}
TATU_DNS_ZONE_NAME=${TATU_DNS_ZONE_NAME:-}
DESIGNATE_BACKEND_DRIVER=${DESIGNATE_BACKEND_DRIVER:=bind9}
DESIGNATE_AGENT_BACKEND_DRIVER=${DESIGNATE_AGENT_BACKEND_DRIVER:-"fake"}
DESIGNATE_POOL_MANAGER_CACHE_DRIVER=${DESIGNATE_POOL_MANAGER_CACHE_DRIVER:-memcache}
DESIGNATE_POOL_ID=${DESIGNATE_POOL_ID:-794ccc2c-d751-44fe-b57f-8894c9f5c842}
DESIGNATE_DEFAULT_NS_RECORD=${DESIGNATE_DEFAULT_NS_RECORD:-ns1.devstack.org.}
DESIGNATE_NOTIFICATION_DRIVER=${DESIGNATE_NOTIFICATION_DRIVER:-}
DESIGNATE_NOTIFICATION_TOPICS=${DESIGNATE_NOTIFICATION_TOPICS:-notifications}
DESIGNATE_PERIODIC_RECOVERY_INTERVAL=${DESIGNATE_PERIODIC_RECOVERY_INTERVAL:-120}
DESIGNATE_PERIODIC_SYNC_INTERVAL=${DESIGNATE_PERIODIC_SYNC_INTERVAL:-1800}
DESIGNATE_COORDINATION_URL=${DESIGNATE_COORDINATION_URL:-}
DESIGNATE_POLL_INTERVAL=${DESIGNATE_POLL_INTERVAL:-5}
DESIGNATE_POLL_RETRIES=${DESIGNATE_POLL_RETRIES:-6}
# Quota Options
DESIGNATE_QUOTA_ZONES=${DESIGNATE_QUOTA_ZONES:-100}
DESIGNATE_QUOTA_ZONE_RECORDSETS=${DESIGNATE_QUOTA_ZONE_RECORDSETS:-500}
DESIGNATE_QUOTA_ZONE_RECORDS=${DESIGNATE_QUOTA_ZONE_RECORDS:-500}
DESIGNATE_QUOTA_RECORDSET_RECORDS=${DESIGNATE_QUOTA_RECORDSET_RECORDS:-20}
DESIGNATE_QUOTA_API_EXPORT_SIZE=${DESIGNATE_QUOTA_API_EXPORT_SIZE:-1000}
# Default APIs and Extensions
DESIGNATE_ENABLE_API_V2=${DESIGNATE_ENABLE_API_V2:-"True"}
DESIGNATE_ENABLE_API_ADMIN=${DESIGNATE_ENABLE_API_ADMIN:-"True"}
DESIGNATE_ENABLED_EXTENSIONS_V2=${DESIGNATE_ENABLED_EXTENSIONS_V2:-""}
DESIGNATE_ENABLED_EXTENSIONS_ADMIN=${DESIGNATE_ENABLED_EXTENSIONS_ADMIN:-"quotas"}
# Public facing bits
if is_service_enabled tls-proxy; then
DESIGNATE_SERVICE_PROTOCOL="https"
fi
# Default IP/port settings
DESIGNATE_SERVICE_PROTOCOL=${DESIGNATE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
DESIGNATE_SERVICE_HOST=${DESIGNATE_SERVICE_HOST:-$SERVICE_HOST}
DESIGNATE_SERVICE_PORT=${DESIGNATE_SERVICE_PORT:-9001}
DESIGNATE_SERVICE_PORT_INT=${DESIGNATE_SERVICE_PORT_INT:-19001}
DESIGNATE_SERVICE_PORT_DNS=${DESIGNATE_SERVICE_PORT_DNS:-53}
DESIGNATE_SERVICE_PORT_MDNS=${DESIGNATE_SERVICE_PORT_MDNS:-5354}
DESIGNATE_SERVICE_PORT_AGENT=${DESIGNATE_SERVICE_PORT_AGENT:-5358}
# Default directories
DESIGNATE_BIN_DIR=$(get_python_exec_prefix)
DESIGNATE_DIR=$DEST/designate
DESIGNATEDASHBOARD_DIR=$DEST/designate-dashboard
DESIGNATE_CONF_DIR=/etc/designate
DESIGNATE_STATE_PATH=${DESIGNATE_STATE_PATH:=$DATA_DIR/designate}
DESIGNATE_CONF=$DESIGNATE_CONF_DIR/designate.conf
DESIGNATE_LOG_DIR=/var/log/designate
DESIGNATE_AUTH_CACHE_DIR=${DESIGNATE_AUTH_CACHE_DIR:-/var/cache/designate}
DESIGNATE_ROOTWRAP_CONF=$DESIGNATE_CONF_DIR/rootwrap.conf
DESIGNATE_APIPASTE_CONF=$DESIGNATE_CONF_DIR/api-paste.ini
DESIGNATE_PLUGINS=$DESIGNATE_DIR/devstack/designate_plugins
# Default repositories
DESIGNATE_REPO=${DESIGNATE_REPO:-${GIT_BASE}/openstack/designate.git}
DESIGNATE_BRANCH=${DESIGNATE_BRANCH:-master}
GITREPO["designate-dashboard"]=${DESIGNATEDASHBOARD_REPO:-${GIT_BASE}/openstack/designate-dashboard.git}
GITBRANCH["designate-dashboard"]=${DESIGNATEDASHBOARD_BRANCH:-master}
GITDIR["designate-dashboard"]=$DEST/designate-dashboard
GITREPO["python-designateclient"]=${DESIGNATECLIENT_REPO:-${GIT_BASE}/openstack/python-designateclient.git}
GITBRANCH["python-designateclient"]=${DESIGNATECLIENT_BRANCH:-master}
GITDIR["python-designateclient"]=$DEST/python-designateclient
GITREPO["designate-tempest-plugin"]=${DESIGNATETEMPEST_REPO:-${GIT_BASE}/openstack/designate-tempest-plugin.git}
GITBRANCH["designate-tempest-plugin"]=${DESIGNATETEMPEST_BRANCH:-master}
GITDIR["designate-tempest-plugin"]=$DEST/designate-tempest-plugin
# Tell Tempest this project is present
TEMPEST_SERVICES+=,designate
# Turn on all Designate services by default
enable_service designate
enable_service designate-central
enable_service designate-api
enable_service designate-worker
enable_service designate-producer
enable_service designate-mdns
enable_service designate-agent
enable_service designate-sink


@ -0,0 +1,61 @@
#!/usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A simple mock UDP server to receive monasca-statsd traffic
Log to stdout or to a file.
"""
from argparse import ArgumentParser
import sys
from time import gmtime
from time import strftime
import SocketServer
def parse_args():
ap = ArgumentParser()
ap.add_argument('--addr', default='127.0.0.1',
help='Listen IP addr (default: 127.0.0.1)')
ap.add_argument('--port', default=8125, type=int,
help='UDP port (default: 8125)')
ap.add_argument('--output-fname', default=None,
help='Output file (default: stdout)')
return ap.parse_args()
class StatsdMessageHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = self.request[0].strip()
tstamp = strftime("%Y-%m-%dT%H:%M:%S", gmtime())
if self._output_fd:
self._output_fd.write("%s %s\n" % (tstamp, data))
else:
print("%s %s" % (tstamp, data))
def main():
args = parse_args()
fd = open(args.output_fname, 'a') if args.output_fname else None
StatsdMessageHandler._output_fd = fd
server = SocketServer.UDPServer(
(args.addr, args.port),
StatsdMessageHandler,
)
server.serve_forever()
if __name__ == "__main__":
sys.exit(main())
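For a quick manual check, the server can be exercised from a shell; the
filename and metric string below are illustrative (the script targets
Python 2, given the SocketServer import):

# start the mock server in the background
python mock_statsd.py --port 8125 --output-fname /tmp/statsd.log &
# send a sample statsd counter over UDP
echo -n "designate.central.rpc.count:1|c" | nc -u -w1 127.0.0.1 8125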


@ -0,0 +1,195 @@
#!/bin/bash
set -o errexit
source $GRENADE_DIR/grenaderc
source $GRENADE_DIR/functions
source $TOP_DIR/openrc admin admin
source $TOP_DIR/stackrc
set -o xtrace
DESIGNATE_PROJECT=designate_grenade
DESIGNATE_USER=designate_grenade
DESIGNATE_PASS=designate_grenade
DESIGNATE_ZONE_NAME=example.com.
DESIGNATE_ZONE_EMAIL=hostmaster@example.com
DESIGNATE_RRSET_NAME=www.example.com.
DESIGNATE_RRSET_TYPE=A
DESIGNATE_RRSET_RECORD=10.0.0.1
# used with dig to look up in DNS
DIG_FLAGS="-p $DESIGNATE_SERVICE_PORT_DNS @$SERVICE_HOST"
DIG_TIMEOUT=30
function _set_designate_user {
OS_TENANT_NAME=$DESIGNATE_PROJECT
OS_PROJECT_NAME=$DESIGNATE_PROJECT
OS_USERNAME=$DESIGNATE_USER
OS_PASSWORD=$DESIGNATE_PASS
}
function _ensure_recordset_present {
local record_name=$1
local record_type=$2
local record_value=$3
if [ "$DESIGNATE_BACKEND_DRIVER" = "fake" ] ; then
# if the backend is fake, there will be no actual DNS records
return 0
fi
if ! timeout $DIG_TIMEOUT sh -c "while ! dig +short $DIG_FLAGS $record_name $record_type | grep \"$record_value\"; do sleep 1; done"; then
die $LINENO "Error: record $record_name ($record_type) not found in DNS"
fi
# Display for debugging
dig $DIG_FLAGS $record_name $record_type
return 0
}
function create {
# create a tenant for the server
eval $(openstack project create -f shell -c id $DESIGNATE_PROJECT)
if [[ -z "$id" ]]; then
die $LINENO "Didn't create $DESIGNATE_PROJECT project"
fi
resource_save designate project_id $id
local project_id=$id
# create the user, and set $id locally
eval $(openstack user create $DESIGNATE_USER \
--project $project_id \
--password $DESIGNATE_PASS \
-f shell -c id)
if [[ -z "$id" ]]; then
die $LINENO "Didn't create $DESIGNATE_USER user"
fi
resource_save designate user_id $id
# BUG(sdague): this really shouldn't be required, in Keystone v2 a
# user created in a project was assigned to that project, in v3 it
# is not - https://bugs.launchpad.net/keystone/+bug/1662911
openstack role add Member --user $id --project $project_id
_set_designate_user
# Create a zone, and save the id
eval $(openstack zone create --email $DESIGNATE_ZONE_EMAIL \
$DESIGNATE_ZONE_NAME \
-f shell -c id)
resource_save designate zone_id $id
eval $(openstack recordset create --records $DESIGNATE_RRSET_RECORD \
--type $DESIGNATE_RRSET_TYPE \
$DESIGNATE_ZONE_NAME \
$DESIGNATE_RRSET_NAME \
-f shell -c id)
resource_save designate rrset_id $id
# wait until rrset moves to active state
local timeleft=1000
while [[ $timeleft -gt 0 ]]; do
local status
eval $(openstack recordset show $DESIGNATE_ZONE_NAME \
$DESIGNATE_RRSET_NAME \
-f shell -c status)
if [[ "$status" != "ACTIVE" ]]; then
if [[ "$cluster_state" == "Error" ]]; then
die $LINENO "Zone is in Error state"
fi
echo "Zone is still not in Active state"
sleep 10
timeleft=$((timeleft - 10))
if [[ $timeleft == 0 ]]; then
die $LINENO "Zone hasn't moved to Active state \
during 1000 seconds"
fi
else
break
fi
done
}
function verify {
_set_designate_user
# check that the zone and recordset are still in ACTIVE state
local zone_id
zone_id=$(resource_get designate zone_id)
local rrset_id
rrset_id=$(resource_get designate rrset_id)
eval $(openstack zone show $zone_id -f shell -c status)
echo -n $status
if [[ "$status" != "ACTIVE" ]]; then
die $LINENO "Zone is not in Active state anymore"
fi
eval $(openstack recordset show $zone_id $rrset_id -f shell -c status)
echo -n $status
if [[ "$status" != "ACTIVE" ]]; then
die $LINENO "Recordset is not in Active state anymore"
fi
echo "Designate verification: SUCCESS"
}
function verify_noapi {
_ensure_recordset_present $DESIGNATE_RRSET_NAME $DESIGNATE_RRSET_TYPE $DESIGNATE_RRSET_RECORD
}
function destroy {
_set_designate_user
set +o errexit
# delete zone
local zone_id
zone_id=$(resource_get designate zone_id)
openstack zone delete $zone_id > /dev/null
# wait for zone deletion
local timeleft=500
while [[ $timeleft -gt 0 ]]; do
openstack zone show $zone_id > /dev/null
local rc=$?
if [[ "$rc" != 1 ]]; then
echo "Zone still exists"
sleep 5
timeleft=$((timeleft - 5))
if [[ $timeleft == 0 ]]; then
die $LINENO "Zone hasn't been deleted during 500 seconds"
fi
else
break
fi
done
}
# Dispatcher
case $1 in
"create")
create
;;
"verify_noapi")
verify_noapi
;;
"verify")
verify
;;
"destroy")
destroy
;;
"force_destroy")
set +o errexit
destroy
;;
esac

devstack/upgrade/settings Normal file

@ -0,0 +1,11 @@
register_project_for_upgrade designate
register_db_to_save designate
devstack_localrc base enable_plugin designate https://git.openstack.org/openstack/designate
devstack_localrc target enable_plugin designate https://git.openstack.org/openstack/designate
devstack_localrc base enable_service designate-api designate-central designate-producer designate-worker designate-mdns designate-agent designate-sink designate horizon
devstack_localrc target enable_service designate-api designate-central designate-producer designate-worker designate-mdns designate-agent designate-sink designate horizon
BASE_RUN_SMOKE=False
TARGET_RUN_SMOKE=False


@ -0,0 +1,38 @@
#!/bin/bash
# ``shutdown-designate``
set -o errexit
source $GRENADE_DIR/grenaderc
source $GRENADE_DIR/functions
# We need base DevStack functions for this
source $BASE_DEVSTACK_DIR/functions
source $BASE_DEVSTACK_DIR/stackrc # needed for status directory
source $BASE_DEVSTACK_DIR/lib/tls
source ${GITDIR[designate]}/devstack/plugin.sh
set -o xtrace
stop_process designate-central
stop_process designate-api
stop_process designate-mdns
stop_process designate-agent
stop_process designate-sink
if is_service_enabled designate-worker; then
stop_process designate-worker
stop_process designate-producer
else
stop_process designate-pool-manager
stop_process designate-zone-manager
fi
# sanity check that the services are actually down
ensure_services_stopped designate-api designate-central designate-mdns designate-agent designate-sink
if is_service_enabled designate-worker; then
ensure_services_stopped designate-worker designate-producer
else
ensure_services_stopped designate-pool-manager designate-zone-manager
fi


@ -0,0 +1,96 @@
#!/usr/bin/env bash
# ``upgrade-designate``
echo "*********************************************************************"
echo "Begin $0"
echo "*********************************************************************"
# Clean up any resources that may be in use
cleanup() {
set +o errexit
echo "********************************************************************"
echo "ERROR: Abort $0"
echo "********************************************************************"
# Kill ourselves to signal any calling process
trap 2; kill -2 $$
}
trap cleanup SIGHUP SIGINT SIGTERM
# Keep track of the grenade directory
RUN_DIR=$(cd $(dirname "$0") && pwd)
# Source params
source $GRENADE_DIR/grenaderc
# Import common functions
source $GRENADE_DIR/functions
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Upgrade designate
# ============
# Get functions from current DevStack
source $TARGET_DEVSTACK_DIR/stackrc
source $TARGET_DEVSTACK_DIR/lib/tls
source $(dirname $(dirname $BASH_SOURCE))/plugin.sh
source $(dirname $(dirname $BASH_SOURCE))/settings
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Save current config files for posterity
[[ -d $SAVE_DIR/etc.designate ]] || cp -pr $DESIGNATE_CONF_DIR $SAVE_DIR/etc.designate
# install_designate()
if is_ubuntu; then
install_package libcap2-bin
elif is_fedora; then
# bind-utils package provides `dig`
install_package libcap bind-utils
fi
git_clone $DESIGNATE_REPO $DESIGNATE_DIR $DESIGNATE_BRANCH
setup_develop $DESIGNATE_DIR
install_designateclient
# calls upgrade-designate for specific release
upgrade_project designate $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH
# Migrate the database
$DESIGNATE_BIN_DIR/designate-manage --config-file $DESIGNATE_CONF \
database sync || die $LINENO "DB sync error"
# Start designate
run_process designate-central "$DESIGNATE_BIN_DIR/designate-central --config-file $DESIGNATE_CONF"
run_process designate-api "$DESIGNATE_BIN_DIR/designate-api --config-file $DESIGNATE_CONF"
run_process designate-producer "$DESIGNATE_BIN_DIR/designate-producer --config-file $DESIGNATE_CONF"
run_process designate-worker "$DESIGNATE_BIN_DIR/designate-worker --config-file $DESIGNATE_CONF"
run_process designate-mdns "$DESIGNATE_BIN_DIR/designate-mdns --config-file $DESIGNATE_CONF"
run_process designate-agent "$DESIGNATE_BIN_DIR/designate-agent --config-file $DESIGNATE_CONF"
run_process designate-sink "$DESIGNATE_BIN_DIR/designate-sink --config-file $DESIGNATE_CONF"
# Start proxies if enabled
if is_service_enabled designate-api && is_service_enabled tls-proxy; then
start_tls_proxy '*' $DESIGNATE_SERVICE_PORT $DESIGNATE_SERVICE_HOST $DESIGNATE_SERVICE_PORT_INT &
fi
if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $DESIGNATE_SERVICE_PROTOCOL://$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT; do sleep 1; done"; then
die $LINENO "Designate did not start"
fi
# Don't succeed unless the services come up
ensure_services_started designate-api designate-central designate-producer designate-worker designate-mdns designate-agent designate-sink
set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End $0"
echo "*********************************************************************"

devstack_old/plugin.sh Normal file

@ -0,0 +1,50 @@
# plugin.sh - DevStack plugin.sh dispatch script
function install_tatu {
...
}
function init_tatu {
...
}
function configure_tatu {
...
}
# check for service enabled
if is_service_enabled tatu; then
if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
# Set up system services
echo_summary "Configuring system services tatu"
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
# Perform installation of service source
echo_summary "Installing tatu"
install_tatu
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
# Configure after the other layer 1 and 2 services have been configured
echo_summary "Configuring tatu"
configure_tatu
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
# Initialize and start the tatu service
echo_summary "Initializing tatu"
init_tatu
fi
if [[ "$1" == "unstack" ]]; then
# Shut down tatu services
# no-op
:
fi
if [[ "$1" == "clean" ]]; then
# Remove state and transient data
# Remember clean.sh first calls unstack.sh
# no-op
:
fi
fi

devstack_old/settings Normal file

@ -0,0 +1 @@
enable_service tatu