From 47d8d491335149d71c9f2fc024f7827bae44247d Mon Sep 17 00:00:00 2001
From: baigk <baiguoku@huawei.com>
Date: Mon, 16 Nov 2015 22:48:47 -0800
Subject: [PATCH] Merge branch "https://github.com/baigk/compass-core.git" into
 dev/opnfv

Change-Id: Ie2be5756f0c425a5d40e3092f52f245709fccbf3
Signed-off-by: baigk <baiguoku@huawei.com>
---
 bin/client.py                                 | 1858 ++++++++---------
 bin/refresh.sh                                |    9 -
 compass/actions/install_callback.py           |   26 +-
 compass/actions/util.py                       |   62 +-
 compass/db/api/cluster.py                     |   20 +-
 .../deployment/installers/config_manager.py   |  803 +++----
 .../os_installers/cobbler/cobbler.py          |   33 +
 .../ansible_installer/ansible_installer.py    |   31 +-
 .../os_installers/cobbler/test_cobbler.py     |   31 +-
 .../installers/test_config_manager.py         |    5 -
 conf/adapter/ansible_openstack.conf           |    7 -
 conf/adapter/ansible_openstack_juno.conf      |    7 +
 conf/adapter/ansible_openstack_kilo.conf      |    7 +
 ...sible.conf => openstack_juno_ansible.conf} |   13 +-
 conf/flavor/openstack_kilo_ansible.conf       |   32 +
 .../HA-ansible-multinodes-juno.conf           |   19 +
 .../HA-ansible-multinodes-kilo.conf           |   19 +
 conf/os/centos7.1.conf                        |    3 +
 conf/os/ubuntu14.04.3.conf                    |    3 +
 conf/os_metadata/general.conf                 |   16 +-
 conf/package_installer/ansible-juno.conf      |    4 +-
 conf/package_installer/ansible-kilo.conf      |   13 +
 conf/package_metadata/openstack.conf          |  303 ++-
 ...sible.conf => openstack_juno_ansible.conf} |   19 +
 conf/role/openstack_kilo_ansible.conf         |   85 +
 .../ansible_cfg/HA-ansible-multinodes.tmpl    |    7 +
 .../openstack_juno/ansible_cfg/allinone.tmpl  |    1 +
 .../ansible_cfg/multinodes.tmpl               |    1 +
 .../ansible_cfg/single-controller.tmpl        |    1 +
 .../hosts/HA-ansible-multinodes.tmpl          |   22 +
 .../inventories/HA-ansible-multinodes.tmpl    |   63 +
 .../inventories/single-controller.tmpl        |   20 +
 .../vars/HA-ansible-multinodes.tmpl           |  184 ++
 .../openstack_juno/vars/allinone.tmpl         |    6 +-
 .../openstack_juno/vars/multinodes.tmpl       |   16 +-
 .../vars/single-controller.tmpl               |    9 +-
 .../ansible_cfg/HA-ansible-multinodes.tmpl    |    7 +
 .../openstack_kilo/ansible_cfg/allinone.tmpl  |    6 +
 .../ansible_cfg/multinodes.tmpl               |    6 +
 .../ansible_cfg/single-controller.tmpl        |    6 +
 .../hosts/HA-ansible-multinodes.tmpl          |   22 +
 .../openstack_kilo/hosts/allinone.tmpl        |   10 +
 .../openstack_kilo/hosts/multinodes.tmpl      |  110 +
 .../hosts/single-controller.tmpl              |   40 +
 .../inventories/HA-ansible-multinodes.tmpl    |   63 +
 .../openstack_kilo/inventories/allinone.tmpl  |   47 +
 .../inventories/multinodes.tmpl               |  123 ++
 .../inventories/single-controller.tmpl        |   67 +
 .../vars/HA-ansible-multinodes.tmpl           |  173 ++
 .../openstack_kilo/vars/allinone.tmpl         |   96 +
 .../openstack_kilo/vars/multinodes.tmpl       |  165 ++
 .../vars/single-controller.tmpl               |  108 +
 .../profile.tmpl                              |    3 +
 .../system.tmpl                               |   76 +
 .../ubuntu-14.04.3-server-x86_64/profile.tmpl |    3 +
 .../ubuntu-14.04.3-server-x86_64/system.tmpl  |   75 +
 56 files changed, 3545 insertions(+), 1419 deletions(-)
 delete mode 100644 conf/adapter/ansible_openstack.conf
 create mode 100644 conf/adapter/ansible_openstack_juno.conf
 create mode 100644 conf/adapter/ansible_openstack_kilo.conf
 rename conf/flavor/{openstack_ansible.conf => openstack_juno_ansible.conf} (61%)
 create mode 100644 conf/flavor/openstack_kilo_ansible.conf
 create mode 100644 conf/flavor_metadata/HA-ansible-multinodes-juno.conf
 create mode 100644 conf/flavor_metadata/HA-ansible-multinodes-kilo.conf
 create mode 100644 conf/os/centos7.1.conf
 create mode 100644 conf/os/ubuntu14.04.3.conf
 create mode 100644 conf/package_installer/ansible-kilo.conf
 rename conf/role/{openstack_ansible.conf => openstack_juno_ansible.conf} (79%)
 create mode 100644 conf/role/openstack_kilo_ansible.conf
 create mode 100644 conf/templates/ansible_installer/openstack_juno/ansible_cfg/HA-ansible-multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_juno/hosts/HA-ansible-multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_juno/inventories/HA-ansible-multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_juno/vars/HA-ansible-multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/ansible_cfg/HA-ansible-multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/ansible_cfg/allinone.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/ansible_cfg/multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/ansible_cfg/single-controller.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/hosts/HA-ansible-multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/hosts/allinone.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/hosts/multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/hosts/single-controller.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/inventories/HA-ansible-multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/inventories/allinone.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/inventories/multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/inventories/single-controller.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/vars/HA-ansible-multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/vars/allinone.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/vars/multinodes.tmpl
 create mode 100644 conf/templates/ansible_installer/openstack_kilo/vars/single-controller.tmpl
 create mode 100644 conf/templates/cobbler/CentOS-7-Minimal-1503-01-x86_64/profile.tmpl
 create mode 100644 conf/templates/cobbler/CentOS-7-Minimal-1503-01-x86_64/system.tmpl
 create mode 100644 conf/templates/cobbler/ubuntu-14.04.3-server-x86_64/profile.tmpl
 create mode 100644 conf/templates/cobbler/ubuntu-14.04.3-server-x86_64/system.tmpl

diff --git a/bin/client.py b/bin/client.py
index 350a85b8..d8eb59fc 100755
--- a/bin/client.py
+++ b/bin/client.py
@@ -15,177 +15,321 @@
 # limitations under the License.
 
 """binary to deploy a cluster by compass client api."""
-import logging
+from collections import defaultdict
+import itertools
+import json
+import netaddr
 import os
 import re
+import requests
+from restful import Client
 import socket
 import sys
 import time
+import yaml
+
+ROLE_UNASSIGNED = True
+ROLE_ASSIGNED = False
+
+import log as logging
+LOG = logging.getLogger(__name__)
+
+from oslo_config import cfg
+CONF = cfg.CONF
 
 
-current_dir = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(current_dir)
+def byteify(input):
+    if isinstance(input, dict):
+        return dict([(byteify(key), byteify(value))
+                    for key, value in input.iteritems()])
+    elif isinstance(input, list):
+        return [byteify(element) for element in input]
+    elif isinstance(input, unicode):
+        return input.encode('utf-8')
+    else:
+        return input
+
+opts = [
+    cfg.StrOpt(
+        'compass_server',
+        help='compass server url',
+        default='http://127.0.0.1/api'
+    ),
+    cfg.StrOpt(
+        'compass_user_email',
+        help='compass user email',
+        default='admin@huawei.com'
+    ),
+    cfg.StrOpt(
+        'compass_user_password',
+        help='compass user password',
+        default='admin'
+    ),
+    cfg.StrOpt(
+        'switch_ips',
+        help='comma seperated switch ips',
+        default=''
+    ),
+    cfg.StrOpt(
+        'switch_credential',
+        help='comma separated <credential key>=<credential value>',
+        default='version=2c,community=public'
+    ),
+    cfg.IntOpt(
+        'switch_max_retries',
+        help='max retries of poll switch',
+        default=10
+    ),
+    cfg.IntOpt(
+        'switch_retry_interval',
+        help='interval to repoll switch',
+        default=10
+    ),
+    cfg.BoolOpt(
+        'poll_switches',
+        help='if the client polls switches',
+        default=True
+    ),
+    cfg.StrOpt(
+        'machines',
+        help='comma separated mac addresses of machines',
+        default=''
+    ),
+    cfg.StrOpt(
+        'subnets',
+        help='comma seperated subnets',
+        default=''
+    ),
+    cfg.StrOpt(
+        'adapter_name',
+        help='adapter name',
+        default=''
+    ),
+    cfg.StrOpt(
+        'adapter_os_pattern',
+        help='adapter os name',
+        default=r'^(?i)centos.*'
+    ),
+    cfg.StrOpt(
+        'adapter_target_system_pattern',
+        help='adapter target system name',
+        default='^openstack$'
+    ),
+    cfg.StrOpt(
+        'adapter_flavor_pattern',
+        help='adapter flavor name',
+        default='allinone'
+    ),
+    cfg.StrOpt(
+        'cluster_name',
+        help='cluster name',
+        default='cluster1'
+    ),
+    cfg.StrOpt(
+        'language',
+        help='language',
+        default='EN'
+    ),
+    cfg.StrOpt(
+        'timezone',
+        help='timezone',
+        default='GMT'
+    ),
+    cfg.StrOpt(
+        'http_proxy',
+        help='http proxy',
+        default=''
+    ),
+    cfg.StrOpt(
+        'https_proxy',
+        help='https proxy',
+        default=''
+    ),
+    cfg.StrOpt(
+        'no_proxy',
+        help='no proxy',
+        default=''
+    ),
+    cfg.StrOpt(
+        'ntp_server',
+        help='ntp server',
+        default=''
+    ),
+    cfg.StrOpt(
+        'dns_servers',
+        help='dns servers',
+        default=''
+    ),
+    cfg.StrOpt(
+        'domain',
+        help='domain',
+        default=''
+    ),
+    cfg.StrOpt(
+        'search_path',
+        help='search path',
+        default=''
+    ),
+    cfg.StrOpt(
+        'local_repo_url',
+        help='local repo url',
+        default=''
+    ),
+    cfg.StrOpt(
+        'default_gateway',
+        help='default gateway',
+        default=''
+    ),
+    cfg.StrOpt(
+        'server_credential',
+        help=(
+            'server credential formatted as '
+            '<username>=<password>'
+        ),
+        default='root=root'
+    ),
+    cfg.StrOpt(
+        'os_config_json_file',
+        help='json formatted os config file',
+        default=''
+    ),
+    cfg.StrOpt(
+        'service_credentials',
+        help=(
+            'comma seperated service credentials formatted as '
+            '<servicename>:<username>=<password>,...'
+        ),
+        default=''
+    ),
+    cfg.StrOpt(
+        'console_credentials',
+        help=(
+            'comma seperated console credential formated as '
+            '<consolename>:<username>=<password>'
+        ),
+        default=''
+    ),
+    cfg.StrOpt(
+        'hostnames',
+        help='comma seperated hostname',
+        default=''
+    ),
+    cfg.StrOpt(
+        'host_networks',
+        help=(
+            'semicomma seperated host name and its networks '
+            '<hostname>:<interface_name>=<ip>|<is_mgmt>|<is_promiscuous>,...'
+        ),
+        default=''
+    ),
+    cfg.StrOpt(
+        'partitions',
+        help=(
+            'comma seperated partitions '
+            '<partition name>=<partition_value>'
+        ),
+        default='tmp:percentage=10%,var:percentage=30%,home:percentage=30%'
+    ),
+    cfg.StrOpt(
+        'network_mapping',
+        help=(
+            'comma seperated network mapping '
+            '<network_type>=<interface_name>'
+        ),
+        default=''
+    ),
+    cfg.StrOpt(
+        'package_config_json_file',
+        help='json formatted os config file',
+        default=''
+    ),
+    cfg.StrOpt(
+        'host_roles',
+        help=(
+            'semicomma separated host roles '
+            '<hostname>=<comma separated roles>'
+        ),
+        default=''
+    ),
+    cfg.StrOpt(
+        'default_roles',
+        help=(
+            'comma seperated default roles '
+            '<rolename>'
+        ),
+        default=''
+    ),
+    cfg.IntOpt(
+        'action_timeout',
+        help='action timeout in seconds',
+        default=60
+    ),
+    cfg.IntOpt(
+        'deployment_timeout',
+        help='deployment timeout in minutes',
+        default=60
+    ),
+    cfg.IntOpt(
+        'progress_update_check_interval',
+        help='progress update status check interval in seconds',
+        default=60
+    ),
+    cfg.StrOpt(
+        'dashboard_url',
+        help='dashboard url',
+        default=''
+    ),
+    cfg.StrOpt(
+        'dashboard_link_pattern',
+        help='dashboard link pattern',
+        default=r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)'
+    ),
+    cfg.StrOpt(
+        'cluster_vip',
+        help='cluster ip address',
+        default=''
+    ),
+    cfg.StrOpt(
+        'enable_secgroup',
+        help='enable security group',
+        default='true'
+    ),
+    cfg.StrOpt(
+        'network_cfg',
+        help='netowrk config file',
+        default=''
+    ),
+    cfg.StrOpt(
+        'neutron_cfg',
+        help='netowrk config file',
+        default=''
+    ),
+    cfg.StrOpt(
+        'cluster_pub_vip',
+        help='cluster ip address',
+        default=''
+    ),
+    cfg.StrOpt(
+        'cluster_prv_vip',
+        help='cluster ip address',
+        default=''
+    ),
+    cfg.StrOpt(
+        'repo_name',
+        help='repo name',
+        default=''
+    ),
+    cfg.StrOpt(
+        'deploy_type',
+        help='deploy type',
+        default='virtual'
+    ),
+]
+CONF.register_cli_opts(opts)
 
 
-import switch_virtualenv
-
-import netaddr
-import requests
-import simplejson as json
-
-from compass.apiclient.restful import Client
-from compass.utils import flags
-from compass.utils import logsetting
-from compass.utils import util
-
-
-flags.add('compass_server',
-          help='compass server url',
-          default='http://127.0.0.1/api')
-flags.add('compass_user_email',
-          help='compass user email',
-          default='admin@huawei.com')
-flags.add('compass_user_password',
-          help='compass user password',
-          default='admin')
-flags.add('switch_ips',
-          help='comma seperated switch ips',
-          default='')
-flags.add('switch_credential',
-          help='comma separated <credential key>=<credential value>',
-          default='version=2c,community=public')
-flags.add('switch_max_retries', type='int',
-          help='max retries of poll switch',
-          default=10)
-flags.add('switch_retry_interval', type='int',
-          help='interval to repoll switch',
-          default=10)
-flags.add_bool('poll_switches',
-               help='if the client polls switches',
-               default=True)
-flags.add('machines',
-          help='comma separated mac addresses of machines',
-          default='')
-flags.add('subnets',
-          help='comma seperated subnets',
-          default='')
-flags.add('adapter_name',
-          help='adapter name',
-          default='')
-flags.add('adapter_os_pattern',
-          help='adapter os name',
-          default=r'^(?i)centos.*')
-flags.add('adapter_flavor_pattern',
-          help='adapter flavor name',
-          default='allinone')
-flags.add('cluster_name',
-          help='cluster name',
-          default='cluster1')
-flags.add('language',
-          help='language',
-          default='EN')
-flags.add('timezone',
-          help='timezone',
-          default='GMT')
-flags.add('http_proxy',
-          help='http proxy',
-          default='')
-flags.add('https_proxy',
-          help='https proxy',
-          default='')
-flags.add('no_proxy',
-          help='no proxy',
-          default='')
-flags.add('ntp_server',
-          help='ntp server',
-          default='')
-flags.add('dns_servers',
-          help='dns servers',
-          default='')
-flags.add('domain',
-          help='domain',
-          default='')
-flags.add('search_path',
-          help='search path',
-          default='')
-flags.add('local_repo_url',
-          help='local repo url',
-          default='')
-flags.add('default_gateway',
-          help='default gateway',
-          default='')
-flags.add('server_credential',
-          help=(
-              'server credential formatted as '
-              '<username>=<password>'
-          ),
-          default='root=root')
-flags.add('os_config_json_file',
-          help='json formatted os config file',
-          default='')
-flags.add('service_credentials',
-          help=(
-              'comma seperated service credentials formatted as '
-              '<servicename>:<username>=<password>,...'
-          ),
-          default='')
-flags.add('console_credentials',
-          help=(
-              'comma seperated console credential formated as '
-              '<consolename>:<username>=<password>'
-          ),
-          default='')
-flags.add('hostnames',
-          help='comma seperated hostname',
-          default='')
-flags.add('host_networks',
-          help=(
-              'semicomma seperated host name and its networks '
-              '<hostname>:<interface_name>=<ip>|<is_mgmt>|<is_promiscuous>,...'
-          ),
-          default='')
-flags.add('partitions',
-          help=(
-              'comma seperated partitions '
-              '<partition name>=<partition_value>'
-          ),
-          default='tmp:percentage=10%,var:percentage=30%,home:percentage=30%')
-flags.add('network_mapping',
-          help=(
-              'comma seperated network mapping '
-              '<network_type>=<interface_name>'
-          ),
-          default='')
-flags.add('package_config_json_file',
-          help='json formatted os config file',
-          default='')
-flags.add('host_roles',
-          help=(
-              'semicomma separated host roles '
-              '<hostname>=<comma separated roles>'
-          ),
-          default='')
-flags.add('default_roles',
-          help=(
-              'comma seperated default roles '
-              '<rolename>'
-          ),
-          default='')
-flags.add('action_timeout',
-          help='action timeout in seconds',
-          default=60)
-flags.add('deployment_timeout',
-          help='deployment timeout in minutes',
-          default=60)
-flags.add('progress_update_check_interval',
-          help='progress update status check interval in seconds',
-          default=60)
-flags.add('dashboard_url',
-          help='dashboard url',
-          default='')
-flags.add('dashboard_link_pattern',
-          help='dashboard link pattern',
-          default=r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)')
+def is_role_unassigned(role):
+    return role
 
 
 def _load_config(config_filename):
@@ -196,819 +340,667 @@ def _load_config(config_filename):
         return json.loads(content)
 
 
-def _get_client():
-    """get apiclient object."""
-    return Client(flags.OPTIONS.compass_server)
+class CompassClient(object):
+    def __init__(self):
+        LOG.info("xh: compass_server=%s" % CONF.compass_server)
+        self.client = Client(CONF.compass_server)
+        self.subnet_mapping = {}
+        self.role_mapping = {}
+        self.host_mapping = {}
+        self.host_ips = defaultdict(list)
+        self.host_roles = {}
 
+        self.login()
 
-def _login(client):
-    """get apiclient token."""
-    status, resp = client.get_token(
-        flags.OPTIONS.compass_user_email,
-        flags.OPTIONS.compass_user_password
-    )
-    logging.info(
-        'login status: %s, resp: %s',
-        status, resp
-    )
-    if status >= 400:
-        raise Exception(
-            'failed to login %s with user %s',
-            flags.OPTIONS.compass_server,
-            flags.OPTIONS.compass_user_email
+    def is_ok(self, status):
+        if status < 300 and status >= 200:
+            return True
+
+    def login(self):
+        status, resp = self.client.get_token(
+            CONF.compass_user_email,
+            CONF.compass_user_password
         )
-    return resp['token']
 
-
-def _get_machines(client):
-    """get machines connected to the switch."""
-    status, resp = client.list_machines()
-    logging.info(
-        'get all machines status: %s, resp: %s', status, resp)
-    if status >= 400:
-        msg = 'failed to get machines'
-        raise Exception(msg)
-
-    machines_to_add = set([
-        machine for machine in flags.OPTIONS.machines.split(',')
-        if machine
-    ])
-    logging.info('machines to add: %s', list(machines_to_add))
-    machines = {}
-    for machine in resp:
-        mac = machine['mac']
-        if mac in machines_to_add:
-            machines[machine['id']] = mac
-
-    logging.info('found machines: %s', machines.values())
-
-    if set(machines.values()) != machines_to_add:
-        msg = 'machines %s is missing' % (
-            list(machines_to_add - set(machines.values()))
+        LOG.info(
+            'login status: %s, resp: %s',
+            status, resp
         )
-        raise Exception(msg)
-
-    return machines
-
-
-def _poll_switches(client):
-    """get all switches."""
-    status, resp = client.list_switches()
-    logging.info('get all switches status: %s resp: %s', status, resp)
-    if status >= 400:
-        msg = 'failed to get switches'
-        raise Exception(msg)
-
-    all_switches = {}
-    for switch in resp:
-        all_switches[switch['ip']] = switch
-
-    # add a switch.
-    switch_ips = [
-        switch_ip for switch_ip in flags.OPTIONS.switch_ips.split(',')
-        if switch_ip
-    ]
-    if not switch_ips:
-        raise Exception(
-            'there is no switches to poll')
-
-    switch_credential = dict([
-        credential.split('=', 1)
-        for credential in flags.OPTIONS.switch_credential.split(',')
-        if '=' in credential
-    ])
-    for switch_ip in switch_ips:
-        if switch_ip not in all_switches:
-            status, resp = client.add_switch(switch_ip, **switch_credential)
-            logging.info('add switch %s status: %s resp: %s',
-                         switch_ip, status, resp)
-            if status >= 400:
-                msg = 'failed to add switch %s' % switch_ip
-                raise Exception(msg)
-
-            all_switches[switch_ip] = resp
+        if self.is_ok(status):
+            return resp["token"]
         else:
-            logging.info('switch %s is already added', switch_ip)
+            raise Exception(
+                'failed to login %s with user %s',
+                CONF.compass_server,
+                CONF.compass_user_email
+            )
 
-    remain_retries = flags.OPTIONS.switch_max_retries
-    while True:
-        for switch_ip, switch in all_switches.items():
-            status, resp = client.poll_switch(switch['id'])
-            logging.info(
-                'get switch %s status %s: %s',
-                switch_ip, status, resp)
-            if status >= 400:
-                msg = 'failed to update switch %s' % switch_ip
-                raise Exception(msg)
-        remain_retries -= 1
-        time.sleep(flags.OPTIONS.switch_retry_interval)
-        for switch_ip, switch in all_switches.items():
-            switch_id = switch['id']
-            # if the switch is not in under_monitoring, wait for the
-            # poll switch task update the switch information and change
-            # the switch state.
-            logging.info(
-                'waiting for the switch %s into under_monitoring',
-                switch_ip)
-            status, resp = client.get_switch(switch_id)
-            logging.info('get switch %s status: %s, resp: %s',
-                         switch_ip, status, resp)
-            if status >= 400:
-                msg = 'failed to get switch %s' % switch_ip
-                raise Exception(msg)
+    def get_machines(self):
+        status, resp = self.client.list_machines()
+        LOG.info(
+            'get all machines status: %s, resp: %s', status, resp)
+        if not self.is_ok(status):
+            raise RuntimeError('failed to get machines')
 
-            switch = resp
-            all_switches[switch_ip] = switch
+        machines_to_add = list(set([
+            machine for machine in CONF.machines.split(',')
+            if machine
+        ]))
 
-            if switch['state'] == 'notsupported':
-                msg = 'switch %s is not supported', switch_ip
-                raise Exception(msg)
-            elif switch['state'] in ['initialized', 'repolling']:
-                logging.info('switch %s is not updated', switch_ip)
-            elif switch['state'] == 'under_monitoring':
-                logging.info('switch %s is ready', switch_ip)
-        try:
-            return _get_machines(client)
-        except Exception:
-            logging.error('failed to get all machines')
+        LOG.info('machines to add: %s', machines_to_add)
+        machines_db = [str(m["mac"]) for m in resp]
+        LOG.info('machines in db: %s', machines_db)
+        assert(set(machines_db) == set(machines_to_add))
 
-        if remain_retries <= 0:
-            msg = 'max retries reached'
-            raise Exception(msg)
+        return [m["id"] for m in resp]
 
+    def get_adapter(self):
+        """get adapter."""
+        status, resp = self.client.list_adapters(name=CONF.adapter_name)
+        LOG.info(
+            'get all adapters status: %s, resp: %s',
+            status, resp
+        )
 
-def _get_adapter(client):
-    """get adapter."""
-    adapter_name = flags.OPTIONS.adapter_name
-    status, resp = client.list_adapters(
-        name=adapter_name
-    )
-    logging.info(
-        'get all adapters for name %s status: %s, resp: %s',
-        adapter_name, status, resp
-    )
-    if status >= 400:
-        msg = 'failed to get adapters'
-        raise Exception(msg)
+        if not self.is_ok(status) or not resp:
+            raise RuntimeError('failed to get adapters')
 
-    if not resp:
-        msg = 'no adapter found'
-        raise Exception(msg)
+        os_re = re.compile(CONF.adapter_os_pattern)
+        flavor_re = re.compile(CONF.adapter_flavor_pattern)
 
-    adapter = resp[0]
-    os_pattern = flags.OPTIONS.adapter_os_pattern
-    if os_pattern:
-        os_re = re.compile(os_pattern)
-    else:
-        os_re = None
-    flavor_pattern = flags.OPTIONS.adapter_flavor_pattern
-    if flavor_pattern:
-        flavor_re = re.compile(flavor_pattern)
-    else:
-        flavor_re = None
+        adapter_id = None
+        os_id = None
+        flavor_id = None
+        adapter = None
 
-    adapter_id = adapter['id']
-    os_id = None
-    flavor_id = None
-    for supported_os in adapter['supported_oses']:
-        if not os_re or os_re.match(supported_os['name']):
-            os_id = supported_os['os_id']
-            break
-
-    if 'flavors' in adapter:
-        for flavor in adapter['flavors']:
-            if not flavor_re or flavor_re.match(flavor['name']):
-                flavor_id = flavor['id']
+        adapter = resp[0]
+        adapter_id = adapter['id']
+        for supported_os in adapter['supported_oses']:
+            if not os_re or os_re.match(supported_os['name']):
+                os_id = supported_os['os_id']
                 break
 
-    if not os_id:
-        msg = 'no os found for %s' % os_pattern
-        raise Exception(msg)
-
-    if flavor_re and not flavor_id:
-        msg = 'no flavor found for %s' % flavor_pattern
-        raise Exception(msg)
-
-    logging.info('adpater for deploying a cluster: %s', adapter_id)
-    return (adapter_id, os_id, flavor_id)
-
-
-def _add_subnets(client):
-    status, resp = client.list_subnets()
-    logging.info('get all subnets status: %s resp: %s', status, resp)
-    if status >= 400:
-        msg = 'failed to get subnets'
-        raise Exception(msg)
-
-    all_subnets = {}
-    for subnet in resp:
-        all_subnets[subnet['subnet']] = subnet
-
-    subnets = [
-        subnet for subnet in flags.OPTIONS.subnets.split(',')
-        if subnet
-    ]
-    subnet_mapping = {}
-    for subnet in subnets:
-        if subnet not in all_subnets:
-            status, resp = client.add_subnet(subnet)
-            logging.info('add subnet %s status %s response %s',
-                         subnet, status, resp)
-            if status >= 400:
-                msg = 'failed to add subnet %s' % subnet
-                raise Exception(msg)
-            subnet_mapping[resp['subnet']] = resp['id']
-        else:
-            subnet_mapping[subnet] = all_subnets[subnet]['id']
-    if not subnet_mapping:
-        raise Exception(
-            'there is not subnets found'
-        )
-    return subnet_mapping
-
-
-def _add_cluster(client, adapter_id, os_id, flavor_id, machines):
-    """add a cluster."""
-    cluster_name = flags.OPTIONS.cluster_name
-    if not cluster_name:
-        raise Exception(
-            'no cluster name set')
-    status, resp = client.add_cluster(
-        cluster_name, adapter_id,
-        os_id, flavor_id)
-    logging.info('add cluster %s status: %s, resp: %s',
-                 cluster_name, status, resp)
-    if status >= 400:
-        msg = 'failed to add cluster %s with adapter %s os %s flavor %s' % (
-            cluster_name, adapter_id, os_id, flavor_id)
-        raise Exception(msg)
-
-    cluster = resp
-    cluster_id = cluster['id']
-    if 'flavor' in cluster:
-        flavor = cluster['flavor']
-    else:
-        flavor = None
-    if flavor and 'roles' in flavor:
-        roles = flavor['roles']
-    else:
-        roles = []
-    role_mapping = {}
-    for role in roles:
-        if role.get('optional', False):
-            role_mapping[role['name']] = 0
-        else:
-            role_mapping[role['name']] = 1
-    logging.info('cluster %s role mapping: %s', cluster_id, role_mapping)
-
-    hostnames = [
-        hostname for hostname in flags.OPTIONS.hostnames.split(',')
-        if hostname
-    ]
-    if len(machines) != len(hostnames):
-        msg = 'hostname %s length does not match machines mac %s length' % (
-            hostnames, machines)
-        raise Exception(msg)
-
-    machines_dict = []
-    for machine_id, hostname in map(None, machines, hostnames):
-        machines_dict.append({
-            'machine_id': machine_id,
-            'name': hostname
-        })
-    # add hosts to the cluster.
-    status, resp = client.add_hosts_to_cluster(
-        cluster_id,
-        {'machines': machines_dict})
-    logging.info('add machines %s to cluster %s status: %s, resp: %s',
-                 machines_dict, cluster_id, status, resp)
-    if status >= 400:
-        msg = 'failed to add machines %s to cluster %s' % (
-            machines, cluster_name)
-        raise Exception(msg)
-    host_mapping = {}
-    for host in resp['hosts']:
-        host_mapping[host['hostname']] = host['id']
-    logging.info('added hosts in cluster %s: %s', cluster_id, host_mapping)
-    if len(host_mapping) != len(machines):
-        msg = 'machines %s to add to the cluster %s while hosts %s' % (
-            machines, cluster_name, host_mapping)
-        raise Exception(msg)
-    return (cluster_id, host_mapping, role_mapping)
-
-
-def _set_cluster_os_config(client, cluster_id, host_ips):
-    """set cluster os config."""
-    os_config = {}
-    language = flags.OPTIONS.language
-    timezone = flags.OPTIONS.timezone
-    http_proxy = flags.OPTIONS.http_proxy
-    https_proxy = flags.OPTIONS.https_proxy
-    if not https_proxy and http_proxy:
-        https_proxy = http_proxy
-    no_proxy = [
-        no_proxy for no_proxy in flags.OPTIONS.no_proxy.split(',')
-        if no_proxy
-    ]
-    compass_name = socket.gethostname()
-    compass_ip = socket.gethostbyname(compass_name)
-    if http_proxy:
-        for hostname, ips in host_ips.items():
-            no_proxy.append(hostname)
-            no_proxy.extend(ips)
-    ntp_server = flags.OPTIONS.ntp_server
-    if not ntp_server:
-        ntp_server = compass_ip
-    dns_servers = [
-        dns_server for dns_server in flags.OPTIONS.dns_servers.split(',')
-        if dns_server
-    ]
-    if not dns_servers:
-        dns_servers = [compass_ip]
-    domain = flags.OPTIONS.domain
-    if not domain:
-        raise Exception('domain is not defined')
-    search_path = [
-        search_path for search_path in flags.OPTIONS.search_path.split(',')
-        if search_path
-    ]
-    if not search_path:
-        search_path = [domain]
-    default_gateway = flags.OPTIONS.default_gateway
-    if not default_gateway:
-        raise Exception('default gateway is not defined')
-    os_config['general'] = {
-        'language': language,
-        'timezone': timezone,
-        'ntp_server': ntp_server,
-        'dns_servers': dns_servers,
-        'default_gateway': default_gateway
-    }
-    if http_proxy:
-        os_config['general']['http_proxy'] = http_proxy
-    if https_proxy:
-        os_config['general']['https_proxy'] = https_proxy
-    if no_proxy:
-        os_config['general']['no_proxy'] = no_proxy
-    if domain:
-        os_config['general']['domain'] = domain
-    if search_path:
-        os_config['general']['search_path'] = search_path
-    server_credential = flags.OPTIONS.server_credential
-    if '=' in server_credential:
-        server_username, server_password = server_credential.split('=', 1)
-    elif server_credential:
-        server_username = server_credential
-        server_password = server_username
-    else:
-        server_username = 'root'
-        server_password = 'root'
-    os_config['server_credentials'] = {
-        'username': server_username,
-        'password': server_password
-    }
-    partitions = [
-        partition for partition in flags.OPTIONS.partitions.split(',')
-        if partition
-    ]
-    os_config['partition'] = {}
-    for partition in partitions:
-        if '=' not in partition:
-            raise Exception(
-                'there is no = in partition %s' % partition
-            )
-        partition_name, partition_value = partition.split('=', 1)
-        if not partition_name:
-            raise Exception(
-                'there is no partition name in %s' % partition)
-        if not partition_value:
-            raise Exception(
-                'there is no partition value in %s' % partition)
-
-        if partition_value.endswith('%'):
-            partition_type = 'percentage'
-            partition_value = int(partition_value[:-1])
-        else:
-            partition_type = 'size'
-        os_config['partition'][partition_name] = {
-            partition_type: partition_value
-        }
-    local_repo_url = flags.OPTIONS.local_repo_url
-    if local_repo_url:
-        os_config['general']['local_repo'] = local_repo_url
-    os_config_filename = flags.OPTIONS.os_config_json_file
-    if os_config_filename:
-        util.merge_dict(
-            os_config, _load_config(os_config_filename)
-        )
-    status, resp = client.update_cluster_config(
-        cluster_id, os_config=os_config)
-    logging.info(
-        'set os config %s to cluster %s status: %s, resp: %s',
-        os_config, cluster_id, status, resp)
-    if status >= 400:
-        msg = 'failed to set os config %s to cluster %s' % (
-            os_config, cluster_id)
-        raise Exception(msg)
-
-
-def _set_host_networking(client, host_mapping, subnet_mapping):
-    """set cluster hosts networking."""
-    host_ips = {}
-    for host_network in flags.OPTIONS.host_networks.split(';'):
-        hostname, networks_str = host_network.split(':', 1)
-        if hostname not in host_mapping:
-            msg = 'hostname %s does not exist in host mapping %s' % (
-                hostname, host_mapping
-            )
-            raise Exception(msg)
-        host_id = host_mapping[hostname]
-        networks = networks_str.split(',')
-        for network in networks:
-            interface, network_properties_str = network.split('=', 1)
-            network_properties = network_properties_str.split('|')
-            ip_addr = network_properties[0]
-            if not ip_addr:
-                raise Exception(
-                    'ip is not set for host %s interface %s' % (
-                        hostname, interface
-                    )
-                )
-            ip = netaddr.IPAddress(ip_addr)
-            subnet_id = None
-            for subnet_addr, subnetid in subnet_mapping.items():
-                subnet = netaddr.IPNetwork(subnet_addr)
-                if ip in subnet:
-                    subnet_id = subnetid
+        if 'flavors' in adapter:
+            for flavor in adapter['flavors']:
+                if not flavor_re or flavor_re.match(flavor['name']):
+                    flavor_id = flavor['id']
                     break
-            if not subnet_id:
-                msg = 'no subnet found for ip %s' % ip_addr
-                raise Exception(msg)
-            properties = dict([
-                (network_property, True)
-                for network_property in network_properties[1:]
-            ])
-            logging.info(
-                'add host %s interface %s ip %s network proprties %s',
-                hostname, interface, ip_addr, properties)
-            status, response = client.add_host_network(
-                host_id, interface, ip=ip_addr, subnet_id=subnet_id,
-                **properties
-            )
-            logging.info(
-                'add host %s interface %s ip %s network properties %s '
-                'status %s: %s',
-                hostname, interface, ip_addr, properties,
-                status, response
-            )
-            if status >= 400:
-                msg = 'failed to set host %s interface %s network' % (
-                    hostname, interface
-                )
-                raise Exception(msg)
-            host_ips.setdefault(hostname, []).append(ip_addr)
-    return host_ips
 
+        assert(os_id and flavor_id)
+        return (adapter_id, os_id, flavor_id)
 
-def _set_cluster_package_config(client, cluster_id):
-    """set cluster package config."""
-    package_config = {
-    }
-    service_credentials = [
-        service_credential
-        for service_credential in flags.OPTIONS.service_credentials.split(',')
-        if service_credential
-    ]
-    logging.debug(
-        'service credentials: %s', service_credentials
-    )
-    for service_credential in service_credentials:
-        if ':' not in service_credential:
-            raise Exception(
-                'there is no : in service credential %s' % service_credential
+    def add_subnets(self):
+        subnets = [
+            subnet for subnet in CONF.subnets.split(',')
+            if subnet
+        ]
+
+        assert(subnets)
+
+        subnet_mapping = {}
+        for subnet in subnets:
+            try:
+                netaddr.IPNetwork(subnet)
+            except Exception:
+                raise RuntimeError('subnet %s format is invalid' % subnet)
+
+            status, resp = self.client.add_subnet(subnet)
+            LOG.info(
+                'add subnet %s status %s response %s',
+                subnet,
+                status,
+                resp
             )
-        service_name, service_pair = service_credential.split(':', 1)
-        if '=' not in service_pair:
-            raise Exception(
-                'there is no = in service %s security' % service_name
-            )
-        username, password = service_pair.split('=', 1)
-        package_config.setdefault(
-            'security', {}
-        ).setdefault(
-            'service_credentials', {}
-        )[service_name] = {
-            'username': username,
-            'password': password
+            if not self.is_ok(status):
+                raise RuntimeError('failed to add subnet %s' % subnet)
+
+            subnet_mapping[resp['subnet']] = resp['id']
+
+        self.subnet_mapping = subnet_mapping
+
+    def add_cluster(self, adapter_id, os_id, flavor_id):
+        """add a cluster."""
+        cluster_name = CONF.cluster_name
+        assert(cluster_name)
+        status, resp = self.client.add_cluster(
+            cluster_name, adapter_id,
+            os_id, flavor_id)
+
+        if not self.is_ok(status):
+            raise RuntimeError("add cluster failed")
+
+        LOG.info(
+            'add cluster %s status: %s resp:%s',
+            cluster_name,
+            status,
+            resp
+        )
+
+        if isinstance(resp, list):
+            cluster = resp[0]
+        else:
+            cluster = resp
+
+        cluster_id = cluster['id']
+        flavor = cluster.get('flavor', {})
+        roles = flavor.get('roles', [])
+
+        for role in roles:
+            if role.get('optional', False):
+                self.role_mapping[role['name']] = ROLE_ASSIGNED
+            else:
+                self.role_mapping[role['name']] = ROLE_UNASSIGNED
+
+        return cluster_id
+
+    def add_cluster_hosts(self, cluster_id, machines):
+        hostnames = [
+            hostname for hostname in CONF.hostnames.split(',')
+            if hostname
+        ]
+
+        assert(len(machines) == len(hostnames))
+
+        machines_dict = []
+        for machine_id, hostname in zip(machines, hostnames):
+            machines_dict.append({
+                'machine_id': machine_id,
+                'name': hostname
+            })
+
+        # add hosts to the cluster.
+        status, resp = self.client.add_hosts_to_cluster(
+            cluster_id,
+            {'machines': machines_dict})
+
+        LOG.info(
+            'add machines %s to cluster %s status: %s, resp: %s',
+            machines_dict,
+            cluster_id,
+            status,
+            resp
+        )
+
+        if not self.is_ok(status):
+            raise RuntimeError("add host to cluster failed")
+
+        for host in resp['hosts']:
+            self.host_mapping[host['hostname']] = host['id']
+
+        assert(len(self.host_mapping) == len(machines))
+
+    def set_cluster_os_config(self, cluster_id):
+        """set cluster os config."""
+        os_config = {}
+        language = CONF.language
+        timezone = CONF.timezone
+        http_proxy = CONF.http_proxy
+        https_proxy = CONF.https_proxy
+        local_repo_url = CONF.local_repo_url
+        repo_name = CONF.repo_name
+        deploy_type = CONF.deploy_type
+        if not https_proxy and http_proxy:
+            https_proxy = http_proxy
+
+        no_proxy = [
+            no_proxy for no_proxy in CONF.no_proxy.split(',')
+            if no_proxy
+        ]
+
+        compass_server = CONF.compass_server
+        if http_proxy:
+            for hostname, ips in self.host_ips.items():
+                no_proxy.append(hostname)
+                no_proxy.extend(ips)
+
+        ntp_server = CONF.ntp_server or compass_server
+
+        dns_servers = [
+            dns_server for dns_server in CONF.dns_servers.split(',')
+            if dns_server
+        ]
+        if not dns_servers:
+            dns_servers = [compass_server]
+
+        domain = CONF.domain
+        if not domain:
+            raise Exception('domain is not defined')
+
+        search_path = [
+            search_path for search_path in CONF.search_path.split(',')
+            if search_path
+        ]
+
+        if not search_path:
+            search_path = [domain]
+
+        default_gateway = CONF.default_gateway
+        if not default_gateway:
+            raise Exception('default gateway is not defined')
+
+        general_config = {
+            'language': language,
+            'timezone': timezone,
+            'ntp_server': ntp_server,
+            'dns_servers': dns_servers,
+            'default_gateway': default_gateway
         }
-    console_credentials = [
-        console_credential
-        for console_credential in flags.OPTIONS.console_credentials.split(',')
-        if console_credential
-    ]
-    logging.debug(
-        'console credentials: %s', console_credentials
-    )
-    for console_credential in console_credentials:
-        if ':' not in console_credential:
-            raise Exception(
-                'there is no : in console credential %s' % console_credential
-            )
-        console_name, console_pair = console_credential.split(':', 1)
-        if '=' not in console_pair:
-            raise Exception(
-                'there is no = in console %s security' % console_name
-            )
-        username, password = console_pair.split('=', 1)
-        package_config.setdefault(
-            'security', {}
-        ).setdefault(
-            'console_credentials', {}
-        )[console_name] = {
-            'username': username,
-            'password': password
+
+        if http_proxy:
+            general_config['http_proxy'] = http_proxy
+        if https_proxy:
+            general_config['https_proxy'] = https_proxy
+        if no_proxy:
+            general_config['no_proxy'] = no_proxy
+        if domain:
+            general_config['domain'] = domain
+        if search_path:
+            general_config['search_path'] = search_path
+        if local_repo_url:
+            general_config['local_repo'] = local_repo_url
+        if repo_name:
+            general_config['repo_name'] = repo_name
+        if deploy_type:
+            general_config['deploy_type'] = deploy_type
+
+        os_config["general"] = general_config
+
+        server_credential = CONF.server_credential
+        if '=' in server_credential:
+            server_username, server_password = server_credential.split('=', 1)
+        elif server_credential:
+            server_username = server_password = server_credential
+        else:
+            server_username = 'root'
+            server_password = 'root'
+
+        os_config['server_credentials'] = {
+            'username': server_username,
+            'password': server_password
         }
-    network_mapping = dict([
-        network_pair.split('=', 1)
-        for network_pair in flags.OPTIONS.network_mapping.split(',')
-        if '=' in network_pair
-    ])
-    for network_type, network in network_mapping.items():
-        package_config.setdefault(
-            'network_mapping', {}
-        )[network_type] = network
-    package_config_filename = flags.OPTIONS.package_config_json_file
-    if package_config_filename:
-        util.merge_dict(
-            package_config, _load_config(package_config_filename)
-        )
-    status, resp = client.update_cluster_config(
-        cluster_id, package_config=package_config)
-    logging.info(
-        'set package config %s to cluster %s status: %s, resp: %s',
-        package_config, cluster_id, status, resp)
-    if status >= 400:
-        msg = 'failed to set package config %s to cluster %s' % (
-            package_config, cluster_id)
-        raise Exception(msg)
 
+        partitions = [
+            partition for partition in CONF.partitions.split(',')
+            if partition
+        ]
 
-def _set_host_roles(client, cluster_id, host_id, roles, role_mapping):
-    status, response = client.update_cluster_host(
-        cluster_id, host_id, roles=roles)
-    logging.info(
-        'set cluster %s host %s roles %s status %s: %s',
-        cluster_id, host_id, roles, status, response
-    )
-    if status >= 400:
-        raise Exception(
-            'failed to set cluster %s host %s roles %s' % (
-                cluster_id, host_id, roles
+        partition_config = {}
+        for partition in partitions:
+            assert("=" in partition)
+
+            partition_name, partition_value = partition.split('=', 1)
+            partition_name = partition_name.strip()
+            partition_value = partition_value.strip()
+
+            assert(partition_name and partition_value)
+
+            if partition_value.endswith('%'):
+                partition_type = 'percentage'
+                partition_value = int(partition_value[:-1])
+            else:
+                partition_type = 'size'
+
+            partition_config[partition_name] = {
+                partition_type: partition_value
+            }
+
+        os_config['partition'] = partition_config
+
+        """
+        os_config_filename = CONF.os_config_json_file
+        if os_config_filename:
+            util.merge_dict(
+                os_config, _load_config(os_config_filename)
             )
-        )
-    for role in roles:
-        if role in role_mapping and role_mapping[role] > 0:
-            role_mapping[role] -= 1
+        """
 
+        status, resp = self.client.update_cluster_config(
+            cluster_id, os_config=os_config)
+        LOG.info(
+            'set os config %s to cluster %s status: %s, resp: %s',
+            os_config, cluster_id, status, resp)
+        if not self.is_ok(status):
+            raise RuntimeError('failed to set os config %s to cluster %s'
+                               % (os_config, cluster_id))
 
-def _set_hosts_roles(client, cluster_id, host_mapping, role_mapping):
-    host_roles = {}
-    for host_str in flags.OPTIONS.host_roles.split(';'):
-        if not host_str:
-            continue
-        hostname, roles_str = host_str.split('=', 1)
-        if hostname not in host_mapping:
-            raise Exception(
-                'hostname %s not found in host mapping %s' % (
-                    hostname, host_mapping
+    def set_host_networking(self):
+        """set cluster hosts networking."""
+        def get_subnet(ip_str):
+            try:
+                LOG.info("subnets: %s" % self.subnet_mapping.keys())
+                ip = netaddr.IPAddress(ip_str)
+                for cidr, subnet_id in self.subnet_mapping.items():
+                    subnet = netaddr.IPNetwork(cidr)
+                    if ip in subnet:
+                        return True, subnet_id
+
+                    LOG.info("ip %s not in %s" % (ip_str, cidr))
+                return False, None
+            except Exception:
+                LOG.exception("ip addr %s is invalid" % ip_str)
+                return False, None
+
+        for host_network in CONF.host_networks.split(';'):
+            hostname, networks_str = host_network.split(':', 1)
+            hostname = hostname.strip()
+            networks_str = networks_str.strip()
+
+            assert(hostname in self.host_mapping)
+
+            host_id = self.host_mapping[hostname]
+            intf_list = networks_str.split(',')
+            for intf_str in intf_list:
+                interface, intf_properties = intf_str.split('=', 1)
+                intf_properties = intf_properties.strip().split('|')
+
+                assert(intf_properties)
+                ip_str = intf_properties[0]
+
+                status, subnet_id = get_subnet(ip_str)
+                if not status:
+                    raise RuntimeError("ip addr %s is invalid" % ip_str)
+
+                properties = dict([
+                    (intf_property, True)
+                    for intf_property in intf_properties[1:]
+                ])
+
+                LOG.info(
+                    'add host %s interface %s ip %s network proprties %s',
+                    hostname, interface, ip_str, properties)
+
+                status, response = self.client.add_host_network(
+                    host_id, interface, ip=ip_str, subnet_id=subnet_id,
+                    **properties
                 )
+
+                LOG.info(
+                    'add host %s interface %s ip %s network properties %s '
+                    'status %s: %s',
+                    hostname, interface, ip_str, properties,
+                    status, response
+                )
+
+                if not self.is_ok(status):
+                    raise RuntimeError("add host network failed")
+
+                self.host_ips[hostname].append(ip_str)
+
+    def set_cluster_package_config(self, cluster_id):
+        """set cluster package config."""
+        package_config = {"security": {}}
+
+        service_credentials = [
+            service_credential
+            for service_credential in CONF.service_credentials.split(',')
+            if service_credential
+        ]
+
+        service_credential_cfg = {}
+        LOG.info(
+            'service credentials: %s', service_credentials
+        )
+
+        for service_credential in service_credentials:
+            if ':' not in service_credential:
+                raise Exception(
+                    'no : in service credential %s' % service_credential
+                )
+            service_name, service_pair = service_credential.split(':', 1)
+            if '=' not in service_pair:
+                raise Exception(
+                    'there is no = in service %s security' % service_name
+                )
+
+            username, password = service_pair.split('=', 1)
+            service_credential_cfg[service_name] = {
+                'username': username,
+                'password': password
+            }
+
+        console_credentials = [
+            console_credential
+            for console_credential in CONF.console_credentials.split(',')
+            if console_credential
+        ]
+
+        LOG.info(
+            'console credentials: %s', console_credentials
+        )
+
+        console_credential_cfg = {}
+        for console_credential in console_credentials:
+            if ':' not in console_credential:
+                raise Exception(
+                    'there is no : in console credential %s'
+                    % console_credential
+                )
+            console_name, console_pair = console_credential.split(':', 1)
+            if '=' not in console_pair:
+                raise Exception(
+                    'there is no = in console %s security' % console_name
+                )
+            username, password = console_pair.split('=', 1)
+            console_credential_cfg[console_name] = {
+                'username': username,
+                'password': password
+            }
+
+        package_config["security"] = {
+            "service_credentials": service_credential_cfg,
+            "console_credentials": console_credential_cfg
+        }
+
+        network_mapping = dict([
+            network_pair.split('=', 1)
+            for network_pair in CONF.network_mapping.split(',')
+            if '=' in network_pair
+        ])
+
+        package_config['network_mapping'] = network_mapping
+
+        assert(os.path.exists(CONF.network_cfg))
+        network_cfg = yaml.load(open(CONF.network_cfg))
+        package_config["network_cfg"] = network_cfg
+
+        assert(os.path.exists(CONF.neutron_cfg))
+        neutron_cfg = yaml.load(open(CONF.neutron_cfg))
+        package_config["neutron_config"] = neutron_cfg
+
+        """
+        package_config_filename = CONF.package_config_json_file
+        if package_config_filename:
+            util.merge_dict(
+                package_config, _load_config(package_config_filename)
             )
-        host_id = host_mapping[hostname]
-        roles = [role for role in roles_str.split(',') if role]
-        _set_host_roles(client, cluster_id, host_id, roles, role_mapping)
-        host_roles[hostname] = roles
+        """
+        package_config['ha_proxy'] = {}
+        if CONF.cluster_vip:
+            package_config["ha_proxy"]["vip"] = CONF.cluster_vip
 
-    # assign unassigned roles to unassigned hosts
-    unassigned_hostnames = []
-    for hostname, _ in host_mapping.items():
-        if hostname not in host_roles:
-            unassigned_hostnames.append(hostname)
-    unassigned_roles = []
-    for role, count in role_mapping.items():
-        if count > 0:
-            unassigned_roles.append(role)
-    if len(unassigned_hostnames) < len(unassigned_roles):
-        raise Exception(
-            'there is no enough hosts %s to assign roles %s' % (
-                unassigned_hostnames, unassigned_roles
+        package_config['enable_secgroup'] = (CONF.enable_secgroup == "true")
+
+        status, resp = self.client.update_cluster_config(
+            cluster_id, package_config=package_config)
+        LOG.info(
+            'set package config %s to cluster %s status: %s, resp: %s',
+            package_config, cluster_id, status, resp)
+
+        if not self.is_ok(status):
+            raise RuntimeError("set cluster package_config failed")
+
+    def set_host_roles(self, cluster_id, host_id, roles):
+        status, response = self.client.update_cluster_host(
+            cluster_id, host_id, roles=roles)
+
+        LOG.info(
+            'set cluster %s host %s roles %s status %s: %s',
+            cluster_id, host_id, roles, status, response
+        )
+
+        if not self.is_ok(status):
+            raise RuntimeError("set host roles failed")
+
+        for role in roles:
+            if role in self.role_mapping:
+                self.role_mapping[role] = ROLE_ASSIGNED
+
+    def set_all_hosts_roles(self, cluster_id):
+        for host_str in CONF.host_roles.split(';'):
+            host_str = host_str.strip()
+            hostname, roles_str = host_str.split('=', 1)
+
+            assert(hostname in self.host_mapping)
+            host_id = self.host_mapping[hostname]
+
+            roles = [role.strip() for role in roles_str.split(',') if role]
+
+            self.set_host_roles(cluster_id, host_id, roles)
+            self.host_roles[hostname] = roles
+
+        unassigned_hostnames = list(set(self.host_mapping.keys())
+                                    - set(self.host_roles.keys()))
+
+        unassigned_roles = [role for role, status in self.role_mapping.items()
+                            if is_role_unassigned(status)]
+
+        assert(len(unassigned_hostnames) >= len(unassigned_roles))
+
+        for hostname, role in map(
+            None,
+            unassigned_hostnames,
+            unassigned_roles
+        ):
+            host_id = self.host_mapping[hostname]
+            self.set_host_roles(cluster_id, host_id, [role])
+            self.host_roles[hostname] = [role]
+
+        unassigned_hostnames = list(set(self.host_mapping.keys())
+                                    - set(self.host_roles.keys()))
+
+        if not unassigned_hostnames:
+            return
+
+        # assign default roles to unassigned hosts
+        default_roles = [
+            role for role in CONF.default_roles.split(',')
+            if role
+        ]
+
+        assert(default_roles)
+
+        cycle_roles = itertools.cycle(default_roles)
+        for hostname in unassigned_hostnames:
+            host_id = self.host_mapping[hostname]
+            roles = [cycle_roles.next()]
+            self.set_host_roles(cluster_id, host_id, roles)
+            self.host_roles[hostname] = roles
+
+    def deploy_clusters(self, cluster_id):
+        host_ids = self.host_mapping.values()
+
+        status, response = self.client.review_cluster(
+            cluster_id, review={'hosts': host_ids}
+        )
+        LOG.info(
+            'review cluster %s hosts %s, status %s: %s',
+            cluster_id, host_ids, status, response
+        )
+
+        # TODO: clarify what this review step does before deploy
+        if not self.is_ok(status):
+            raise RuntimeError("review cluster host failed")
+
+        status, response = self.client.deploy_cluster(
+            cluster_id, deploy={'hosts': host_ids}
+        )
+        LOG.info(
+            'deploy cluster %s hosts %s status %s: %s',
+            cluster_id, host_ids, status, response
+        )
+
+        if not self.is_ok(status):
+            raise RuntimeError("deploy cluster failed")
+
+    def get_installing_progress(self, cluster_id):
+        """get installing progress."""
+        action_timeout = time.time() + 60 * float(CONF.action_timeout)
+        deployment_timeout = time.time() + 60 * float(
+            CONF.deployment_timeout)
+
+        current_time = time.time()
+        deployment_failed = True
+        while current_time < deployment_timeout:
+            status, cluster_state = self.client.get_cluster_state(cluster_id)
+            LOG.info(
+                'get cluster %s state status %s: %s',
+                cluster_id, status, cluster_state
             )
-        )
-    for offset, role in enumerate(unassigned_roles):
-        hostname = unassigned_hostnames[offset]
-        host_id = host_mapping[hostname]
-        roles = [role]
-        _set_host_roles(client, cluster_id, host_id, roles, role_mapping)
-        host_roles[hostname] = roles
-    unassigned_hostnames = unassigned_hostnames[len(unassigned_roles):]
-    unassigned_roles = []
+            if not self.is_ok(status):
+                raise RuntimeError("can not get cluster state")
 
-    # assign default roles to unassigned hosts
-    default_roles = [
-        role for role in flags.OPTIONS.default_roles.split(',')
-        if role
-    ]
-    if not default_roles and unassigned_hostnames:
-        raise Exception(
-            'hosts %s do not have roles set' % unassigned_hostnames
-        )
-    for hostname in unassigned_hostnames:
-        host_id = host_mapping[hostname]
-        roles = [default_roles[0]]
-        _set_host_roles(client, cluster_id, host_id, roles, role_mapping)
-        host_roles[hostname] = roles
-        default_roles = default_roles[1:]
-        default_roles.extend(roles)
+            if cluster_state['state'] in ['UNINITIALIZED', 'INITIALIZED']:
+                if current_time >= action_timeout:
+                    deployment_failed = True
+                    break
+                else:
+                    continue
 
-    return host_roles
-
-
-def _deploy_clusters(client, cluster_id, host_mapping):
-    """deploy cluster."""
-    host_ids = [host_id for _, host_id in host_mapping.items()]
-    status, response = client.review_cluster(
-        cluster_id, review={'hosts': host_ids}
-    )
-    logging.info(
-        'review cluster %s hosts %s, status %s: %s',
-        cluster_id, host_ids, status, response
-    )
-    if status >= 400:
-        raise Exception(
-            'review cluster %s fails' % cluster_id
-        )
-    status, response = client.deploy_cluster(
-        cluster_id, deploy={'hosts': host_ids}
-    )
-    logging.info(
-        'deploy cluster %s hosts %s status %s: %s',
-        cluster_id, host_ids, status, response
-    )
-    if status >= 400:
-        raise Exception(
-            'deploy cluster %s fails' % cluster_id
-        )
-
-
-def _get_installing_progress(client, cluster_id, host_mapping):
-    """get intalling progress."""
-    action_timeout = time.time() + 60 * float(flags.OPTIONS.action_timeout)
-    deployment_timeout = time.time() + 60 * float(
-        flags.OPTIONS.deployment_timeout)
-    cluster_installed = False
-    cluster_failed = False
-    hosts_installed = {}
-    hosts_failed = {}
-    install_finished = False
-    deployment_failed = False
-    current_time = time.time()
-    while current_time < deployment_timeout:
-        status, cluster_state = client.get_cluster_state(cluster_id)
-        logging.info(
-            'get cluster %s state status %s: %s',
-            cluster_id, status, cluster_state
-        )
-        if status >= 400:
-            raise Exception(
-                'failed to acquire cluster %s state' % cluster_id
-            )
-        if cluster_state['state'] in ['UNINITIALIZED', 'INITIALIZED']:
-            if current_time >= action_timeout:
+            elif cluster_state['state'] == 'SUCCESSFUL':
+                deployment_failed = False
+                break
+            elif cluster_state['state'] == 'ERROR':
                 deployment_failed = True
                 break
-            else:
-                continue
-        if cluster_state['state'] == 'SUCCESSFUL':
-            cluster_installed = True
-        if cluster_state['state'] == 'ERROR':
-            cluster_failed = True
-        for hostname, host_id in host_mapping.items():
-            status, host_state = client.get_cluster_host_state(
-                cluster_id, host_id
-            )
-            logging.info(
-                'get cluster %s host %s state status %s: %s',
-                cluster_id, host_id, status, host_state
-            )
-            if status >= 400:
-                raise Exception(
-                    'failed to acquire cluster %s host %s state' % (
-                        cluster_id, host_id
-                    )
-                )
-            if host_state['state'] in ['UNINITIALIZED', 'INITIALIZED']:
-                raise Exception(
-                    'unintended status for host %s: %s' % (
-                        hostname, host_state
-                    )
-                )
-            if host_state['state'] == 'SUCCESSFUL':
-                hosts_installed[host_id] = True
-            else:
-                hosts_installed[host_id] = False
-            if host_state['state'] == 'ERROR':
-                hosts_failed[host_id] = True
-            else:
-                hosts_failed[host_id] = False
 
-        cluster_finished = cluster_installed or cluster_failed
-        hosts_finished = {}
-        for _, host_id in host_mapping.items():
-            hosts_finished[host_id] = (
-                hosts_installed.get(host_id, False) or
-                hosts_failed.get(host_id, False)
-            )
-        if cluster_finished:
-            if not all(hosts_finished.values()):
-                raise Exception(
-                    'some host are not finished: %s' % hosts_finished
-                )
-            logging.info('all clusters/hosts are installed.')
-            install_finished = True
-            break
+        if deployment_failed:
+            raise RuntimeError("deploy cluster failed")
+
+    def check_dashboard_links(self, cluster_id):
+        dashboard_url = CONF.dashboard_url
+        if not dashboard_url:
+            LOG.info('no dashboard url set')
+            return
+        dashboard_link_pattern = re.compile(
+            CONF.dashboard_link_pattern)
+        r = requests.get(dashboard_url, verify=False)
+        r.raise_for_status()
+        match = dashboard_link_pattern.search(r.text)
+        if match:
+            LOG.info(
+                'dashboard login page for cluster %s can be downloaded',
+                cluster_id)
         else:
-            logging.info(
-                'there are some clusters/hosts in installing.'
-                'sleep %s seconds and retry',
-                flags.OPTIONS.progress_update_check_interval)
-            time.sleep(float(flags.OPTIONS.progress_update_check_interval))
-        current_time = time.time()
-
-    if deployment_failed:
-        raise Exception(
-            'cluster %s deployment action fails: %s' % cluster_id
-        )
-    if not install_finished:
-        raise Exception(
-            'cluster %s installation not finished: '
-            'installed %s, failed: %s' % (
-                cluster_id, hosts_installed, hosts_failed
-            )
-        )
-    if cluster_failed or any(hosts_failed.values()):
-        msg = 'cluster %s hosts %s is not all finished. failed hosts %s' % (
-            cluster_id, host_mapping.values(), hosts_failed.keys()
-        )
-        raise Exception(msg)
-
-
-def _check_dashboard_links(client, cluster_id):
-    dashboard_url = flags.OPTIONS.dashboard_url
-    if not dashboard_url:
-        logging.info('no dashboarde url set')
-        return
-    dashboard_link_pattern = re.compile(
-        flags.OPTIONS.dashboard_link_pattern)
-    r = requests.get(dashboard_url, verify=False)
-    r.raise_for_status()
-    match = dashboard_link_pattern.search(r.text)
-    if match:
-        logging.info(
-            'dashboard login page for cluster %s can be downloaded',
-            cluster_id)
-    else:
-        msg = (
-            '%s failed to be downloaded\n'
-            'the context is:\n%s\n'
-        ) % (dashboard_url, r.text)
-        raise Exception(msg)
+            msg = (
+                '%s failed to be downloaded\n'
+                'the context is:\n%s\n'
+            ) % (dashboard_url, r.text)
+            raise Exception(msg)
 
 
 def main():
-    flags.init()
-    logsetting.init()
-    client = _get_client()
-    _login(client)
-    if flags.OPTIONS.poll_switches:
-        machines = _poll_switches(client)
-    else:
-        machines = _get_machines(client)
-    logging.info('machines are %s', machines)
-    subnet_mapping = _add_subnets(client)
-    adapter_id, os_id, flavor_id = _get_adapter(client)
-    cluster_id, host_mapping, role_mapping = _add_cluster(
-        client, adapter_id, os_id, flavor_id, machines)
-    host_ips = _set_host_networking(
-        client, host_mapping, subnet_mapping
-    )
-    _set_cluster_os_config(client, cluster_id, host_ips)
-    if flavor_id:
-        _set_cluster_package_config(client, cluster_id)
-    if role_mapping:
-        _set_hosts_roles(client, cluster_id, host_mapping, role_mapping)
-    _deploy_clusters(client, cluster_id, host_mapping)
-    _get_installing_progress(client, cluster_id, host_mapping)
-    _check_dashboard_links(client, cluster_id)
+    client = CompassClient()
+    machines = client.get_machines()
 
+    LOG.info('machines are %s', machines)
+
+    client.add_subnets()
+    adapter_id, os_id, flavor_id = client.get_adapter()
+    cluster_id = client.add_cluster(adapter_id, os_id, flavor_id)
+
+    client.add_cluster_hosts(cluster_id, machines)
+    client.set_host_networking()
+    client.set_cluster_os_config(cluster_id)
+
+    if flavor_id:
+        client.set_cluster_package_config(cluster_id)
+
+    client.set_all_hosts_roles(cluster_id)
+    client.deploy_clusters(cluster_id)
+
+    client.get_installing_progress(cluster_id)
+    client.check_dashboard_links(cluster_id)
 
 if __name__ == "__main__":
+    CONF(args=sys.argv[1:])
     main()
diff --git a/bin/refresh.sh b/bin/refresh.sh
index ddad90f0..d9f12814 100755
--- a/bin/refresh.sh
+++ b/bin/refresh.sh
@@ -7,25 +7,16 @@ service mysqld status || exit $?
 /opt/compass/bin/clean_installation_logs.py
 rm -rf /var/ansible/run/*
 service httpd restart
-sleep 10
 service httpd status || exit $?
 service rsyslog restart
-sleep 10
 service rsyslog status || exit $?
 service redis restart
-sleep 10
 service redis status || exit $?
 redis-cli flushall
 service cobblerd restart
-sleep 10
 service cobblerd status || exit $?
-chef-server-ctl restart
-sleep 10
-chef-server-ctl status || exit $?
 service compass-celeryd restart
-sleep 10
 service compass-celeryd status || exit $?
 service compass-progress-updated restart
-sleep 10
 service compass-progress-updated status || exit $?
 
diff --git a/compass/actions/install_callback.py b/compass/actions/install_callback.py
index dcf278ac..aae955a6 100644
--- a/compass/actions/install_callback.py
+++ b/compass/actions/install_callback.py
@@ -71,38 +71,20 @@ def os_installed(
 
             deploy_manager = DeployManager(
                 adapter_info, cluster_info, hosts_info)
+
             if not os_installed_triggered:
                 deploy_manager.os_installed()
                 util.ActionHelper.host_ready(host_id, True, user)
                 os_installed_triggered = True
 
             if clusterhost_ready:
-                deploy_manager.cluster_os_installed()
+                # deploy_manager.cluster_os_installed()
                 util.ActionHelper.cluster_host_ready(
                     cluster_id, host_id, False, user
                 )
 
-        for cluster_id, cluster_os_ready in clusters_os_ready.items():
-            if not cluster_os_ready and os_installed_triggered:
-                continue
-
-            cluster_info = util.ActionHelper.get_cluster_info(
-                cluster_id, user)
-            adapter_id = cluster_info[const.ADAPTER_ID]
-
-            adapter_info = util.ActionHelper.get_adapter_info(
-                adapter_id, cluster_id, user)
-            hosts_info = util.ActionHelper.get_hosts_info(
-                cluster_id, [host_id], user)
-
-            deploy_manager = DeployManager(
-                adapter_info, cluster_info, hosts_info)
-            if not os_installed_triggered:
-                deploy_manager.os_installed()
-                util.ActionHelper.host_ready(host_id, True, user)
-                os_installed_triggered = True
-
-            if cluster_os_ready:
+            if util.ActionHelper.is_cluster_os_ready(cluster_id, user):
+                logging.info("deploy_manager begin cluster_os_installed")
                 deploy_manager.cluster_os_installed()
 
 
diff --git a/compass/actions/util.py b/compass/actions/util.py
index 4665c297..4d9f855d 100644
--- a/compass/actions/util.py
+++ b/compass/actions/util.py
@@ -102,8 +102,8 @@ class ActionHelper(object):
               ...
            }
            To view a complete output, please refer to backend doc.
-
         """
+
         adapter_info = adapter_db.get_adapter(adapter_id, user=user)
         metadata = cluster_db.get_cluster_metadata(cluster_id, user=user)
         adapter_info.update({const.METADATA: metadata})
@@ -139,6 +139,7 @@ class ActionHelper(object):
                "owner": "xxx"
            }
         """
+
         cluster_info = cluster_db.get_cluster(cluster_id, user=user)
 
         # convert roles retrieved from db into a list of role names
@@ -161,34 +162,35 @@ class ActionHelper(object):
     def get_hosts_info(cluster_id, hosts_id_list, user):
         """Get hosts information. Return a dictionary as below,
 
-        {
-            "hosts": {
-                1($host_id): {
-                     "reinstall_os": True,
-                     "mac": "xxx",
-                     "name": "xxx",
-                     "roles": [xxx, yyy]
-                     },
-                     "networks": {
-                         "eth0": {
-                             "ip": "192.168.1.1",
-                             "netmask": "255.255.255.0",
-                             "is_mgmt": True,
-                             "is_promiscuous": False,
-                             "subnet": "192.168.1.0/24"
-                         },
-                         "eth1": {...}
-                     },
-                     "os_config": {},
-                     "package_config": {},
-                     "deployed_os_config": {},
-                     "deployed_package_config": {}
-                },
-                2: {...},
-                ....
-            }
-        }
+           {
+               "hosts": {
+                   1($host_id): {
+                        "reinstall_os": True,
+                        "mac": "xxx",
+                        "name": "xxx",
+                        "roles": [xxx, yyy]
+                        },
+                        "networks": {
+                            "eth0": {
+                                "ip": "192.168.1.1",
+                                "netmask": "255.255.255.0",
+                                "is_mgmt": True,
+                                "is_promiscuous": False,
+                                "subnet": "192.168.1.0/24"
+                            },
+                            "eth1": {...}
+                        },
+                        "os_config": {},
+                        "package_config": {},
+                        "deployed_os_config": {},
+                        "deployed_package_config": {}
+                   },
+                   2: {...},
+                   ....
+               }
+           }
         """
+
         hosts_info = {}
         for host_id in hosts_id_list:
             info = cluster_db.get_cluster_host(cluster_id, host_id, user=user)
@@ -322,6 +324,10 @@ class ActionHelper(object):
             user=user, ready=True
         )
 
+    @staticmethod
+    def is_cluster_os_ready(cluster_id, user=None):
+        return cluster_db.is_cluster_os_ready(cluster_id, user=user)
+
     @staticmethod
     def cluster_ready(cluster_id, from_database_only, user):
         """Trigger cluster ready."""
diff --git a/compass/db/api/cluster.py b/compass/db/api/cluster.py
index fe21735b..9d8472de 100644
--- a/compass/db/api/cluster.py
+++ b/compass/db/api/cluster.py
@@ -231,6 +231,23 @@ def get_cluster(
     )
 
 
+@database.run_in_session()
+@user_api.check_user_permission(
+    permission.PERMISSION_LIST_CLUSTERS)
+def is_cluster_os_ready(
+    cluster_id, exception_when_missing=True,
+    user=None, session=None, **kwargs
+):
+    cluster = utils.get_db_object(
+        session, models.Cluster, exception_when_missing, id=cluster_id)
+
+    all_states = ([i.host.state.ready for i in cluster.clusterhosts])
+
+    logging.info("is_cluster_os_ready: all_states %s" % all_states)
+
+    return all(all_states)
+
+
 def check_cluster_validated(cluster):
     """Check cluster is validated."""
     if not cluster.config_validated:
@@ -518,6 +535,7 @@ def get_cluster_metadata(cluster_id, user=None, session=None, **kwargs):
                 user=user, session=session
             )
         )
+
     return metadatas
 
 
@@ -2003,7 +2021,7 @@ def update_cluster_host_state_internal(
     )
     return _update_clusterhost_state(
         clusterhost, from_database_only=from_database_only,
-        session=None, **kwargs
+        session=session, **kwargs
     )
 
 
diff --git a/compass/deployment/installers/config_manager.py b/compass/deployment/installers/config_manager.py
index 2c3d1dcf..436406b0 100644
--- a/compass/deployment/installers/config_manager.py
+++ b/compass/deployment/installers/config_manager.py
@@ -12,345 +12,107 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__author__ = "Grace Yu (grace.yu@huawei.com)"
+__author__ = "baigk (baiguoku@huawei.com)"
 
-
-"""Module to manage and access cluster, hosts and adapter config.
-"""
+from collections import defaultdict
 from copy import deepcopy
+import json
 import logging
-
+import netaddr
 
 from compass.deployment.utils import constants as const
 
+ip_generator_map = {}
 
-class BaseConfigManager(object):
 
-    def __init__(self, adapter_info, cluster_info, hosts_info):
+def get_ip_addr(ip_ranges):
+    def _get_ip_addr():
+        for ip_range in ip_ranges:
+            for ip in netaddr.iter_iprange(*ip_range):
+                yield str(ip)
+
+    s = json.dumps(ip_ranges)
+    if s not in ip_generator_map:
+        ip_generator_map[s] = _get_ip_addr()
+        return ip_generator_map[s]
+    else:
+        return ip_generator_map[s]
+
+
+class AdapterInfo(object):
+    def __init__(self, adapter_info):
         self.adapter_info = adapter_info
+        self.name = self.adapter_info.get(const.NAME)
+        self.dist_system_name = self.name
+        self.health_check_cmd = self.adapter_info.get(const.HEALTH_CHECK_CMD)
+
+        self.os_installer = self.adapter_info.setdefault(
+            const.OS_INSTALLER, {}
+        )
+        self.os_installer.setdefault(const.INSTALLER_SETTINGS, {})
+
+        self.package_installer = self.adapter_info.setdefault(
+            const.PK_INSTALLER, {}
+        )
+        self.package_installer.setdefault(const.INSTALLER_SETTINGS, {})
+
+        self.metadata = self.adapter_info.setdefault(const.METADATA, {})
+        self.os_metadata = self.metadata.setdefault(const.OS_CONFIG, {})
+        self.package_metadata = self.metadata.setdefault(const.PK_CONFIG, {})
+
+        self.flavors = dict([(f[const.FLAVOR_NAME], f)
+                            for f in self.adapter_info.get(const.FLAVOR, [])])
+
+    @property
+    def flavor_list(self):
+        return self.flavors.values()
+
+    def get_flavor(self, flavor_name):
+        return self.flavors.get(flavor_name)
+
+
+class ClusterInfo(object):
+    def __init__(self, cluster_info):
         self.cluster_info = cluster_info
-        self.hosts_info = hosts_info
-
-    def get_cluster_id(self):
-        return self.__get_cluster_item(const.ID)
-
-    def get_clustername(self):
-        return self.__get_cluster_item(const.NAME)
-
-    def get_os_version(self):
-        return self.__get_cluster_item(const.OS_VERSION)
-
-    def get_cluster_baseinfo(self):
-        """Get cluster base information.
-
-        Including cluster_id, os_version and cluster_name.
-        """
-        attr_names = [const.ID, const.NAME, const.OS_VERSION]
-
-        base_info = {}
-        for name in attr_names:
-            base_info[name] = self.__get_cluster_item(name)
-
-        return base_info
-
-    def get_host_id_list(self):
-        if not self.hosts_info:
-            logging.info("hosts config is None or {}")
-            return []
-
-        return self.hosts_info.keys()
-
-    def get_hosts_id_list_for_os_installation(self):
-        """Get info of hosts which need to install/reinstall OS."""
-        result = []
-        all_host_ids = self.get_host_id_list()
-        for host_id in all_host_ids:
-            if self.hosts_info[host_id][const.REINSTALL_OS_FLAG]:
-                result.append(host_id)
-        return result
-
-    def get_cluster_flavor_info(self):
-        return self.__get_cluster_item(const.FLAVOR, {})
-
-    def get_cluster_flavor_name(self):
-        flavor_info = self.get_cluster_flavor_info()
-        return flavor_info.setdefault(const.FLAVOR_NAME, None)
-
-    def get_cluster_flavor_roles(self):
-        flavor_info = self.get_cluster_flavor_info()
-        return flavor_info.setdefault(const.ROLES, [])
-
-    def get_cluster_flavor_template(self):
-        flavor_info = self.get_cluster_flavor_info()
-        return flavor_info.setdefault(const.TMPL, None)
-
-    def get_cluster_os_config(self):
-        return deepcopy(self.__get_cluster_item(const.OS_CONFIG, {}))
-
-    def get_server_credentials(self):
-        cluster_os_config = self.get_cluster_os_config()
-        if not cluster_os_config:
-            logging.info("cluster os_config is None!")
-            return ()
-
-        username = cluster_os_config[const.SERVER_CREDS][const.USERNAME]
-        password = cluster_os_config[const.SERVER_CREDS][const.PASSWORD]
-        return (username, password)
-
-    def get_cluster_package_config(self):
-        return deepcopy(self.__get_cluster_item(const.PK_CONFIG, {}))
-
-    def get_cluster_network_mapping(self):
-        package_config = self.get_cluster_package_config()
-        if not package_config:
-            logging.info("cluster package_config is None or {}.")
-            return {}
-
-        mapping = package_config.setdefault(const.NETWORK_MAPPING, {})
-        logging.info("Network mapping in the config is '%s'!", mapping)
-
-        return mapping
-
-    def get_cluster_deployed_os_config(self):
-        return deepcopy(self.__get_cluster_item(const.DEPLOYED_OS_CONFIG, {}))
-
-    def get_cluster_deployed_package_config(self):
-        return deepcopy(self.__get_cluster_item(const.DEPLOYED_PK_CONFIG, {}))
-
-    def __get_cluster_item(self, item, default_value=None):
-        if not self.cluster_info:
-            logging.info("cluster config is None or {}")
-            return None
-
-        return self.cluster_info.setdefault(item, default_value)
-
-    def get_cluster_roles_mapping(self):
-        if not self.cluster_info:
-            logging.info("cluster config is None or {}")
-            return {}
-
-        deploy_config = self.get_cluster_deployed_package_config()
-        mapping = deploy_config.setdefault(const.ROLES_MAPPING, {})
-
-        if not mapping:
-            mapping = self._get_cluster_roles_mapping_helper()
-            deploy_config[const.ROLES_MAPPING] = mapping
-
-        return mapping
-
-    def _get_host_info(self, host_id):
-        if not self.hosts_info:
-            logging.info("hosts config is None or {}")
-            return {}
-
-        if host_id not in self.hosts_info:
-            logging.info("Cannot find host, ID is '%s'", host_id)
-            return {}
-
-        return self.hosts_info[host_id]
-
-    def __get_host_item(self, host_id, item, default_value=None):
-        host_info = self._get_host_info(host_id)
-        if not host_info:
-            return {}
-
-        return deepcopy(host_info.setdefault(item, default_value))
-
-    def get_host_baseinfo(self, host_id):
-        """Get host base information."""
-        host_info = self._get_host_info(host_id)
-        if not host_info:
-            return {}
-
-        attr_names = [const.REINSTALL_OS_FLAG, const.MAC_ADDR, const.NAME,
-                      const.HOSTNAME, const.NETWORKS]
-        base_info = {}
-        for attr in attr_names:
-            temp = host_info[attr]
-            if isinstance(temp, dict) or isinstance(temp, list):
-                base_info[attr] = deepcopy(temp)
-            else:
-                base_info[attr] = temp
-
-        base_info[const.DNS] = self.get_host_dns(host_id)
-
-        return base_info
-
-    def get_host_fullname(self, host_id):
-        return self.__get_host_item(host_id, const.NAME, None)
-
-    def get_host_dns(self, host_id):
-        host_info = self._get_host_info(host_id)
-        if not host_info:
-            return None
-
-        if const.DNS not in host_info:
-            hostname = host_info[const.HOSTNAME]
-            domain = self.get_host_domain(host_id)
-            host_info[const.DNS] = '.'.join((hostname, domain))
-
-        return host_info[const.DNS]
-
-    def get_host_mac_address(self, host_id):
-        return self.__get_host_item(host_id, const.MAC_ADDR, None)
-
-    def get_hostname(self, host_id):
-        return self.__get_host_item(host_id, const.HOSTNAME, None)
-
-    def get_host_networks(self, host_id):
-        return self.__get_host_item(host_id, const.NETWORKS, {})
-
-    def get_host_interfaces(self, host_id):
-        networks = self.get_host_networks(host_id)
-        return networks.keys()
-
-    def get_host_interface_config(self, host_id, interface):
-        networks = self.get_host_networks(host_id)
-        return networks.setdefault(interface, {})
-
-    def get_host_interface_ip(self, host_id, interface):
-        interface_config = self._get_host_interface_config(host_id, interface)
-        return interface_config.setdefault(const.IP_ADDR, None)
-
-    def get_host_interface_netmask(self, host_id, interface):
-        interface_config = self.get_host_interface_config(host_id, interface)
-        return interface_config.setdefault(const.NETMASK, None)
-
-    def get_host_interface_subnet(self, host_id, interface):
-        nic_config = self.get_host_interface_config(host_id, interface)
-        return nic_config.setdefault(const.SUBNET, None)
-
-    def is_interface_promiscuous(self, host_id, interface):
-        nic_config = self.get_host_interface_config(host_id, interface)
-        if not nic_config:
-            raise Exception("Cannot find interface '%s'", interface)
-
-        return nic_config[const.PROMISCUOUS_FLAG]
-
-    def is_interface_mgmt(self, host_id, interface):
-        nic_config = self.get_host_interface_config(host_id, interface)
-        if not nic_config:
-            raise Exception("Cannot find interface '%s'", interface)
-
-        return nic_config[const.MGMT_NIC_FLAG]
-
-    def get_host_os_config(self, host_id):
-        return self.__get_host_item(host_id, const.OS_CONFIG, {})
-
-    def get_host_domain(self, host_id):
-        os_config = self.get_host_os_config(host_id)
-        os_general_config = os_config.setdefault(const.OS_CONFIG_GENERAL, {})
-        domain = os_general_config.setdefault(const.DOMAIN, None)
-        if domain is None:
-            global_config = self.get_cluster_os_config()
-            global_general = global_config.setdefault(const.OS_CONFIG_GENERAL,
-                                                      {})
-            domain = global_general.setdefault(const.DOMAIN, None)
-
-        return domain
-
-    def get_host_network_mapping(self, host_id):
-        package_config = self.get_host_package_config(host_id)
-        if const.NETWORK_MAPPING not in package_config:
-            network_mapping = self.get_cluster_network_mapping()
-        else:
-            network_mapping = package_config[const.NETWORK_MAPPING]
-
-        return network_mapping
-
-    def get_host_package_config(self, host_id):
-        return self.__get_host_item(host_id, const.PK_CONFIG, {})
-
-    def get_host_deployed_os_config(self, host_id):
-        host_info = self._get_host_info(host_id)
-        return host_info.setdefault(const.DEPLOYED_OS_CONFIG, {})
-
-    def get_host_deployed_package_config(self, host_id):
-        host_info = self._get_host_info(host_id)
-        return host_info.setdefault(const.DEPLOYED_PK_CONFIG, {})
-
-    def get_host_roles(self, host_id):
-        return self.__get_host_item(host_id, const.ROLES, [])
-
-    def get_all_hosts_roles(self, hosts_id_list=None):
-        roles = []
-        if hosts_id_list is None:
-            hosts_id_list = self.get_host_id_list()
-
-        for host_id in hosts_id_list:
-            host_roles = self.get_host_roles(host_id)
-            roles.extend([role for role in host_roles if role not in roles])
-
-        return roles
-
-    def get_host_roles_mapping(self, host_id):
-        roles_mapping = {}
-        deployed_pk_config = self.get_host_package_config(host_id)
-
-        if const.ROLES_MAPPING not in deployed_pk_config:
-            roles_mapping = self._get_host_roles_mapping_helper(host_id)
-            deployed_pk_config[const.ROLES_MAPPING] = roles_mapping
-        else:
-            roles_mapping = deployed_pk_config[const.ROLES_MAPPING]
-
-        return deepcopy(roles_mapping)
-
-    def get_host_ipmi_info(self, host_id):
-        ipmi_info = self.__get_host_item(host_id, const.IPMI, {})
-
-        if not ipmi_info:
-            return (None, None, None)
-
-        ipmi_ip = ipmi_info[const.IP_ADDR]
-        ipmi_user = ipmi_info[const.IPMI_CREDS][const.USERNAME]
-        ipmi_pass = ipmi_info[const.IPMI_CREDS][const.PASSWORD]
-
-        return (ipmi_ip, ipmi_user, ipmi_pass)
-
-    def __get_adapter_item(self, item, default_value=None):
-        if not self.adapter_info:
-            logging.info("Adapter Info is None!")
-            return None
-
-        return deepcopy(self.adapter_info.setdefault(item, default_value))
-
-    def get_adapter_name(self):
-        return self.__get_adapter_item(const.NAME, None)
-
-    def get_dist_system_name(self):
-        return self.__get_adapter_item(const.NAME, None)
-
-    def get_adapter_health_check_cmd(self):
-        return self.__get_adapter_item(const.HEALTH_CHECK_CMD)
-
-    def get_os_installer_settings(self):
-        installer_info = self.__get_adapter_item(const.OS_INSTALLER, {})
-        return installer_info.setdefault(const.INSTALLER_SETTINGS, {})
-
-    def get_pk_installer_settings(self):
-        installer_info = self.__get_adapter_item(const.PK_INSTALLER, {})
-        return installer_info.setdefault(const.INSTALLER_SETTINGS, {})
-
-    def get_os_config_metadata(self):
-        metadata = self.__get_adapter_item(const.METADATA, {})
-        return metadata.setdefault(const.OS_CONFIG, {})
-
-    def get_pk_config_meatadata(self):
-        metadata = self.__get_adapter_item(const.METADATA, {})
-        return metadata.setdefault(const.PK_CONFIG, {})
-
-    def get_adapter_all_flavors(self):
-        return self.__get_adapter_item(const.FLAVORS, [])
-
-    def get_adapter_flavor(self, flavor_name):
-        flavors = self.__get_adapter_item(const.FLAVORS, [])
-        for flavor in flavors:
-            if flavor[const.FLAVOR_NAME] == flavor_name:
-                return flavor
-
-        return None
-
-    def _get_cluster_roles_mapping_helper(self):
+        self.id = self.cluster_info.get(const.ID)
+        self.name = self.cluster_info.get(const.NAME)
+        self.os_version = self.cluster_info.get(const.OS_VERSION)
+        self.flavor = self.cluster_info.setdefault(
+            const.FLAVOR, {}
+        )
+        self.os_config = self.cluster_info.setdefault(
+            const.OS_CONFIG, {}
+        )
+        self.package_config = self.cluster_info.setdefault(
+            const.PK_CONFIG, {}
+        )
+        self.deployed_os_config = self.cluster_info.setdefault(
+            const.DEPLOYED_OS_CONFIG, {}
+        )
+        self.deployed_package_config = self.cluster_info.setdefault(
+            const.DEPLOYED_PK_CONFIG, {}
+        )
+        self.network_mapping = self.package_config.setdefault(
+            const.NETWORK_MAPPING, {}
+        )
+
+        os_config_general = self.os_config.setdefault(
+            const.OS_CONFIG_GENERAL, {}
+        )
+        self.domain = os_config_general.setdefault(const.DOMAIN, None)
+        self.hosts = []
+
+    def add_host(self, host):
+        self.hosts.append(host)
+
+    @property
+    def roles_mapping(self):
+        deploy_config = self.deployed_package_config
+        return deploy_config.setdefault(
+            const.ROLES_MAPPING, self._get_cluster_roles_mapping()
+        )
+
+    def _get_cluster_roles_mapping(self):
         """The ouput format will be as below, for example:
 
         {
@@ -369,37 +131,364 @@ class BaseConfigManager(object):
                 ...
         }
         """
-        mapping = {}
-        hosts_id_list = self.get_host_id_list()
-        network_mapping = self.get_cluster_network_mapping()
-        if not network_mapping:
+        mapping = defaultdict(list)
+        for host in self.hosts:
+            for role, value in host.roles_mapping.iteritems():
+                mapping[role].append(value)
+
+        return dict(mapping)
+
+    @property
+    def base_info(self):
+        return {
+            const.ID: self.id,
+            const.NAME: self.name,
+            const.OS_VERSION: self.os_version
+        }
+
+
+class HostInfo(object):
+    def __init__(self, host_info, cluster_info):
+        self.host_info = host_info
+        self.cluster_info = cluster_info
+        self.id = self.host_info.get(const.ID)
+        self.name = self.host_info.get(const.NAME)
+        self.mac = self.host_info.get(const.MAC_ADDR)
+        self.hostname = self.host_info.get(const.HOSTNAME)
+        self.networks = self.host_info.setdefault(const.NETWORKS, {})
+        self.os_config = self.host_info.setdefault(const.OS_CONFIG, {})
+
+        self.package_config = self.host_info.setdefault(const.PK_CONFIG, {})
+        self.roles = self.host_info.setdefault(const.ROLES, [])
+        self.ipmi = deepcopy(self.host_info.setdefault(const.IPMI, {}))
+        self.reinstall_os_flag = self.host_info.get(const.REINSTALL_OS_FLAG)
+        self.deployed_os_config = self.host_info.setdefault(
+            const.DEPLOYED_OS_CONFIG, {}
+        )
+        self.deployed_package_config = self.host_info.setdefault(
+            const.DEPLOYED_PK_CONFIG, {}
+        )
+
+        os_general_config = self.os_config.setdefault(
+            const.OS_CONFIG_GENERAL, {}
+        )
+        domain = os_general_config.setdefault(const.DOMAIN, None)
+        if domain is None:
+            self.domain = self.cluster_info.domain
+        else:
+            self.domain = domain
+
+        if const.DNS in host_info:
+            self.dns = host_info[const.DNS]
+        else:
+            self.dns = '.'.join((self.hostname, self.domain))
+
+        if const.NETWORK_MAPPING not in self.package_config:
+            self.network_mapping = self.cluster_info.network_mapping
+        else:
+            self.network_mapping = self.package_config[const.NETWORK_MAPPING]
+
+        if const.ROLES_MAPPING not in self.deployed_package_config:
+            self.roles_mapping = self._get_host_roles_mapping()
+            self.deployed_package_config[
+                const.ROLES_MAPPING
+            ] = self.roles_mapping
+        else:
+            self.roles_mapping = \
+                self.deployed_package_config[const.ROLES_MAPPING]
+
+        self.cluster_info.add_host(self)
+
+    def valid_interface(self, interface):
+        if interface not in self.networks:
+            raise RuntimeError("interface %s is invalid" % interface)
+
+    def get_interface(self, interface):
+        self.valid_interface(interface)
+        return self.networks[interface]
+
+    def get_interface_ip(self, interface):
+        return self.get_interface(interface).get(const.IP_ADDR)
+
+    def get_interface_netmask(self, interface):
+        return self.get_interface(interface).get(const.NETMASK)
+
+    def get_interface_subnet(self, interface):
+        return self.get_interface(interface).get(const.SUBNET)
+
+    def is_interface_promiscuous(self, interface):
+        return self.get_interface(interface).get(const.PROMISCUOUS_FLAG)
+
+    def is_interface_mgmt(self, interface):
+        return self.get_interface(interface).get(const.MGMT_NIC_FLAG)
+
+    def _get_host_roles_mapping(self):
+        if not self.network_mapping:
             return {}
 
-        for host_id in hosts_id_list:
-            roles_mapping = self.get_host_roles_mapping(host_id)
-            for role, value in roles_mapping.items():
-                mapping.setdefault(role, []).append(value)
-        return mapping
-
-    def _get_host_roles_mapping_helper(self, host_id):
-        """The format will be the same as cluster roles mapping."""
-        network_mapping = self.get_host_network_mapping(host_id)
-        if not network_mapping:
-            return {}
-
-        hostname = self.get_hostname(host_id)
-        roles = self.get_host_roles(host_id)
-        interfaces = self.get_host_interfaces(host_id)
+        net_info = {const.HOSTNAME: self.hostname}
+        for k, v in self.network_mapping.items():
+            try:
+                net_info[k] = self.networks[v[const.NIC]]
+                net_info[k][const.NIC] = v[const.NIC]
+            except Exception:
+                pass
 
         mapping = {}
-        temp = {const.HOSTNAME: hostname}
-        for key in network_mapping:
-            nic = network_mapping[key][const.NIC]
-            if nic in interfaces:
-                temp[key] = self.get_host_interface_config(host_id, nic)
-                temp[key][const.NIC] = nic
-
-        for role in roles:
+        for role in self.roles:
             role = role.replace("-", "_")
-            mapping[role] = temp
+            mapping[role] = net_info
+
         return mapping
+
+    @property
+    def baseinfo(self):
+        return {
+            const.REINSTALL_OS_FLAG: self.reinstall_os_flag,
+            const.MAC_ADDR: self.mac,
+            const.NAME: self.name,
+            const.HOSTNAME: self.hostname,
+            const.DNS: self.dns,
+            const.NETWORKS: deepcopy(self.networks)
+        }
+
+
+class BaseConfigManager(object):
+    def __init__(self, adapter_info={}, cluster_info={}, hosts_info={}):
+        assert(adapter_info and isinstance(adapter_info, dict))
+        assert(cluster_info and isinstance(cluster_info, dict))
+        assert(hosts_info and isinstance(hosts_info, dict))
+
+        self.adapter_info = AdapterInfo(adapter_info)
+        self.cluster_info = ClusterInfo(cluster_info)
+        self.hosts_info = dict([(k, HostInfo(v, self.cluster_info))
+                               for k, v in hosts_info.iteritems()])
+
+    def get_adapter_name(self):
+        return self.adapter_info.name
+
+    def get_dist_system_name(self):
+        return self.adapter_info.dist_system_name
+
+    def get_adapter_health_check_cmd(self):
+        return self.adapter_info.health_check_cmd
+
+    def get_os_installer_settings(self):
+        return self.adapter_info.os_installer[const.INSTALLER_SETTINGS]
+
+    def get_pk_installer_settings(self):
+        return self.adapter_info.package_installer[const.INSTALLER_SETTINGS]
+
+    def get_os_config_metadata(self):
+        return self.adapter_info.metadata[const.OS_CONFIG]
+
+    def get_pk_config_meatadata(self):
+        return self.adapter_info.metadata[const.PK_CONFIG]
+
+    def get_adapter_all_flavors(self):
+        return self.adapter_info.flavor_list
+
+    def get_adapter_flavor(self, flavor_name):
+        return self.adapter_info.get_flavor(flavor_name)
+
+    def get_cluster_id(self):
+        return self.cluster_info.id
+
+    def get_clustername(self):
+        return self.cluster_info.name
+
+    def get_os_version(self):
+        return self.cluster_info.os_version
+
+    def get_cluster_os_config(self):
+        return self.cluster_info.os_config
+
+    def get_cluster_baseinfo(self):
+        return self.cluster_info.base_info
+
+    def get_cluster_flavor_name(self):
+        return self.cluster_info.flavor.get(const.FLAVOR_NAME)
+
+    def get_cluster_flavor_roles(self):
+        return self.cluster_info.flavor.get(const.ROLES, [])
+
+    def get_cluster_flavor_template(self):
+        return self.cluster_info.flavor.get(const.TMPL)
+
+    def get_cluster_package_config(self):
+        return self.cluster_info.package_config
+
+    def get_cluster_network_mapping(self):
+        mapping = self.cluster_info.network_mapping
+        logging.info("Network mapping in the config is '%s'!", mapping)
+        return mapping
+
+    def get_cluster_deployed_os_config(self):
+        return self.cluster_info.deployed_os_config
+
+    def get_cluster_deployed_package_config(self):
+        return self.cluster_info.deployed_package_config
+
+    def get_cluster_roles_mapping(self):
+        return self.cluster_info.roles_mapping
+
+    def validate_host(self, host_id):
+        if host_id not in self.hosts_info:
+            raise RuntimeError("host_id %s is invalid" % host_id)
+
+    def get_host_id_list(self):
+        return self.hosts_info.keys()
+
+    def get_hosts_id_list_for_os_installation(self):
+        """Get info of hosts which need to install/reinstall OS."""
+        return [
+            id for id, info in self.hosts_info.items()
+            if info.reinstall_os_flag
+        ]
+
+    def get_server_credentials(self):
+        cluster_os_config = self.get_cluster_os_config()
+        if not cluster_os_config:
+            logging.info("cluster os_config is None!")
+            return ()
+
+        username = cluster_os_config[const.SERVER_CREDS][const.USERNAME]
+        password = cluster_os_config[const.SERVER_CREDS][const.PASSWORD]
+        return (username, password)
+
+    def _get_host_info(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id]
+
+    def get_host_baseinfo(self, host_id):
+        self.validate_host(host_id)
+        host_info = self.hosts_info[host_id]
+        return host_info.baseinfo
+
+    def get_host_fullname(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].name
+
+    def get_host_dns(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].dns
+
+    def get_host_mac_address(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].mac
+
+    def get_hostname(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].hostname
+
+    def get_host_networks(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].networks
+
+    def get_host_interfaces(self, host_id):
+        # get interface names
+        return self.get_host_networks(host_id).keys()
+
+    def get_host_interface_ip(self, host_id, interface):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].get_interface_ip(interface)
+
+    def get_host_interface_netmask(self, host_id, interface):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].get_interface_netmask(interface)
+
+    def get_host_interface_subnet(self, host_id, interface):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].get_interface_subnet(interface)
+
+    def is_interface_promiscuous(self, host_id, interface):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].is_interface_promiscuous(interface)
+
+    def is_interface_mgmt(self, host_id, interface):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].is_interface_mgmt(interface)
+
+    def get_host_os_config(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].os_config
+
+    def get_host_domain(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].domain
+
+    def get_host_network_mapping(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].network_mapping
+
+    def get_host_package_config(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].package_config
+
+    def get_host_deployed_os_config(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].deployed_os_config
+
+    def get_host_deployed_package_config(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].deployed_package_config
+
+    def get_host_roles(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].roles
+
+    def get_all_hosts_roles(self, hosts_id_list=None):
+        roles = []
+        for host_id, host_info in self.hosts_info.iteritems():
+            roles.extend(host_info.roles)
+
+        return list(set(roles))
+
+    def get_hosts_ip_settings(self, ip_settings, sys_intf_mappings):
+        logging.info(
+            "get_hosts_ip_settings:ip_settings=%s, sys_intf_mappings=%s" %
+            (ip_settings, sys_intf_mappings)
+        )
+
+        intf_alias = {}
+        for m in sys_intf_mappings:
+            if "vlan_tag" in m:
+                intf_alias[m["name"]] = m["name"]
+            else:
+                intf_alias[m["name"]] = m["interface"]
+
+        mappings = {}
+        hosts_id_list = self.get_host_id_list()
+        for host_id in hosts_id_list:
+            hostname = self.get_hostname(host_id)
+            mappings[hostname] = []
+            for ip_info in ip_settings:
+                logging.info("ip_info=%s" % ip_info)
+                new_ip_info = deepcopy(ip_info)
+                del new_ip_info["ip_ranges"]
+
+                ip_ranges = ip_info["ip_ranges"]
+                new_ip_info["netmask"] = netaddr.IPNetwork(
+                    ip_info["cidr"]
+                ).netmask.bin.count("1")
+                new_ip_info["ip"] = get_ip_addr(ip_ranges).next()
+                new_ip_info["alias"] = intf_alias[ip_info["name"]]
+                mappings[hostname].append(new_ip_info)
+
+        return {"ip_settings": mappings}
+
+    def get_host_roles_mapping(self, host_id):
+        self.validate_host(host_id)
+        return self.hosts_info[host_id].roles_mapping
+
+    def get_host_ipmi_info(self, host_id):
+        self.validate_host(host_id)
+        if self.hosts_info[host_id].ipmi:
+            return (
+                self.hosts_info[host_id].ipmi[const.IP_ADDR],
+                self.hosts_info[host_id].ipmi
+                [const.IPMI_CREDS][const.USERNAME],
+                self.hosts_info[host_id].ipmi
+                [const.IPMI_CREDS][const.PASSWORD])
+        else:
+            return (None, None, None)
diff --git a/compass/deployment/installers/os_installers/cobbler/cobbler.py b/compass/deployment/installers/os_installers/cobbler/cobbler.py
index cfd83105..302c9be2 100644
--- a/compass/deployment/installers/os_installers/cobbler/cobbler.py
+++ b/compass/deployment/installers/os_installers/cobbler/cobbler.py
@@ -39,6 +39,7 @@ class CobblerInstaller(OSInstaller):
     TMPL_DIR = 'tmpl_dir'
     SYS_TMPL = 'system.tmpl'
     SYS_TMPL_NAME = 'system.tmpl'
+    SYS_PROFILE_NAME = 'profile.tmpl'
     PROFILE = 'profile'
 
     POWER_TYPE = 'power_type'
@@ -130,6 +131,8 @@ class CobblerInstaller(OSInstaller):
 
         global_vars_dict = self._get_cluster_tmpl_vars_dict()
 
+        self.update_profile_config_to_cobbler(profile, global_vars_dict)
+
         hosts_deploy_config = {}
 
         for host_id in host_ids:
@@ -242,6 +245,14 @@ class CobblerInstaller(OSInstaller):
 
         return system_config
 
+    def _generate_profile_config(self, cluster_vars_dict):
+        os_version = self.config_manager.get_os_version()
+        tmpl_path = os.path.join(
+            os.path.join(self.tmpl_dir, os_version), self.SYS_PROFILE_NAME
+        )
+
+        return self.get_config_from_template(tmpl_path, cluster_vars_dict)
+
     def _get_profile_from_server(self, os_version):
         """Get profile from cobbler server."""
         result = self.remote.find_profile({'name': os_version})
@@ -267,6 +278,10 @@ class CobblerInstaller(OSInstaller):
 
         return sys_id
 
+    def _get_profile_id(self, profilename):
+        """get profile reference id for the cluster."""
+        return self.remote.get_profile_handle(profilename, self.token)
+
     def _clean_system(self, hostname):
         """clean system."""
         sys_name = hostname
@@ -283,6 +298,12 @@ class CobblerInstaller(OSInstaller):
 
         self.remote.save_system(sys_id, self.token)
 
+    def _update_profile_config(self, profile_id, profile_config):
+        for key, value in profile_config.iteritems():
+            self.remote.modify_profile(profile_id, str(key), value, self.token)
+
+        self.remote.save_profile(profile_id, self.token)
+
     def _netboot_enabled(self, sys_id):
         """enable netboot."""
         self.remote.modify_system(sys_id, 'netboot_enabled', True, self.token)
@@ -303,6 +324,18 @@ class CobblerInstaller(OSInstaller):
         self._update_system_config(sys_id, system_config)
         self._netboot_enabled(sys_id)
 
+    def update_profile_config_to_cobbler(self, profilename, cluster_vars_dict):
+        """update profile config and upload to cobbler server."""
+
+        profile_id = self._get_profile_id(profilename)
+
+        profile_config = self._generate_profile_config(cluster_vars_dict)
+        logging.debug(
+            '%s profile config to update: %s', profilename, profile_config
+        )
+
+        self._update_profile_config(profile_id, profile_config)
+
     def delete_hosts(self):
         hosts_id_list = self.config_manager.get_host_id_list()
         logging.debug('delete hosts %s', hosts_id_list)
diff --git a/compass/deployment/installers/pk_installers/ansible_installer/ansible_installer.py b/compass/deployment/installers/pk_installers/ansible_installer/ansible_installer.py
index 4f39b81b..6407d0e8 100644
--- a/compass/deployment/installers/pk_installers/ansible_installer/ansible_installer.py
+++ b/compass/deployment/installers/pk_installers/ansible_installer/ansible_installer.py
@@ -33,6 +33,18 @@ from compass.utils import util
 NAME = "AnsibleInstaller"
 
 
+def byteify(input):
+    if isinstance(input, dict):
+        return dict([(byteify(key), byteify(value))
+                    for key, value in input.iteritems()])
+    elif isinstance(input, list):
+        return [byteify(element) for element in input]
+    elif isinstance(input, unicode):
+        return input.encode('utf-8')
+    else:
+        return input
+
+
 class AnsibleInstaller(PKInstaller):
     INVENTORY_TMPL_DIR = 'inventories'
     GROUPVARS_TMPL_DIR = 'vars'
@@ -161,7 +173,15 @@ class AnsibleInstaller(PKInstaller):
         logging.info("cluster role mapping is %s", mapping)
         cluster_vars_dict[const.ROLES_MAPPING] = mapping
 
-        return cluster_vars_dict
+        # get ip settings to vars_dict
+        hosts_ip_settings = self.config_manager.get_hosts_ip_settings(
+            pk_meta_dict["network_cfg"]["ip_settings"],
+            pk_meta_dict["network_cfg"]["sys_intf_mappings"]
+        )
+        logging.info("hosts_ip_settings is %s", hosts_ip_settings)
+        cluster_vars_dict["ip_settings"] = hosts_ip_settings
+
+        return byteify(cluster_vars_dict)
 
     def _generate_inventory_attributes(self, global_vars_dict):
         inventory_tmpl_path = os.path.join(
@@ -244,11 +264,16 @@ class AnsibleInstaller(PKInstaller):
         dirs = self.runner_dirs
         files = self.runner_files
         for dir in dirs:
+            items = dir.split(':')
+            src, dst = items[0], items[-1]
+            if not os.path.exists(os.path.join(self.ansible_dir, src)):
+                continue
+
             shutil.copytree(
-                os.path.join(self.ansible_dir, dir),
+                os.path.join(self.ansible_dir, src),
                 os.path.join(
                     ansible_run_destination,
-                    dir
+                    dst
                 )
             )
         for file in files:
diff --git a/compass/tests/deployment/installers/os_installers/cobbler/test_cobbler.py b/compass/tests/deployment/installers/os_installers/cobbler/test_cobbler.py
index f68ceabe..4c020657 100644
--- a/compass/tests/deployment/installers/os_installers/cobbler/test_cobbler.py
+++ b/compass/tests/deployment/installers/os_installers/cobbler/test_cobbler.py
@@ -23,6 +23,7 @@ from copy import deepcopy
 from mock import Mock
 import os
 import unittest2
+import xmlrpclib
 
 
 os.environ['COMPASS_IGNORE_SETTING'] = 'true'
@@ -59,14 +60,16 @@ class TestCobblerInstaller(unittest2.TestCase):
                         "netmask": "255.255.255.0",
                         "is_mgmt": True,
                         "is_promiscuous": False,
-                        "subnet": "12.234.32.0/24"
+                        "subnet": "12.234.32.0/24",
+                        "interface": "vnet0"
                     },
                     "vnet1": {
                         "ip": "172.16.1.1",
                         "netmask": "255.255.255.0",
                         "is_mgmt": False,
                         "is_promiscuous": False,
-                        "subnet": "172.16.1.0/24"
+                        "subnet": "172.16.1.0/24",
+                        "interface": "vnet1"
                     }
                 }
             },
@@ -115,7 +118,8 @@ class TestCobblerInstaller(unittest2.TestCase):
                                            hosts_info)
 
         CobblerInstaller._get_cobbler_server = Mock()
-        CobblerInstaller._get_cobbler_server.return_value = "mock_server"
+        CobblerInstaller._get_cobbler_server.return_value = \
+            DummyCobblerRemote()
         CobblerInstaller._get_token = Mock()
         CobblerInstaller._get_token.return_value = "mock_token"
 
@@ -284,18 +288,17 @@ class TestCobblerInstaller(unittest2.TestCase):
         self.maxDiff = None
         self.assertDictEqual(expected_output, output)
 
-    def test_check_and_set_system_impi(self):
-        self.test_cobbler._update_system_config = Mock()
-        self.test_cobbler.dump_system_info = Mock()
-        self.test_cobbler.dump_system_info.return_value = {
-            'power_type': 'ipmilan',
-            'power_address': '',
-            'power_user': '',
-            'power_pass': ''
-        }
-        output = self.test_cobbler._check_and_set_system_impi(3, "test_sys_id")
-        self.assertTrue(output)
 
+class DummyCobblerRemote:
+
+    def __init__(self):
+        return
+
+    def get_profile_handle(self, profilename, token):
+        return "dummyprofilehandle"
+
+    def save_profile(self, profile_id, token):
+        return "dummysaveprofile"
 
 if __name__ == '__main__':
     flags.init()
diff --git a/compass/tests/deployment/installers/test_config_manager.py b/compass/tests/deployment/installers/test_config_manager.py
index 1be264a0..acfa5f21 100644
--- a/compass/tests/deployment/installers/test_config_manager.py
+++ b/compass/tests/deployment/installers/test_config_manager.py
@@ -61,11 +61,6 @@ class TestConfigManager(unittest2.TestCase):
         output = self.test_config_manager.get_host_id_list()
         self.assertEqual(expected_output, output)
 
-    def test_get_cluster_flavor_info(self):
-        expected_output = self.cluster_test_info[const.FLAVOR]
-        output = self.test_config_manager.get_cluster_flavor_info()
-        self.assertDictEqual(expected_output, output)
-
     def test_get_cluster_roles_mapping(self):
         expected_output = {
             "os_controller": [{
diff --git a/conf/adapter/ansible_openstack.conf b/conf/adapter/ansible_openstack.conf
deleted file mode 100644
index ad82e169..00000000
--- a/conf/adapter/ansible_openstack.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-NAME = 'openstack_juno'
-DISPLAY_NAME = 'Openstack Juno'
-PARENT = 'openstack'
-PACKAGE_INSTALLER = 'ansible_installer'
-OS_INSTALLER = 'cobbler'
-SUPPORTED_OS_PATTERNS = ['(?i)ubuntu-14\.04.*']
-DEPLOYABLE = True
diff --git a/conf/adapter/ansible_openstack_juno.conf b/conf/adapter/ansible_openstack_juno.conf
new file mode 100644
index 00000000..de52a059
--- /dev/null
+++ b/conf/adapter/ansible_openstack_juno.conf
@@ -0,0 +1,7 @@
+NAME = 'openstack_juno'
+DISPLAY_NAME = 'Openstack Juno'
+PARENT = 'openstack'
+PACKAGE_INSTALLER = 'ansible_installer_juno'
+OS_INSTALLER = 'cobbler'
+SUPPORTED_OS_PATTERNS = ['(?i)ubuntu-14\.04.*', '(?i)ubuntu-14\.04\.3.*', '(?i)CentOS-7.*1503-01.*']
+DEPLOYABLE = True
diff --git a/conf/adapter/ansible_openstack_kilo.conf b/conf/adapter/ansible_openstack_kilo.conf
new file mode 100644
index 00000000..328a689c
--- /dev/null
+++ b/conf/adapter/ansible_openstack_kilo.conf
@@ -0,0 +1,7 @@
+NAME = 'openstack_kilo'
+DISPLAY_NAME = 'Openstack Kilo'
+PARENT = 'openstack'
+PACKAGE_INSTALLER = 'ansible_installer_kilo'
+OS_INSTALLER = 'cobbler'
+SUPPORTED_OS_PATTERNS = ['(?i)ubuntu-14\.04.*', '(?i)ubuntu-14\.04\.3.*', '(?i)CentOS-7.*1503-01.*']
+DEPLOYABLE = True
diff --git a/conf/flavor/openstack_ansible.conf b/conf/flavor/openstack_juno_ansible.conf
similarity index 61%
rename from conf/flavor/openstack_ansible.conf
rename to conf/flavor/openstack_juno_ansible.conf
index fc792e78..e935cf9d 100644
--- a/conf/flavor/openstack_ansible.conf
+++ b/conf/flavor/openstack_juno_ansible.conf
@@ -9,7 +9,7 @@ FLAVORS = [{
     'display_name': 'Single Controller',
     'template': 'single-controller.tmpl',
     'roles': [
-        'controller', 'compute', 'network', 'storage'
+        'controller', 'compute', 'network', 'storage', 'odl', 'onos'
     ],
 }, {
     'flavor': 'multinodes',
@@ -17,7 +17,16 @@ FLAVORS = [{
     'template': 'multinodes.tmpl',
     'roles': [
         'compute-controller', 'compute-worker', 'network-server',
-        'network-worker', 'database', 'messaging', 'image',
+        'network-worker', 'database', 'messaging', 'image', 'odl',
         'dashboard', 'identity', 'storage-controller', 'storage-volume'
     ],
+}, {
+    'flavor': 'HA-ansible-multinodes-juno',
+    'display_name': 'HA-ansible-multinodes-juno',
+    'template': 'HA-ansible-multinodes.tmpl',
+    'roles': [
+        'controller', 'compute', 'ha', 'odl', 'onos', 'ceph'
+    ],
 }]
+
+
diff --git a/conf/flavor/openstack_kilo_ansible.conf b/conf/flavor/openstack_kilo_ansible.conf
new file mode 100644
index 00000000..f525ffda
--- /dev/null
+++ b/conf/flavor/openstack_kilo_ansible.conf
@@ -0,0 +1,32 @@
+ADAPTER_NAME = 'openstack_kilo'
+FLAVORS = [{
+    'flavor': 'allinone',
+    'display_name': 'All-In-One',
+    'template': 'allinone.tmpl',
+    'roles': ['allinone-compute'],
+}, {
+    'flavor': 'single-controller',
+    'display_name': 'Single Controller',
+    'template': 'single-controller.tmpl',
+    'roles': [
+        'controller', 'compute', 'network', 'storage', 'odl', 'onos'
+    ],
+}, {
+    'flavor': 'multinodes',
+    'display_name': 'Multi-nodes',
+    'template': 'multinodes.tmpl',
+    'roles': [
+        'compute-controller', 'compute-worker', 'network-server',
+        'network-worker', 'database', 'messaging', 'image', 'odl',
+        'dashboard', 'identity', 'storage-controller', 'storage-volume'
+    ],
+}, {
+    'flavor': 'HA-ansible-multinodes-kilo',
+    'display_name': 'HA-ansible-multinodes',
+    'template': 'HA-ansible-multinodes.tmpl',
+    'roles': [
+        'controller', 'compute', 'ha', 'odl', 'onos', 'ceph'
+    ],
+}]
+
+
diff --git a/conf/flavor_metadata/HA-ansible-multinodes-juno.conf b/conf/flavor_metadata/HA-ansible-multinodes-juno.conf
new file mode 100644
index 00000000..e3ff468b
--- /dev/null
+++ b/conf/flavor_metadata/HA-ansible-multinodes-juno.conf
@@ -0,0 +1,19 @@
+ADAPTER = 'openstack_juno'
+FLAVOR = 'HA-ansible-multinodes-juno'
+METADATA = {
+    'ha_proxy': {
+        '_self': {
+        },
+        'vip': {
+            '_self': {
+                'is_required': True,
+                'field': 'general',
+                'mapping_to': 'ha_vip'
+            }
+        },
+        'test': {
+            '_self': {
+            },
+        }
+    }
+}
diff --git a/conf/flavor_metadata/HA-ansible-multinodes-kilo.conf b/conf/flavor_metadata/HA-ansible-multinodes-kilo.conf
new file mode 100644
index 00000000..c944c0dc
--- /dev/null
+++ b/conf/flavor_metadata/HA-ansible-multinodes-kilo.conf
@@ -0,0 +1,19 @@
+ADAPTER = 'openstack_kilo'
+FLAVOR = 'HA-ansible-multinodes-kilo'
+METADATA = {
+    'ha_proxy': {
+        '_self': {
+        },
+        'vip': {
+            '_self': {
+                'is_required': True,
+                'field': 'general',
+                'mapping_to': 'ha_vip'
+            }
+        },
+        'test': {
+            '_self': {
+            },
+        }
+    }
+}
diff --git a/conf/os/centos7.1.conf b/conf/os/centos7.1.conf
new file mode 100644
index 00000000..727d0114
--- /dev/null
+++ b/conf/os/centos7.1.conf
@@ -0,0 +1,3 @@
+NAME = 'CentOS-7-Minimal-1503-01-x86_64'
+PARENT = 'CentOS'
+DEPLOYABLE = True
diff --git a/conf/os/ubuntu14.04.3.conf b/conf/os/ubuntu14.04.3.conf
new file mode 100644
index 00000000..54b94a7c
--- /dev/null
+++ b/conf/os/ubuntu14.04.3.conf
@@ -0,0 +1,3 @@
+NAME = 'ubuntu-14.04.3-server-x86_64'
+PARENT = 'Ubuntu'
+DEPLOYABLE = True
diff --git a/conf/os_metadata/general.conf b/conf/os_metadata/general.conf
index 6848f3fa..f297fa2d 100644
--- a/conf/os_metadata/general.conf
+++ b/conf/os_metadata/general.conf
@@ -102,7 +102,21 @@ METADATA = {
                 'default_callback': default_localrepo,
                 'mapping_to': 'local_repo'
             }
-        }
+        },
+        'repo_name': {
+            '_self': {
+                'field': 'general',
+                'is_required': True,
+                'mapping_to': 'repo_name'
+            }
+        },
+        'deploy_type': {
+            '_self': {
+                'field': 'general',
+                'is_required': True,
+                'mapping_to': 'deploy_type'
+            }
+        },
     },
     'server_credentials': {
         '_self': {
diff --git a/conf/package_installer/ansible-juno.conf b/conf/package_installer/ansible-juno.conf
index 7f9ca184..10031b28 100644
--- a/conf/package_installer/ansible-juno.conf
+++ b/conf/package_installer/ansible-juno.conf
@@ -1,5 +1,5 @@
 NAME = 'ansible_installer'
-INSTANCE_NAME = 'ansible_installer'
+INSTANCE_NAME = 'ansible_installer_juno'
 SETTINGS = {
     'ansible_dir': '/var/ansible',
     'ansible_run_dir': '/var/ansible/run',
@@ -8,6 +8,6 @@ SETTINGS = {
     'inventory_file': 'inventory.yml',
     'group_variable': 'all',
     'etc_hosts_path': 'roles/common/templates/hosts',
-    'runner_dirs': ['roles']
+    'runner_dirs': ['roles','openstack_juno/templates:templates']
 }
 
diff --git a/conf/package_installer/ansible-kilo.conf b/conf/package_installer/ansible-kilo.conf
new file mode 100644
index 00000000..6a465d9f
--- /dev/null
+++ b/conf/package_installer/ansible-kilo.conf
@@ -0,0 +1,13 @@
+NAME = 'ansible_installer'
+INSTANCE_NAME = 'ansible_installer_kilo'
+SETTINGS = {
+    'ansible_dir': '/var/ansible',
+    'ansible_run_dir': '/var/ansible/run',
+    'ansible_config': 'ansible.cfg',
+    'playbook_file': 'site.yml',
+    'inventory_file': 'inventory.yml',
+    'group_variable': 'all',
+    'etc_hosts_path': 'roles/common/templates/hosts',
+    'runner_dirs': ['roles','openstack_kilo/templates:templates']
+}
+
diff --git a/conf/package_metadata/openstack.conf b/conf/package_metadata/openstack.conf
index fc44e3dc..b341fdcc 100644
--- a/conf/package_metadata/openstack.conf
+++ b/conf/package_metadata/openstack.conf
@@ -6,17 +6,17 @@ METADATA = {
         },
         'service_credentials': {
             '_self': {
-	            'required_in_whole_config': True,
-		        'key_extensions': {
-		        '$service': ['image', 'compute', 'dashboard', 'identity', 'metering', 'rabbitmq', 'volume', 'mysql']
-		        },
+                'required_in_whole_config': True,
+                'key_extensions': {
+                '$service': ['image', 'compute', 'dashboard', 'identity', 'metering', 'rabbitmq', 'volume', 'mysql']
+                },
                 'mapping_to': 'service_credentials'
             },
             '$service': {
-	           '_self': {
-		           'required_in_whole_config': True,
-		           'mapping_to': '$service'
-		       },
+               '_self': {
+                   'required_in_whole_config': True,
+                   'mapping_to': '$service'
+               },
                'username': {
                    '_self': {
                        'is_required': True,
@@ -34,18 +34,18 @@ METADATA = {
             }
         },
         'console_credentials': {
-	        '_self': {
-	            'required_in_whole_config': True,
-		        'key_extensions': {
-		            '$console': ['admin', 'compute', 'dashboard', 'image', 'metering', 'network', 'object-store', 'volume']
-		        },
+            '_self': {
+                'required_in_whole_config': True,
+                'key_extensions': {
+                    '$console': ['admin', 'compute', 'dashboard', 'image', 'metering', 'network', 'object-store', 'volume']
+                },
                 'mapping_to': 'console_credentials'
-	        },
+            },
             '$console': {
-	            '_self': {
-		            'required_in_whole_config': True,
-		            'mapping_to': '$console'
-		        },
+                '_self': {
+                    'required_in_whole_config': True,
+                    'mapping_to': '$console'
+                },
                 'username': {
                     '_self': {
                         'is_required': True,
@@ -63,6 +63,256 @@ METADATA = {
             }
         }
     },
+
+    'enable_secgroup': {
+        '_self': {
+            'mapping_to': 'enable_secgroup',
+            'field': 'anytype',
+            'is_required':False,
+            'default_value': True
+        }
+    },
+
+    'enable_fwaas': {
+        '_self': {
+            'mapping_to': 'enable_fwaas',
+            'field': 'anytype',
+            'is_required':False,
+            'default_value': True
+        }
+    },
+
+    'enable_vpnaas': {
+        '_self': {
+            'mapping_to': 'enable_vpnaas',
+            'field': 'anytype',
+            'is_required':False,
+            'default_value': True
+        }
+    },
+    'network_cfg': {
+        '_self': {
+            'mapping_to': 'network_cfg'
+        },
+
+        'nic_mappings': {
+            '_self': {
+                'mapping_to': 'nic_mappings',
+                'field': 'general_list'
+            }
+        },
+
+        'bond_mappings': {
+            '_self': {
+                'mapping_to': 'bond_mappings',
+                'field': 'general_list'
+            }
+        },
+
+        'sys_intf_mappings': {
+            '_self': {
+                'mapping_to': 'sys_intf_mappings',
+                'field': 'general_list'
+            }
+        },
+
+        'ip_settings': {
+             '_self': {
+                 'mapping_to': 'ip_settings',
+                 'field': 'general_list'
+             }
+        },
+
+        'provider_net_mappings': {
+            '_self': {
+                'mapping_to': 'provider_net_mappings',
+                'field': 'general_list'
+            }
+        },
+
+        'ceph_disk': {
+            '_self': {
+                'mapping_to': 'ceph_disk',
+                'field': 'general',
+                'is_required':False
+            }
+        },
+
+        'public_vip': {
+        '_self': {
+            'mapping_to': 'public_vip',
+            'is_required': False
+        },
+
+        'ip': {
+            '_self': {
+            'mapping_to': 'ip',
+            'is_required': True,
+            'field': 'general',
+            }
+        },
+        'netmask': {
+            '_self': {
+            'mapping_to': 'netmask',
+            'is_required': True,
+            'field': 'general',
+            }
+        },
+        'interface': {
+            '_self': {
+            'mapping_to': 'interface',
+            'is_required': True,
+            'field': 'general',
+            }
+        }
+        },
+
+        'internal_vip': {
+        '_self': {
+            'mapping_to': 'internal_vip',
+            'is_required': False
+        },
+
+        'ip': {
+            '_self': {
+            'mapping_to': 'ip',
+            'is_required': True,
+            'field': 'general',
+            }
+        },
+        'netmask': {
+            '_self': {
+            'mapping_to': 'netmask',
+            'is_required': True,
+            'field': 'general',
+            }
+        },
+        'interface': {
+            '_self': {
+            'mapping_to': 'interface',
+            'is_required': True,
+            'field': 'general',
+            }
+        }
+        },
+
+    'public_net_info': {
+        '_self': {
+            'mapping_to': 'public_net_info'
+        },
+
+        'enable': {
+            '_self': {
+            'mapping_to': 'enable',
+            'is_required': False,
+            'field': 'anytype',
+            'default_value': True
+            }
+        },
+
+        'network': {
+            '_self': {
+            'mapping_to': 'network',
+            'is_required': True,
+            'field': 'general',
+            'default_value': 'ext-net'
+            }
+        },
+
+        'type': {
+            '_self': {
+            'mapping_to': 'type',
+            'is_required': True,
+            'field': 'general',
+            'options': ['flat', 'vlan'],
+            'default_value': 'vlan'
+            }
+        },
+
+        'segment_id': {
+            '_self': {
+            'mapping_to': 'segment_id',
+            'is_required': False,
+            'field': 'anytype'
+            }
+        },
+
+        'subnet': {
+            '_self': {
+            'mapping_to': 'subnet',
+            'is_required': True,
+            'field': 'general',
+            'default_value': 'ext-subnet'
+            }
+        },
+
+        'provider_network': {
+            '_self': {
+            'mapping_to': 'provider_network',
+            'is_required': True,
+            'field': 'general',
+            'default_value': 'physnet'
+            }
+        },
+
+        'router': {
+            '_self': {
+            'mapping_to': 'router',
+            'is_required': True,
+            'field': 'general',
+            'default_value': 'ext-router'
+            }
+        },
+
+        'enable_dhcp': {
+            '_self': {
+            'mapping_to': 'enable_dhcp',
+            'is_required': True,
+            'field': 'anytype'
+            }
+        },
+
+        'no_gateway': {
+            '_self': {
+            'mapping_to': 'no_gateway',
+            'is_required': True,
+            'field': 'anytype'
+            }
+        },
+
+        'external_gw': {
+            '_self': {
+            'mapping_to': 'external_gw',
+            'is_required': False,
+            'field': 'general'
+            }
+        },
+
+        'floating_ip_cidr': {
+            '_self': {
+            'mapping_to': 'floating_ip_cidr',
+            'is_required': True,
+            'field': 'general'
+            }
+        },
+
+        'floating_ip_start': {
+            '_self': {
+            'mapping_to': 'floating_ip_start',
+            'is_required': True,
+            'field': 'general'
+            }
+        },
+
+        'floating_ip_end': {
+            '_self': {
+            'mapping_to': 'floating_ip_end',
+            'is_required': True,
+            'field': 'general'
+            }
+        }
+    },
+    },
     'neutron_config': {
         '_self': {
             'mapping_to': 'neutron_config'
@@ -110,29 +360,30 @@ METADATA = {
     'network_mapping': {
         '_self': {
             'required_in_whole_config': True,
-	        'key_extensions': {
-	    	    '$interface_type': ['management', 'external', 'storage', 'tenant']
-	        }
+            'key_extensions': {
+                '$interface_type': ['install']
+            }
         },
         '$interface_type': {
             '_self': {
                 'required_in_whole_config': True,
                 'field': 'anytype',
-		        'autofill_callback': autofill_network_mapping,
-		        'mapping_to': '$interface_type'
+                'autofill_callback': autofill_network_mapping,
+                'mapping_to': '$interface_type'
             },
-	        'interface': {
+            'interface': {
                 '_self': {
-		            'is_required': True,
+                    'is_required': True,
                     'field': 'general',
                 }
             },
             'subnet': {
                 '_self': {
-              	    'is_required': False,
+                    'is_required': False,
                     'field': 'general'
                 }
             }
         }
     },
+
 }
diff --git a/conf/role/openstack_ansible.conf b/conf/role/openstack_juno_ansible.conf
similarity index 79%
rename from conf/role/openstack_ansible.conf
rename to conf/role/openstack_juno_ansible.conf
index 7aa78dfa..65dace70 100644
--- a/conf/role/openstack_ansible.conf
+++ b/conf/role/openstack_juno_ansible.conf
@@ -63,4 +63,23 @@ ROLES = [{
     'role': 'network-worker',
     'display': 'Network worker node',
     'description': 'Network worker node'
+}, {
+    'role': 'odl',
+    'display': 'open day light',
+    'description': 'odl node',
+    'optional': True
+}, {
+    'role': 'onos',
+    'display': 'open network operating system',
+    'description': 'onos node',
+    'optional': True
+}, {
+    'role': 'ha',
+    'display': 'Cluster with HA',
+    'description': 'Cluster with HA node'
+}, {
+    'role': 'ceph',
+    'display': 'Ceph storage',
+    'description': 'Ceph storage',
+    'optional': True
 }]
diff --git a/conf/role/openstack_kilo_ansible.conf b/conf/role/openstack_kilo_ansible.conf
new file mode 100644
index 00000000..cb8a8567
--- /dev/null
+++ b/conf/role/openstack_kilo_ansible.conf
@@ -0,0 +1,85 @@
+ADAPTER_NAME = 'openstack_kilo'
+ROLES = [{
+    'role': 'allinone-compute',
+    'display_name': 'all in one',
+    'description': 'All in One'
+}, {
+    'role': 'controller',
+    'display_name': 'controller node',
+    'description': 'Controller Node'
+}, {
+    'role': 'compute',
+    'display_name': 'compute node',
+    'description': 'Compute Node'
+}, {
+    'role': 'storage',
+    'display_name': 'storage node',
+    'description': 'Storage Node'
+}, {
+    'role': 'network',
+    'display_name': 'network node',
+    'description': 'Network Node'
+}, {
+    'role': 'compute-worker',
+    'display_name': 'Compute worker node',
+    'description': 'Compute worker node'
+}, {
+    'role': 'compute-controller',
+    'display_name': 'Compute controller node',
+    'description': 'Compute controller node'
+}, {
+    'role': 'network-server',
+    'display_name': 'Network server node',
+    'description': 'Network server node'
+}, {
+    'role': 'database',
+    'display_name': 'Database node',
+    'description': 'Database node'
+}, {
+    'role': 'messaging',
+    'display_name': 'Messaging queue node',
+    'description': 'Messaging queue node'
+}, {
+    'role': 'image',
+    'display': 'Image node',
+    'description': 'Image node'
+}, {
+    'role': 'dashboard',
+    'display': 'Dashboard node',
+    'description': 'Dashboard node'
+}, {
+    'role': 'identity',
+    'display': 'Identity node',
+    'description': 'Identity node'
+}, {
+    'role': 'storage-controller',
+    'display': 'Storage controller node',
+    'description': 'Storage controller node'
+}, {
+    'role': 'storage-volume',
+    'display': 'Storage volume node',
+    'description': 'Storage volume node'
+}, {
+    'role': 'network-worker',
+    'display': 'Network worker node',
+    'description': 'Network worker node'
+}, {
+    'role': 'odl',
+    'display': 'open day light',
+    'description': 'odl node',
+    'optional': True
+}, {
+    'role': 'onos',
+    'display': 'open network operating system',
+    'description': 'onos node',
+    'optional': True
+}, {
+    'role': 'ha',
+    'display': 'Cluster with HA',
+    'description': 'Cluster with HA node'
+}, {
+    'role': 'ceph',
+    'display': 'Ceph storage',
+    'description': 'Ceph storage',
+    'optional': True
+}]
diff --git a/conf/templates/ansible_installer/openstack_juno/ansible_cfg/HA-ansible-multinodes.tmpl b/conf/templates/ansible_installer/openstack_juno/ansible_cfg/HA-ansible-multinodes.tmpl
new file mode 100644
index 00000000..264d4397
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_juno/ansible_cfg/HA-ansible-multinodes.tmpl
@@ -0,0 +1,7 @@
+#set cluster_name = $getVar('name', '')
+[defaults]
+log_path = /var/ansible/run/openstack_juno-$cluster_name/ansible.log
+host_key_checking = False
+callback_plugins = /opt/compass/bin/ansible_callbacks
+pipelining=True
+library = /opt/openstack-ansible-modules
diff --git a/conf/templates/ansible_installer/openstack_juno/ansible_cfg/allinone.tmpl b/conf/templates/ansible_installer/openstack_juno/ansible_cfg/allinone.tmpl
index 6221dbe5..35b8e0e4 100644
--- a/conf/templates/ansible_installer/openstack_juno/ansible_cfg/allinone.tmpl
+++ b/conf/templates/ansible_installer/openstack_juno/ansible_cfg/allinone.tmpl
@@ -3,3 +3,4 @@
 log_path = /var/ansible/run/openstack_juno-$cluster_name/ansible.log
 host_key_checking = False
 callback_plugins = /opt/compass/bin/ansible_callbacks
+pipelining=True
diff --git a/conf/templates/ansible_installer/openstack_juno/ansible_cfg/multinodes.tmpl b/conf/templates/ansible_installer/openstack_juno/ansible_cfg/multinodes.tmpl
index 6221dbe5..35b8e0e4 100644
--- a/conf/templates/ansible_installer/openstack_juno/ansible_cfg/multinodes.tmpl
+++ b/conf/templates/ansible_installer/openstack_juno/ansible_cfg/multinodes.tmpl
@@ -3,3 +3,4 @@
 log_path = /var/ansible/run/openstack_juno-$cluster_name/ansible.log
 host_key_checking = False
 callback_plugins = /opt/compass/bin/ansible_callbacks
+pipelining=True
diff --git a/conf/templates/ansible_installer/openstack_juno/ansible_cfg/single-controller.tmpl b/conf/templates/ansible_installer/openstack_juno/ansible_cfg/single-controller.tmpl
index 6221dbe5..35b8e0e4 100644
--- a/conf/templates/ansible_installer/openstack_juno/ansible_cfg/single-controller.tmpl
+++ b/conf/templates/ansible_installer/openstack_juno/ansible_cfg/single-controller.tmpl
@@ -3,3 +3,4 @@
 log_path = /var/ansible/run/openstack_juno-$cluster_name/ansible.log
 host_key_checking = False
 callback_plugins = /opt/compass/bin/ansible_callbacks
+pipelining=True
diff --git a/conf/templates/ansible_installer/openstack_juno/hosts/HA-ansible-multinodes.tmpl b/conf/templates/ansible_installer/openstack_juno/hosts/HA-ansible-multinodes.tmpl
new file mode 100644
index 00000000..af384d45
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_juno/hosts/HA-ansible-multinodes.tmpl
@@ -0,0 +1,22 @@
+# localhost
+127.0.0.1 localhost
+#set controllers = $getVar('controller', [])
+#set computes = $getVar('compute', [])
+#if not $isinstance($controllers, list)
+    #set controllers = [$controllers]
+#end if
+#if not $isinstance($computes, list)
+    #set computes = [$computes]
+#end if
+# controller
+#for worker in $controllers
+    #set worker_ip = $worker.install.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# compute
+#for worker in $computes
+    #set worker_ip = $worker.install.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
diff --git a/conf/templates/ansible_installer/openstack_juno/inventories/HA-ansible-multinodes.tmpl b/conf/templates/ansible_installer/openstack_juno/inventories/HA-ansible-multinodes.tmpl
new file mode 100644
index 00000000..9fb24ef5
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_juno/inventories/HA-ansible-multinodes.tmpl
@@ -0,0 +1,63 @@
+#set controllers = $getVar('controller', [])
+#set computes = $getVar('compute', [])
+#set has = $getVar('ha', [])
+#set odls = $getVar('odl', [])
+#set onoss = $getVar('onos', [])
+#set cephs = $getVar('ceph',[])
+#if not $isinstance($controllers, list)
+    #set controllers = [$controllers]
+#end if
+#if not $isinstance($computes, list)
+    #set computes = [$computes]
+#end if
+#if not $isinstance(has, list)
+    #set has = [has]
+#end if
+#if not $isinstance(odls, list)
+    #set odls = [odls]
+#end if
+#if not $isinstance(onoss, list)
+    #set onoss = [onoss]
+#end if
+#if not $isinstance(cephs, list)
+    #set cephs = [cephs]
+#end if
+#set credentials = $getVar('server_credentials', {})
+#set username = $credentials.get('username', 'root')
+#set password = $credentials.get('password', 'root')
+[controller]
+#for controller in $controllers
+    #set controller_ip = $controller.install.ip
+    #set controller_hostname = $controller.hostname
+$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[compute]
+#for compute in $computes
+    #set compute_ip = $compute.install.ip
+    #set compute_hostname = $compute.hostname
+$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[ha]
+#for ha in $has
+    #set ha_ip = $ha.install.ip
+    #set ha_hostname = $ha.hostname
+$ha_hostname ansible_ssh_host=$ha_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[odl]
+#for odl in $odls
+    #set odl_ip = $odl.install.ip
+    #set odl_hostname = $odl.hostname
+$odl_hostname ansible_ssh_host=$odl_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[onos]
+#for onos in $onoss
+    #set onos_ip = $onos.install.ip
+    #set onos_hostname = $onos.hostname
+$onos_hostname ansible_ssh_host=$onos_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[ceph]
+#for ceph in $cephs
+    #set ceph_ip = $ceph.install.ip
+    #set ceph_hostname = $ceph.hostname
+$ceph_hostname ansible_ssh_host=$ceph_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
diff --git a/conf/templates/ansible_installer/openstack_juno/inventories/single-controller.tmpl b/conf/templates/ansible_installer/openstack_juno/inventories/single-controller.tmpl
index be51b01c..e1bf72c4 100644
--- a/conf/templates/ansible_installer/openstack_juno/inventories/single-controller.tmpl
+++ b/conf/templates/ansible_installer/openstack_juno/inventories/single-controller.tmpl
@@ -2,6 +2,8 @@
 #set computes = $getVar('compute', [])
 #set storages = $getVar('storage', [])
 #set networks = $getVar('network', [])
+#set odls = $getVar('odl', [])
+#set onoss = $getVar('onos', [])
 #if not $isinstance($controllers, list)
     #set controllers = [$controllers]
 #end if
@@ -14,6 +16,12 @@
 #if not $isinstance($networks, list)
     #set networks = [$networks]
 #end if
+#if not $isinstance($odls, list)
+    #set odls = [$odls]
+#end if
+#if not $isinstance($onoss, list)
+    #set onoss = [$onoss]
+#end if
 
 #set credentials = $getVar('server_credentials', {})
 #set username = $credentials.get('username', 'root')
@@ -45,3 +53,15 @@ $network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansibl
     #set storage_hostname = $storage.hostname
 $storage_hostname ansible_ssh_host=$storage_ip ansible_ssh_user=$username ansible_ssh_password=$password
 #end for
+[odl]
+#for odl in $odls
+    #set odl_ip = $odl.management.ip
+    #set odl_hostname = $odl.hostname
+$odl_hostname ansible_ssh_host=$odl_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[onos]
+#for onos in $onoss
+    #set onos_ip = $onos.management.ip
+    #set onos_hostname = $onos.hostname
+$onos_hostname ansible_ssh_host=$onos_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
diff --git a/conf/templates/ansible_installer/openstack_juno/vars/HA-ansible-multinodes.tmpl b/conf/templates/ansible_installer/openstack_juno/vars/HA-ansible-multinodes.tmpl
new file mode 100644
index 00000000..4143a792
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_juno/vars/HA-ansible-multinodes.tmpl
@@ -0,0 +1,184 @@
+#from random import randint
+#set cluster_name = $getVar('name', '')
+#set network_cfg = $getVar('network_cfg', {})
+#set ntp_server = $getVar('ntp_server', "")
+#set ceph_disk = $getVar('ceph_disk',"")
+#set $sys_intf_mappings= {}
+#for $intf_info in $network_cfg.sys_intf_mappings
+#set $sys_intf_mappings[$intf_info["name"]] = $intf_info
+#end for  
+
+#set ip_settings={}
+#for k,v in $getVar('ip_settings', {}).items() 
+#set host_ip_settings={}
+#for intf in v
+#set $host_ip_settings[$intf["alias"]]=intf
+#end for
+#set $ip_settings[$k]=$host_ip_settings
+#end for
+ 
+#set neutron_cfg = $getVar('neutron_config', {})
+#set ovs_config = $neutron_cfg.openvswitch
+
+#set has = $getVar('ha', [])
+#set ha_vip = $getVar('ha_vip', [])
+
+#set controllers = $getVar('controller', [])
+#set computers = $getVar('compute', [])
+
+enable_secgroup: $getVar('enable_secgroup', True)
+enable_fwaas: $getVar('enable_fwaas', True)
+enable_vpnaas: $getVar('enable_vpnaas', True)
+ip_settings: $ip_settings
+network_cfg: $network_cfg
+sys_intf_mappings: $sys_intf_mappings
+deploy_type: $getVar('deploy_type', 'virtual')
+
+public_net_info: "{{ network_cfg.public_net_info }}"
+host_ip_settings: "{{ ip_settings[inventory_hostname] }}"
+
+ntp_server: $ntp_server
+internal_vip:
+  ip: $network_cfg["internal_vip"]["ip"]
+  netmask: $network_cfg["internal_vip"]["netmask"]
+#if "vlan_tag" in $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]
+  interface: $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]["name"]
+#else
+  interface: $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]["interface"]
+#end if
+
+public_vip:
+  ip: $network_cfg["public_vip"]["ip"]
+  netmask: $network_cfg["public_vip"]["netmask"]
+#if "vlan_tag" in $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]
+  interface: $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]["name"]
+#else
+  interface: $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]["interface"]
+#end if
+
+db_host: "{{ internal_vip.ip }}"
+rabbit_host: "{{ internal_vip.ip }}"
+
+internal_ip: "{{ ip_settings[inventory_hostname]['mgmt']['ip'] }}"
+internal_nic: mgmt
+
+#set random_id = randint(1, 255)
+vrouter_id_internal: $random_id
+vrouter_id_public: $random_id
+
+identity_host: "{{ internal_ip }}"
+controllers_host: "{{ internal_ip }}"
+storage_controller_host: "{{ internal_ip }}"
+compute_controller_host: "{{ internal_ip }}"
+image_host: "{{ internal_ip }}"
+network_server_host: "{{ internal_ip }}"
+dashboard_host: "{{ internal_ip }}"
+
+haproxy_hosts:
+#for $item in $has
+#set $hostname=$item["hostname"]
+  $hostname: $ip_settings[$hostname]["mgmt"]["ip"]
+#end for
+
+ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD
+#set credentials = $getVar('service_credentials', {})
+#set console_credentials = $getVar('console_credentials', {})
+#set rabbit_username = $credentials.rabbitmq.username
+#set rabbit_password = $credentials.rabbitmq.password
+#set rabbit_username = $credentials.rabbitmq.username
+#set rabbit_password = $credentials.rabbitmq.password
+#set keystone_dbpass = $credentials.identity.password
+#set glance_dbpass = $credentials.image.password
+#set glance_pass = $console_credentials.image.password
+#set nova_dbpass = $credentials.compute.password
+#set nova_pass = $console_credentials.compute.password
+#set dash_dbpass = $credentials.dashboard.password
+#set cinder_dbpass = $credentials.volume.password
+#set cinder_pass = $console_credentials.volume.password
+#set admin_pass = $console_credentials.admin.password
+#set neutron_pass = $console_credentials.network.password
+
+cluster_name: $cluster_name
+
+odl_controller: 10.1.0.15
+
+DEBUG: true
+VERBOSE: true
+NTP_SERVER_LOCAL: "{{ controllers_host }}"
+DB_HOST: "{{ db_host }}"
+MQ_BROKER: rabbitmq
+
+OPENSTACK_REPO: cloudarchive-juno.list
+juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
+ADMIN_TOKEN: admin
+CEILOMETER_TOKEN: c095d479023a0fd58a54
+erlang.cookie: DJJVECFMCJPVYQTJTDWG
+
+RABBIT_USER: $rabbit_username
+RABBIT_PASS: $rabbit_password
+KEYSTONE_DBPASS: $keystone_dbpass
+CEILOMETER_DBPASS: service
+CEILOMETER_PASS: console
+DEMO_PASS: demo_secret
+ADMIN_PASS: $admin_pass
+GLANCE_DBPASS: $glance_dbpass
+GLANCE_PASS: $glance_pass
+NOVA_DBPASS: $nova_dbpass
+NOVA_PASS: $nova_pass
+DASH_DBPASS: $dash_dbpass
+CINDER_DBPASS: $cinder_dbpass
+CINDER_PASS: $cinder_pass
+NEUTRON_DBPASS: $neutron_pass
+NEUTRON_PASS: $neutron_pass
+
+#set neutron_service_plugins=['router']
+
+#if $getVar('enable_fwaas', True)
+#silent $neutron_service_plugins.append('firewall')
+#end if
+
+#if $getVar('enable_vpnaas', True)
+#silent $neutron_service_plugins.append('vpnaas')
+#end if
+
+NEUTRON_SERVICE_PLUGINS: $neutron_service_plugins
+NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan', 'vlan']
+NEUTRON_TENANT_NETWORK_TYPES: ['$ovs_config["tenant_network_type"]']
+NEUTRON_OVS_BRIDGE_MAPPINGS: $ovs_config['bridge_mappings']
+#if 'vlan_ranges' in $ovs_config
+NEUTRON_VLAN_RANGES: $ovs_config['vlan_ranges']
+#else
+NEUTRON_VLAN_RANGES: []
+#end if
+#if 'tunnel_id_ranges' in $ovs_config
+NEUTRON_TUNNEL_ID_RANGES: $ovs_config['tunnel_id_ranges']
+#else
+NEUTRON_TUNNEL_ID_RANGES: []
+#end if
+
+#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
+NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
+NEUTRON_TUNNEL_TYPES: ['vxlan']
+METADATA_SECRET: metadata_secret
+WSREP_SST_USER: wsrep_sst
+WSREP_SST_PASS: wsrep_sst_sercet
+
+INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: "{{ internal_ip }}"
+
+#build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+build_in_image: http://192.168.121.12:9999/img/cirros-0.3.3-x86_64-disk.img
+build_in_image_name: cirros-0.3.3-x86_64-disk.img
+
+physical_device: /dev/sdb
+
+odl_username: admin
+odl_password: admin
+odl_api_port: 8080
+
+odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz
+odl_pkg_name: karaf.tar.gz
+odl_home: "/opt/opendaylight-0.2.2/"
+odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'http', 'odl-base-all','odl-aaa-authn','odl-restconf','odl-nsf-all','odl-adsal-northbound','odl-mdsal-apidocs', 'odl-openflowplugin-all']
+odl_extra_features: ['odl-l2switch-switch', 'odl-ovsdb-plugin', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound','odl-dlux-core', 'odl-restconf-all', 'odl-mdsal-clustering', 'odl-openflowplugin-flow-services', 'odl-netconf-connector', 'odl-netconf-connector-ssh', 'jolokia-osgi']
+odl_features: "{{ odl_base_features + odl_extra_features }}"
+odl_api_port: 8080
diff --git a/conf/templates/ansible_installer/openstack_juno/vars/allinone.tmpl b/conf/templates/ansible_installer/openstack_juno/vars/allinone.tmpl
index 7b1e382e..740397ca 100644
--- a/conf/templates/ansible_installer/openstack_juno/vars/allinone.tmpl
+++ b/conf/templates/ansible_installer/openstack_juno/vars/allinone.tmpl
@@ -44,8 +44,8 @@ dashboard_host: "{{ controller_host }}"
 cluster_name: $cluster_name
 odl_controller: 10.1.0.15
 
-DEBUG: False
-VERBOSE: False
+DEBUG: true
+VERBOSE: true
 NTP_SERVER_LOCAL: "{{ controller_host }}"
 DB_HOST: "{{ controller_host }}"
 MQ_BROKER: rabbitmq
@@ -55,6 +55,7 @@ juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-
 ADMIN_TOKEN: admin
 CEILOMETER_TOKEN: c095d479023a0fd58a54
 
+RABBIT_USER: $rabbit_username
 RABBIT_PASS: $rabbit_password
 KEYSTONE_DBPASS: $keystone_dbpass
 DEMO_PASS: demo_secret
@@ -88,6 +89,7 @@ physical_device: /dev/sdb
 
 internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
 internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
+HA_VIP: "{{ internal_ip }}"
 
 odl_username: admin
 odl_password: admin
diff --git a/conf/templates/ansible_installer/openstack_juno/vars/multinodes.tmpl b/conf/templates/ansible_installer/openstack_juno/vars/multinodes.tmpl
index 45c80a5a..da266a79 100644
--- a/conf/templates/ansible_installer/openstack_juno/vars/multinodes.tmpl
+++ b/conf/templates/ansible_installer/openstack_juno/vars/multinodes.tmpl
@@ -105,8 +105,8 @@ cluster_name: $cluster_name
 
 odl_controller: 10.1.0.15
 
-DEBUG: False
-VERBOSE: False
+DEBUG: true
+VERBOSE: true
 NTP_SERVER_LOCAL: "{{ compute_controller_host }}"
 DB_HOST: "{{ db_host }}"
 MQ_BROKER: rabbitmq
@@ -116,6 +116,7 @@ juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-
 ADMIN_TOKEN: admin
 CEILOMETER_TOKEN: c095d479023a0fd58a54
 
+RABBIT_USER: $rabbit_username
 RABBIT_PASS: $rabbit_password
 KEYSTONE_DBPASS: $keystone_dbpass
 DEMO_PASS: demo_secret
@@ -149,7 +150,16 @@ physical_device: /dev/sdb
 
 internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
 internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
-
+HA_VIP: "{{ internal_ip }}"
 odl_username: admin
 odl_password: admin
 odl_api_port: 8080
+
+odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz
+odl_pkg_name: karaf.tar.gz
+odl_home: "/opt/opendaylight-0.2.2/"
+odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'http', 'odl-base-all','odl-aaa-authn','odl-restconf','odl-nsf-all','odl-adsal-northbound','odl-mdsal-apidocs', 'odl-openflowplugin-all']
+odl_extra_features: ['odl-l2switch-switch', 'odl-ovsdb-plugin', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound','odl-dlux-core', 'odl-restconf-all', 'odl-mdsal-clustering', 'odl-openflowplugin-flow-services', 'odl-netconf-connector', 'odl-netconf-connector-ssh', 'jolokia-osgi']
+odl_features: "{{ odl_base_features + odl_extra_features }}"
+odl_api_port: 8080
+
diff --git a/conf/templates/ansible_installer/openstack_juno/vars/single-controller.tmpl b/conf/templates/ansible_installer/openstack_juno/vars/single-controller.tmpl
index 0286df89..b24bc811 100644
--- a/conf/templates/ansible_installer/openstack_juno/vars/single-controller.tmpl
+++ b/conf/templates/ansible_installer/openstack_juno/vars/single-controller.tmpl
@@ -45,7 +45,7 @@ INTERNAL_INTERFACE: $network_internal_nic
 #set neutron_pass = $console_credentials.network.password
 
 cluster_name: $cluster_name
-
+deploy_type: $getVar('deploy_type', 'virtual')
 compute_controller_host: "{{ controller_host }}"
 db_host: "{{ controller_host }}"
 rabbit_host: "{{ controller_host }}"
@@ -56,8 +56,8 @@ network_server_host: "{{ controller_host }}"
 dashboard_host: "{{ controller_host }}"
 odl_controller: 10.1.0.15
 
-DEBUG: False
-VERBOSE: False
+DEBUG: true
+VERBOSE: true
 NTP_SERVER_LOCAL: "{{ controller_host }}"
 DB_HOST: "{{ controller_host }}"
 MQ_BROKER: rabbitmq
@@ -67,6 +67,7 @@ juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-
 ADMIN_TOKEN: admin
 CEILOMETER_TOKEN: c095d479023a0fd58a54
 
+RABBIT_USER: $rabbit_username
 RABBIT_PASS: $rabbit_password
 KEYSTONE_DBPASS: $keystone_dbpass
 DEMO_PASS: demo_secret
@@ -101,7 +102,7 @@ physical_device: /dev/sdb
 
 internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
 internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
-
+HA_VIP: "{{ internal_ip }}"
 odl_username: admin
 odl_password: admin
 odl_api_port: 8080
diff --git a/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/HA-ansible-multinodes.tmpl b/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/HA-ansible-multinodes.tmpl
new file mode 100644
index 00000000..81f55aba
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/HA-ansible-multinodes.tmpl
@@ -0,0 +1,7 @@
+#set cluster_name = $getVar('name', '')
+[defaults]
+log_path = /var/ansible/run/openstack_kilo-$cluster_name/ansible.log
+host_key_checking = False
+callback_plugins = /opt/compass/bin/ansible_callbacks
+pipelining=True
+library = /opt/openstack-ansible-modules
diff --git a/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/allinone.tmpl b/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/allinone.tmpl
new file mode 100644
index 00000000..35b8e0e4
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/allinone.tmpl
@@ -0,0 +1,6 @@
+#set cluster_name = $getVar('name', '')
+[defaults]
+log_path = /var/ansible/run/openstack_kilo-$cluster_name/ansible.log
+host_key_checking = False
+callback_plugins = /opt/compass/bin/ansible_callbacks
+pipelining=True
diff --git a/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/multinodes.tmpl b/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/multinodes.tmpl
new file mode 100644
index 00000000..35b8e0e4
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/multinodes.tmpl
@@ -0,0 +1,6 @@
+#set cluster_name = $getVar('name', '')
+[defaults]
+log_path = /var/ansible/run/openstack_kilo-$cluster_name/ansible.log
+host_key_checking = False
+callback_plugins = /opt/compass/bin/ansible_callbacks
+pipelining=True
diff --git a/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/single-controller.tmpl b/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/single-controller.tmpl
new file mode 100644
index 00000000..35b8e0e4
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/ansible_cfg/single-controller.tmpl
@@ -0,0 +1,6 @@
+#set cluster_name = $getVar('name', '')
+[defaults]
+log_path = /var/ansible/run/openstack_kilo-$cluster_name/ansible.log
+host_key_checking = False
+callback_plugins = /opt/compass/bin/ansible_callbacks
+pipelining=True
diff --git a/conf/templates/ansible_installer/openstack_kilo/hosts/HA-ansible-multinodes.tmpl b/conf/templates/ansible_installer/openstack_kilo/hosts/HA-ansible-multinodes.tmpl
new file mode 100644
index 00000000..af384d45
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/hosts/HA-ansible-multinodes.tmpl
@@ -0,0 +1,22 @@
+# localhost
+127.0.0.1 localhost
+#set controllers = $getVar('controller', [])
+#set computes = $getVar('compute', [])
+#if not $isinstance($controllers, list)
+    #set controllers = [$controllers]
+#end if
+#if not $isinstance($computes, list)
+    #set computes = [$computes]
+#end if
+# controller
+#for worker in $controllers
+    #set worker_ip = $worker.install.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# compute
+#for worker in $computes
+    #set worker_ip = $worker.install.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
diff --git a/conf/templates/ansible_installer/openstack_kilo/hosts/allinone.tmpl b/conf/templates/ansible_installer/openstack_kilo/hosts/allinone.tmpl
new file mode 100644
index 00000000..b777815e
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/hosts/allinone.tmpl
@@ -0,0 +1,10 @@
+#set controllers = $getVar('allinone_compute', [])
+#if not $isinstance($controllers, list)
+    #set controllers = [$controllers]
+#end if
+# allinone
+#for controller in $controllers
+    #set controller_ip = $controller.management.ip
+    #set controller_hostname = $controller.hostname
+$controller_ip $controller_hostname
+#end for
diff --git a/conf/templates/ansible_installer/openstack_kilo/hosts/multinodes.tmpl b/conf/templates/ansible_installer/openstack_kilo/hosts/multinodes.tmpl
new file mode 100644
index 00000000..ca8c793f
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/hosts/multinodes.tmpl
@@ -0,0 +1,110 @@
+#set compute_controllers = $getVar('compute_controller', [])
+#set compute_workers = $getVar('compute_worker', [])
+#set network_servers = $getVar('network_server', [])
+#set network_workers = $getVar('network_worker', [])
+#set databases = $getVar('database', [])
+#set messagings = $getVar('messaging', [])
+#set images = $getVar('image', [])
+#set dashboards = $getVar('dashboard', [])
+#set identities = $getVar('identity', [])
+#set storage_controllers = $getVar('storage_controller', [])
+#set storage_volumes = $getVar('storage_volume', [])
+#if not $isinstance($compute_controllers, list)
+    #set compute_controllers = [$compute_controllers]
+#end if
+#if not $isinstance($compute_workers, list)
+    #set compute_workers = [$compute_workers]
+#end if
+#if not $isinstance($network_servers, list)
+    #set network_servers = [$network_servers]
+#end if
+#if not $isinstance($network_workers, list)
+    #set network_workers = [$network_workers]
+#end if
+#if not $isinstance($databases, list)
+    #set databases = [$databases]
+#end if
+#if not $isinstance($messagings, list)
+    #set messagings = [$messagings]
+#end if
+#if not $isinstance($images, list)
+    #set images = [$images]
+#end if
+#if not $isinstance($dashboards, list)
+    #set dashboards = [$dashboards]
+#end if
+#if not $isinstance($identities, list)
+    #set identities = [$identities]
+#end if
+#if not $isinstance($storage_controllers, list)
+    #set storage_controllers = [$storage_controllers]
+#end if
+#if not $isinstance($storage_volumes, list)
+    #set storage_volumes = [$storage_volumes]
+#end if
+# compute-controller
+#for worker in $compute_controllers
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# database
+#for worker in $databases
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# messaging
+#for worker in $messagings
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# storage-controller
+#for worker in $storage_controllers
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# image
+#for worker in $images
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# identity
+#for worker in $identities
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# network-server
+#for worker in $network_servers
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# dashboard
+#for worker in $dashboards
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# storage-volume
+#for worker in $storage_volumes
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# network-worker
+#for worker in $network_workers
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# compute-worker
+#for worker in $compute_workers
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
diff --git a/conf/templates/ansible_installer/openstack_kilo/hosts/single-controller.tmpl b/conf/templates/ansible_installer/openstack_kilo/hosts/single-controller.tmpl
new file mode 100644
index 00000000..3ed94694
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/hosts/single-controller.tmpl
@@ -0,0 +1,40 @@
+#set controllers = $getVar('controller', [])
+#set computes = $getVar('compute', [])
+#set storages = $getVar('storage', [])
+#set networks = $getVar('network', [])
+#if not $isinstance($controllers, list)
+    #set controllers = [$controllers]
+#end if
+#if not $isinstance($computes, list)
+    #set computes = [$computes]
+#end if
+#if not $isinstance($storages, list)
+    #set storages = [$storages]
+#end if
+#if not $isinstance($networks, list)
+    #set networks = [$networks]
+#end if
+# controller
+#for controller in $controllers
+    #set controller_ip = $controller.management.ip
+    #set controller_hostname = $controller.hostname
+$controller_ip $controller_hostname
+#end for
+# compute
+#for worker in $computes
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# storage
+#for worker in $storages
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
+# network
+#for worker in $networks
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_ip $worker_hostname
+#end for
diff --git a/conf/templates/ansible_installer/openstack_kilo/inventories/HA-ansible-multinodes.tmpl b/conf/templates/ansible_installer/openstack_kilo/inventories/HA-ansible-multinodes.tmpl
new file mode 100644
index 00000000..9fb24ef5
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/inventories/HA-ansible-multinodes.tmpl
@@ -0,0 +1,63 @@
+#set controllers = $getVar('controller', [])
+#set computes = $getVar('compute', [])
+#set has = $getVar('ha', [])
+#set odls = $getVar('odl', [])
+#set onoss = $getVar('onos', [])
+#set cephs = $getVar('ceph',[])
+#if not $isinstance($controllers, list)
+    #set controllers = [$controllers]
+#end if
+#if not $isinstance($computes, list)
+    #set computes = [$computes]
+#end if
+#if not $isinstance($has, list)
+    #set has = [$has]
+#end if
+#if not $isinstance($odls, list)
+    #set odls = [$odls]
+#end if
+#if not $isinstance($onoss, list)
+    #set onoss = [$onoss]
+#end if
+#if not $isinstance($cephs, list)
+    #set cephs = [$cephs]
+#end if
+#set credentials = $getVar('server_credentials', {})
+#set username = $credentials.get('username', 'root')
+#set password = $credentials.get('password', 'root')
+[controller]
+#for controller in $controllers
+    #set controller_ip = $controller.install.ip
+    #set controller_hostname = $controller.hostname
+$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[compute]
+#for compute in $computes
+    #set compute_ip = $compute.install.ip
+    #set compute_hostname = $compute.hostname
+$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[ha]
+#for ha in $has
+    #set ha_ip = $ha.install.ip
+    #set ha_hostname = $ha.hostname
+$ha_hostname ansible_ssh_host=$ha_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[odl]
+#for odl in $odls
+    #set odl_ip = $odl.install.ip
+    #set odl_hostname = $odl.hostname
+$odl_hostname ansible_ssh_host=$odl_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[onos]
+#for onos in $onoss
+    #set onos_ip = $onos.install.ip
+    #set onos_hostname = $onos.hostname
+$onos_hostname ansible_ssh_host=$onos_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[ceph]
+#for ceph in $cephs
+    #set ceph_ip = $ceph.install.ip
+    #set ceph_hostname = $ceph.hostname
+$ceph_hostname ansible_ssh_host=$ceph_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
diff --git a/conf/templates/ansible_installer/openstack_kilo/inventories/allinone.tmpl b/conf/templates/ansible_installer/openstack_kilo/inventories/allinone.tmpl
new file mode 100644
index 00000000..38e0038b
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/inventories/allinone.tmpl
@@ -0,0 +1,47 @@
+#set controllers = $getVar('allinone_compute', [])
+#set computes = $getVar('allinone_compute', [])
+#set storages = $getVar('allinone_compute', [])
+#set networks = $getVar('allinone_compute', [])
+#if not $isinstance($controllers, list)
+    #set controllers = [$controllers]
+#end if
+#if not $isinstance($computes, list)
+    #set computes = [$computes]
+#end if
+#if not $isinstance($storages, list)
+    #set storages = [$storages]
+#end if
+#if not $isinstance($networks, list)
+    #set networks = [$networks]
+#end if
+
+#set credentials = $getVar('server_credentials', {})
+#set username = $credentials.get('username', 'root')
+#set password = $credentials.get('password', 'root')
+[controller]
+#for controller in $controllers
+    #set controller_ip = $controller.management.ip
+    #set controller_hostname = $controller.hostname
+$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[compute]
+#for compute in $computes
+    #set compute_ip = $compute.management.ip
+    #set compute_hostname = $compute.hostname
+$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[network]
+#for network in $networks
+    #set network_ip = $network.management.ip
+    #set network_hostname = $network.hostname
+$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[storage]
+#for storage in $storages
+    #set storage_ip = $storage.management.ip
+    #set storage_hostname = $storage.hostname
+$storage_hostname ansible_ssh_host=$storage_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
diff --git a/conf/templates/ansible_installer/openstack_kilo/inventories/multinodes.tmpl b/conf/templates/ansible_installer/openstack_kilo/inventories/multinodes.tmpl
new file mode 100644
index 00000000..7cdfbef3
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/inventories/multinodes.tmpl
@@ -0,0 +1,123 @@
+#set compute_controllers = $getVar('compute_controller', [])
+#set compute_workers = $getVar('compute_worker', [])
+#set network_servers = $getVar('network_server', [])
+#set network_workers = $getVar('network_worker', [])
+#set databases = $getVar('database', [])
+#set messagings = $getVar('messaging', [])
+#set images = $getVar('image', [])
+#set dashboards = $getVar('dashboard', [])
+#set identities = $getVar('identity', [])
+#set storage_controllers = $getVar('storage_controller', [])
+#set storage_volumes = $getVar('storage_volume', [])
+#if not $isinstance($compute_controllers, list)
+    #set compute_controllers = [$compute_controllers]
+#end if
+#if not $isinstance($compute_workers, list)
+    #set compute_workers = [$compute_workers]
+#end if
+#if not $isinstance($network_servers, list)
+    #set network_servers = [$network_servers]
+#end if
+#if not $isinstance($network_workers, list)
+    #set network_workers = [$network_workers]
+#end if
+#if not $isinstance($databases, list)
+    #set databases = [$databases]
+#end if
+#if not $isinstance($messagings, list)
+    #set messagings = [$messagings]
+#end if
+#if not $isinstance($images, list)
+    #set images = [$images]
+#end if
+#if not $isinstance($dashboards, list)
+    #set dashboards = [$dashboards]
+#end if
+#if not $isinstance($identities, list)
+    #set identities = [$identities]
+#end if
+#if not $isinstance($storage_controllers, list)
+    #set storage_controllers = [$storage_controllers]
+#end if
+#if not $isinstance($storage_volumes, list)
+    #set storage_volumes = [$storage_volumes]
+#end if
+#set credentials = $getVar('server_credentials', {})
+#set username = $credentials.get('username', 'root')
+#set password = $credentials.get('password', 'root')
+[compute-controller]
+#for controller in $compute_controllers
+    #set controller_ip = $controller.management.ip
+    #set controller_hostname = $controller.hostname
+$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[compute-worker]
+#for compute in $compute_workers
+    #set compute_ip = $compute.management.ip
+    #set compute_hostname = $compute.hostname
+$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[network-server]
+#for network in $network_servers
+    #set network_ip = $network.management.ip
+    #set network_hostname = $network.hostname
+$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[network-worker]
+#for network in $network_workers
+    #set network_ip = $network.management.ip
+    #set network_hostname = $network.hostname
+$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[database]
+#for worker in $databases
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[messaging]
+#for worker in $messagings
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[image]
+#for worker in $images
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[dashboard]
+#for worker in $dashboards
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[identity]
+#for worker in $identities
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[storage-controller]
+#for worker in $storage_controllers
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[storage-volume]
+#for worker in $storage_volumes
+    #set worker_ip = $worker.management.ip
+    #set worker_hostname = $worker.hostname
+$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
diff --git a/conf/templates/ansible_installer/openstack_kilo/inventories/single-controller.tmpl b/conf/templates/ansible_installer/openstack_kilo/inventories/single-controller.tmpl
new file mode 100644
index 00000000..e1bf72c4
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/inventories/single-controller.tmpl
@@ -0,0 +1,67 @@
+#set controllers = $getVar('controller', [])
+#set computes = $getVar('compute', [])
+#set storages = $getVar('storage', [])
+#set networks = $getVar('network', [])
+#set odls = $getVar('odl', [])
+#set onoss = $getVar('onos', [])
+#if not $isinstance($controllers, list)
+    #set controllers = [$controllers]
+#end if
+#if not $isinstance($computes, list)
+    #set computes = [$computes]
+#end if
+#if not $isinstance($storages, list)
+    #set storages = [$storages]
+#end if
+#if not $isinstance($networks, list)
+    #set networks = [$networks]
+#end if
+#if not $isinstance($odls, list)
+    #set odls = [$odls]
+#end if
+#if not $isinstance($onoss, list)
+    #set onoss = [$onoss]
+#end if
+
+#set credentials = $getVar('server_credentials', {})
+#set username = $credentials.get('username', 'root')
+#set password = $credentials.get('password', 'root')
+[controller]
+#for controller in $controllers
+    #set controller_ip = $controller.management.ip
+    #set controller_hostname = $controller.hostname
+$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[compute]
+#for compute in $computes
+    #set compute_ip = $compute.management.ip
+    #set compute_hostname = $compute.hostname
+$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[network]
+#for network in $networks
+    #set network_ip = $network.management.ip
+    #set network_hostname = $network.hostname
+$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+
+[storage]
+#for storage in $storages
+    #set storage_ip = $storage.management.ip
+    #set storage_hostname = $storage.hostname
+$storage_hostname ansible_ssh_host=$storage_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[odl]
+#for odl in $odls
+    #set odl_ip = $odl.management.ip
+    #set odl_hostname = $odl.hostname
+$odl_hostname ansible_ssh_host=$odl_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
+[storage]
+#for storage in $storages
+    #set storage_ip = $storage.management.ip
+    #set storage_hostname = $storage.hostname
+$storage_hostname ansible_ssh_host=$storage_ip ansible_ssh_user=$username ansible_ssh_password=$password
+#end for
diff --git a/conf/templates/ansible_installer/openstack_kilo/vars/HA-ansible-multinodes.tmpl b/conf/templates/ansible_installer/openstack_kilo/vars/HA-ansible-multinodes.tmpl
new file mode 100644
index 00000000..49eaadfb
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/vars/HA-ansible-multinodes.tmpl
@@ -0,0 +1,173 @@
+#from random import randint
+#set cluster_name = $getVar('name', '')
+#set network_cfg = $getVar('network_cfg', {})
+#set ntp_server = $getVar('ntp_server', "")
+#set ceph_disk = $getVar('ceph_disk',"")
+#set $sys_intf_mappings= {}
+#for $intf_info in $network_cfg.sys_intf_mappings
+#set $sys_intf_mappings[$intf_info["name"]] = $intf_info
+#end for  
+
+#set ip_settings={}
+#for k,v in $getVar('ip_settings', {}).items() 
+#set host_ip_settings={}
+#for intf in v
+#set $host_ip_settings[$intf["alias"]]=intf
+#end for
+#set $ip_settings[$k]=$host_ip_settings
+#end for
+ 
+#set neutron_cfg = $getVar('neutron_config', {})
+#set ovs_config = $neutron_cfg.openvswitch
+
+#set has = $getVar('ha', [])
+#set ha_vip = $getVar('ha_vip', [])
+
+#set controllers = $getVar('controller', [])
+#set computers = $getVar('compute', [])
+
+enable_secgroup: $getVar('enable_secgroup', True)
+enable_fwaas: $getVar('enable_fwaas', True)
+enable_vpnaas: $getVar('enable_vpnaas', True)
+ip_settings: $ip_settings
+network_cfg: $network_cfg
+sys_intf_mappings: $sys_intf_mappings
+deploy_type: $getVar('deploy_type', 'virtual')
+
+public_net_info: "{{ network_cfg.public_net_info }}"
+host_ip_settings: "{{ ip_settings[inventory_hostname] }}"
+
+ntp_server: $ntp_server
+internal_vip:
+  ip: $network_cfg["internal_vip"]["ip"]
+  netmask: $network_cfg["internal_vip"]["netmask"]
+#if "vlan_tag" in $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]
+  interface: $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]["name"]
+#else
+  interface: $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]["interface"]
+#end if
+
+public_vip:
+  ip: $network_cfg["public_vip"]["ip"]
+  netmask: $network_cfg["public_vip"]["netmask"]
+#if "vlan_tag" in $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]
+  interface: $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]["name"]
+#else
+  interface: $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]["interface"]
+#end if
+
+db_host: "{{ internal_vip.ip }}"
+rabbit_host: "{{ internal_vip.ip }}"
+
+internal_ip: "{{ ip_settings[inventory_hostname]['mgmt']['ip'] }}"
+internal_nic: mgmt
+
+#set random_id = randint(1, 255)
+vrouter_id_internal: $random_id
+vrouter_id_public: $random_id
+
+identity_host: "{{ internal_ip }}"
+controllers_host: "{{ internal_ip }}"
+storage_controller_host: "{{ internal_ip }}"
+compute_controller_host: "{{ internal_ip }}"
+image_host: "{{ internal_ip }}"
+network_server_host: "{{ internal_ip }}"
+dashboard_host: "{{ internal_ip }}"
+
+haproxy_hosts:
+#for $item in $has
+#set $hostname=$item["hostname"]
+  $hostname: $ip_settings[$hostname]["mgmt"]["ip"]
+#end for
+
+ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD
+#set credentials = $getVar('service_credentials', {})
+#set console_credentials = $getVar('console_credentials', {})
+#set rabbit_username = $credentials.rabbitmq.username
+#set rabbit_password = $credentials.rabbitmq.password
+#set rabbit_username = $credentials.rabbitmq.username
+#set rabbit_password = $credentials.rabbitmq.password
+#set keystone_dbpass = $credentials.identity.password
+#set glance_dbpass = $credentials.image.password
+#set glance_pass = $console_credentials.image.password
+#set nova_dbpass = $credentials.compute.password
+#set nova_pass = $console_credentials.compute.password
+#set dash_dbpass = $credentials.dashboard.password
+#set cinder_dbpass = $credentials.volume.password
+#set cinder_pass = $console_credentials.volume.password
+#set admin_pass = $console_credentials.admin.password
+#set neutron_pass = $console_credentials.network.password
+
+cluster_name: $cluster_name
+
+odl_controller: 10.1.0.15
+
+DEBUG: true
+VERBOSE: true
+NTP_SERVER_LOCAL: "{{ controllers_host }}"
+DB_HOST: "{{ db_host }}"
+MQ_BROKER: rabbitmq
+
+OPENSTACK_REPO: cloudarchive-juno.list
+juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
+ADMIN_TOKEN: admin
+CEILOMETER_TOKEN: c095d479023a0fd58a54
+erlang.cookie: DJJVECFMCJPVYQTJTDWG
+
+RABBIT_USER: $rabbit_username
+RABBIT_PASS: $rabbit_password
+KEYSTONE_DBPASS: $keystone_dbpass
+CEILOMETER_DBPASS: service
+CEILOMETER_PASS: console
+DEMO_PASS: demo_secret
+ADMIN_PASS: $admin_pass
+GLANCE_DBPASS: $glance_dbpass
+GLANCE_PASS: $glance_pass
+NOVA_DBPASS: $nova_dbpass
+NOVA_PASS: $nova_pass
+DASH_DBPASS: $dash_dbpass
+CINDER_DBPASS: $cinder_dbpass
+CINDER_PASS: $cinder_pass
+NEUTRON_DBPASS: $neutron_pass
+NEUTRON_PASS: $neutron_pass
+
+NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan', 'vlan']
+NEUTRON_TENANT_NETWORK_TYPES: ['$ovs_config["tenant_network_type"]']
+NEUTRON_OVS_BRIDGE_MAPPINGS: $ovs_config['bridge_mappings']
+#if 'vlan_ranges' in $ovs_config
+NEUTRON_VLAN_RANGES: $ovs_config['vlan_ranges']
+#else
+NEUTRON_VLAN_RANGES: []
+#end if
+#if 'tunnel_id_ranges' in $ovs_config
+NEUTRON_TUNNEL_ID_RANGES: $ovs_config['tunnel_id_ranges']
+#else
+NEUTRON_TUNNEL_ID_RANGES: []
+#end if
+
+#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
+NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
+NEUTRON_TUNNEL_TYPES: ['vxlan']
+METADATA_SECRET: metadata_secret
+WSREP_SST_USER: wsrep_sst
+WSREP_SST_PASS: wsrep_sst_sercet
+
+INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: "{{ internal_ip }}"
+
+#build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+build_in_image: http://192.168.121.12:9999/img/cirros-0.3.3-x86_64-disk.img
+build_in_image_name: cirros-0.3.3-x86_64-disk.img
+
+physical_device: /dev/sdb
+
+odl_username: admin
+odl_password: admin
+odl_api_port: 8080
+
+odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz
+odl_pkg_name: karaf.tar.gz
+odl_home: "/opt/opendaylight-0.2.2/"
+odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'http', 'odl-base-all','odl-aaa-authn','odl-restconf','odl-nsf-all','odl-adsal-northbound','odl-mdsal-apidocs', 'odl-openflowplugin-all']
+odl_extra_features: ['odl-l2switch-switch', 'odl-ovsdb-plugin', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound','odl-dlux-core', 'odl-restconf-all', 'odl-mdsal-clustering', 'odl-openflowplugin-flow-services', 'odl-netconf-connector', 'odl-netconf-connector-ssh', 'jolokia-osgi']
+odl_features: "{{ odl_base_features + odl_extra_features }}"
+odl_api_port: 8080
diff --git a/conf/templates/ansible_installer/openstack_kilo/vars/allinone.tmpl b/conf/templates/ansible_installer/openstack_kilo/vars/allinone.tmpl
new file mode 100644
index 00000000..740397ca
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/vars/allinone.tmpl
@@ -0,0 +1,96 @@
+#set cluster_name = $getVar('name', '')
+#set controllers = $getVar('allinone_compute', [])
+#if not $isinstance($controllers, list)
+    #set controllers = [$controllers]
+#end if
+
+#for controller in $controllers
+    #set controller_ip = $controller.management.ip
+    #set controller_hostname = $controller.hostname
+controller_host: $controller_ip
+#end for
+#for network in $controllers
+    #set network_external_nic = $network.external.interface
+    #set network_external_subnet = $network.external.subnet
+    #set network_internal_nic = $network.management.interface
+INTERFACE_NAME: $network_external_nic    
+INTERNAL_INTERFACE: $network_internal_nic
+#end for
+
+#set credentials = $getVar('service_credentials', {})
+#set console_credentials = $getVar('console_credentials', {})
+#set rabbit_username = $credentials.rabbitmq.username
+#set rabbit_password = $credentials.rabbitmq.password
+#set keystone_dbpass = $credentials.identity.password
+#set glance_dbpass = $credentials.image.password
+#set glance_pass = $console_credentials.image.password
+#set nova_dbpass = $credentials.compute.password
+#set nova_pass = $console_credentials.compute.password
+#set dash_dbpass = $credentials.dashboard.password 
+#set cinder_dbpass = $credentials.volume.password
+#set cinder_pass = $console_credentials.volume.password
+#set admin_pass = $console_credentials.admin.password
+#set neutron_pass = $console_credentials.network.password
+
+compute_controller_host: "{{ controller_host }}"
+db_host: "{{ controller_host }}"
+rabbit_host: "{{ controller_host }}"
+storage_controller_host: "{{ controller_host }}"
+image_host: "{{ controller_host }}"
+identity_host: "{{ controller_host }}"
+network_server_host: "{{ controller_host }}"
+dashboard_host: "{{ controller_host }}"
+
+cluster_name: $cluster_name
+odl_controller: 10.1.0.15
+
+DEBUG: true
+VERBOSE: true
+NTP_SERVER_LOCAL: "{{ controller_host }}"
+DB_HOST: "{{ controller_host }}"
+MQ_BROKER: rabbitmq
+
+OPENSTACK_REPO: cloudarchive-juno.list
+juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
+ADMIN_TOKEN: admin
+CEILOMETER_TOKEN: c095d479023a0fd58a54
+
+RABBIT_USER: $rabbit_username
+RABBIT_PASS: $rabbit_password
+KEYSTONE_DBPASS: $keystone_dbpass
+DEMO_PASS: demo_secret
+ADMIN_PASS: $admin_pass
+GLANCE_DBPASS: $glance_dbpass
+GLANCE_PASS: $glance_pass
+NOVA_DBPASS: $nova_dbpass
+NOVA_PASS: $nova_pass
+DASH_DBPASS: $dash_dbpass
+CINDER_DBPASS: $cinder_dbpass
+CINDER_PASS: $cinder_pass
+NEUTRON_DBPASS: $neutron_pass
+NEUTRON_PASS: $neutron_pass
+NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
+NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
+#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
+NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
+NEUTRON_TUNNEL_TYPES: ['vxlan']
+METADATA_SECRET: metadata_secret
+INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
+
+EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
+EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
+FLOATING_IP_START: 203.0.113.101
+FLOATING_IP_END: 203.0.113.200
+
+build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+build_in_image_name: cirros-0.3.3-x86_64-disk.img
+
+physical_device: /dev/sdb
+
+internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
+internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
+HA_VIP: "{{ internal_ip }}"
+
+odl_username: admin
+odl_password: admin
+odl_api_port: 8080
diff --git a/conf/templates/ansible_installer/openstack_kilo/vars/multinodes.tmpl b/conf/templates/ansible_installer/openstack_kilo/vars/multinodes.tmpl
new file mode 100644
index 00000000..da266a79
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/vars/multinodes.tmpl
@@ -0,0 +1,165 @@
+#set cluster_name = $getVar('name', '')
+#set compute_controllers = $getVar('compute_controller', [])
+#set compute_workers = $getVar('compute_worker', [])
+#set network_servers = $getVar('network_server', [])
+#set network_workers = $getVar('network_worker', [])
+#set databases = $getVar('database', [])
+#set messagings = $getVar('messaging', [])
+#set images = $getVar('image', [])
+#set dashboards = $getVar('dashboard', [])
+#set identities = $getVar('identity', [])
+#set storage_controllers = $getVar('storage_controller', [])
+#set storage_volumes = $getVar('storage_volume', [])
+#if not $isinstance($compute_controllers, list)
+    #set compute_controllers = [$compute_controllers]
+#end if
+#if not $isinstance($compute_workers, list)
+    #set compute_workers = [$compute_workers]
+#end if
+#if not $isinstance($network_servers, list)
+    #set network_servers = [$network_servers]
+#end if
+#if not $isinstance($network_workers, list)
+    #set network_workers = [$network_workers]
+#end if
+#if not $isinstance($databases, list)
+    #set databases = [$databases]
+#end if
+#if not $isinstance($messagings, list)
+    #set messagings = [$messagings]
+#end if
+#if not $isinstance($images, list)
+    #set images = [$images]
+#end if
+#if not $isinstance($dashboards, list)
+    #set dashboards = [$dashboards]
+#end if
+#if not $isinstance($identities, list)
+    #set identities = [$identities]
+#end if
+#if not $isinstance($storage_controllers, list)
+    #set storage_controllers = [$storage_controllers]
+#end if
+#if not $isinstance($storage_volumes, list)
+    #set storage_volumes = [$storage_volumes]
+#end if
+#for worker in $compute_controllers
+    #set worker_ip = $worker.management.ip
+compute_controller_host: $worker_ip
+#end for
+#for worker in $databases
+    #set worker_ip = $worker.management.ip
+db_host: $worker_ip
+#end for
+#for worker in $messagings
+    #set worker_ip = $worker.management.ip
+rabbit_host: $worker_ip
+#end for
+#for worker in $storage_controllers
+    #set worker_ip = $worker.management.ip
+storage_controller_host: $worker_ip
+#end for
+#for worker in $images
+    #set worker_ip = $worker.management.ip
+image_host: $worker_ip
+#end for
+#for worker in $identities
+    #set worker_ip = $worker.management.ip
+identity_host: $worker_ip
+#end for
+#for worker in $compute_controllers
+    #set worker_ip = $worker.management.ip
+compute_controller_host: $worker_ip
+#end for
+#for worker in $network_servers
+    #set worker_ip = $worker.management.ip
+network_server_host: $worker_ip
+#end for
+#for worker in $dashboards
+    #set worker_ip = $worker.management.ip
+dashboard_host: $worker_ip
+#end for
+#for network in $network_workers
+    #set network_external_nic = $network.external.interface
+    #set network_internal_nic = $network.management.interface
+INTERFACE_NAME: $network_external_nic
+INTERNAL_INTERFACE: $network_internal_nic
+#end for
+
+#set credentials = $getVar('service_credentials', {})
+#set console_credentials = $getVar('console_credentials', {})
+#set rabbit_username = $credentials.rabbitmq.username
+#set rabbit_password = $credentials.rabbitmq.password
+#set keystone_dbpass = $credentials.identity.password
+#set glance_dbpass = $credentials.image.password
+#set glance_pass = $console_credentials.image.password
+#set nova_dbpass = $credentials.compute.password
+#set nova_pass = $console_credentials.compute.password
+#set dash_dbpass = $credentials.dashboard.password
+#set cinder_dbpass = $credentials.volume.password
+#set cinder_pass = $console_credentials.volume.password
+#set admin_pass = $console_credentials.admin.password
+#set neutron_pass = $console_credentials.network.password
+
+cluster_name: $cluster_name
+
+odl_controller: 10.1.0.15
+
+DEBUG: true
+VERBOSE: true
+NTP_SERVER_LOCAL: "{{ compute_controller_host }}"
+DB_HOST: "{{ db_host }}"
+MQ_BROKER: rabbitmq
+
+OPENSTACK_REPO: cloudarchive-juno.list
+juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
+ADMIN_TOKEN: admin
+CEILOMETER_TOKEN: c095d479023a0fd58a54
+
+RABBIT_USER: $rabbit_username
+RABBIT_PASS: $rabbit_password
+KEYSTONE_DBPASS: $keystone_dbpass
+DEMO_PASS: demo_secret
+ADMIN_PASS: $admin_pass
+GLANCE_DBPASS: $glance_dbpass
+GLANCE_PASS: $glance_pass
+NOVA_DBPASS: $nova_dbpass
+NOVA_PASS: $nova_pass
+DASH_DBPASS: $dash_dbpass
+CINDER_DBPASS: $cinder_dbpass
+CINDER_PASS: $cinder_pass
+NEUTRON_DBPASS: $neutron_pass
+NEUTRON_PASS: $neutron_pass
+NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
+NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
+#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
+NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
+NEUTRON_TUNNEL_TYPES: ['vxlan']
+METADATA_SECRET: metadata_secret
+INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
+
+EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
+EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
+FLOATING_IP_START: 203.0.113.101
+FLOATING_IP_END: 203.0.113.200
+
+build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+build_in_image_name: cirros-0.3.3-x86_64-disk.img
+
+physical_device: /dev/sdb
+
+internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
+internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
+HA_VIP: "{{ internal_ip }}"
+odl_username: admin
+odl_password: admin
+odl_api_port: 8080
+
+odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz
+odl_pkg_name: karaf.tar.gz
+odl_home: "/opt/opendaylight-0.2.2/"
+odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'http', 'odl-base-all','odl-aaa-authn','odl-restconf','odl-nsf-all','odl-adsal-northbound','odl-mdsal-apidocs', 'odl-openflowplugin-all']
+odl_extra_features: ['odl-l2switch-switch', 'odl-ovsdb-plugin', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound','odl-dlux-core', 'odl-restconf-all', 'odl-mdsal-clustering', 'odl-openflowplugin-flow-services', 'odl-netconf-connector', 'odl-netconf-connector-ssh', 'jolokia-osgi']
+odl_features: "{{ odl_base_features + odl_extra_features }}"
+odl_api_port: 8080
+
diff --git a/conf/templates/ansible_installer/openstack_kilo/vars/single-controller.tmpl b/conf/templates/ansible_installer/openstack_kilo/vars/single-controller.tmpl
new file mode 100644
index 00000000..b24bc811
--- /dev/null
+++ b/conf/templates/ansible_installer/openstack_kilo/vars/single-controller.tmpl
@@ -0,0 +1,108 @@
+#set cluster_name = $getVar('name', '')
+#set controllers = $getVar('controller', [])
+#set computes = $getVar('compute', [])
+#set networks = $getVar('network', [])
+#set storages = $getVar('storage', [])
+#if not $isinstance($controllers, list)
+    #set controllers = [$controllers]
+#end if
+#if not $isinstance($computes, list)
+    #set computes = [$computes]
+#end if
+#if not $isinstance($networks, list)
+    #set networks = [$networks]
+#end if
+#if not $isinstance($storages, list)
+    #set storages = [$storages]
+#end if
+
+#for controller in $controllers
+    #set controller_ip = $controller.management.ip
+    #set controller_hostname = $controller.hostname
+controller_host: $controller_ip
+#end for
+#for network in $networks
+    #set network_external_nic = $network.external.interface
+    #set network_external_subnet = $network.external.subnet
+    #set network_internal_nic = $network.management.interface
+INTERFACE_NAME: $network_external_nic
+INTERNAL_INTERFACE: $network_internal_nic
+#end for
+
+#set credentials = $getVar('service_credentials', {})
+#set console_credentials = $getVar('console_credentials', {})
+#set rabbit_username = $credentials.rabbitmq.username
+#set rabbit_password = $credentials.rabbitmq.password
+#set keystone_dbpass = $credentials.identity.password
+#set glance_dbpass = $credentials.image.password
+#set glance_pass = $console_credentials.image.password
+#set nova_dbpass = $credentials.compute.password
+#set nova_pass = $console_credentials.compute.password
+#set dash_dbpass = $credentials.dashboard.password
+#set cinder_dbpass = $credentials.volume.password
+#set cinder_pass = $console_credentials.volume.password
+#set admin_pass = $console_credentials.admin.password
+#set neutron_pass = $console_credentials.network.password
+
+cluster_name: $cluster_name
+deploy_type: $getVar('deploy_type', 'virtual')
+compute_controller_host: "{{ controller_host }}"
+db_host: "{{ controller_host }}"
+rabbit_host: "{{ controller_host }}"
+storage_controller_host: "{{ controller_host }}"
+image_host: "{{ controller_host }}"
+identity_host: "{{ controller_host }}"
+network_server_host: "{{ controller_host }}"
+dashboard_host: "{{ controller_host }}"
+odl_controller: 10.1.0.15
+
+DEBUG: true
+VERBOSE: true
+NTP_SERVER_LOCAL: "{{ controller_host }}"
+DB_HOST: "{{ controller_host }}"
+MQ_BROKER: rabbitmq
+
+OPENSTACK_REPO: cloudarchive-juno.list
+juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
+ADMIN_TOKEN: admin
+CEILOMETER_TOKEN: c095d479023a0fd58a54
+
+RABBIT_USER: $rabbit_username
+RABBIT_PASS: $rabbit_password
+KEYSTONE_DBPASS: $keystone_dbpass
+DEMO_PASS: demo_secret
+ADMIN_PASS: $admin_pass
+GLANCE_DBPASS: $glance_dbpass
+GLANCE_PASS: $glance_pass
+NOVA_DBPASS: $nova_dbpass
+NOVA_PASS: $nova_pass
+DASH_DBPASS: $dash_dbpass
+CINDER_DBPASS: $cinder_dbpass
+CINDER_PASS: $cinder_pass
+NEUTRON_DBPASS: $neutron_pass
+NEUTRON_PASS: $neutron_pass
+NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
+NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
+#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
+NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
+NEUTRON_TUNNEL_TYPES: ['vxlan']
+METADATA_SECRET: metadata_secret
+INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
+
+EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
+# EXTERNAL_NETWORK_CIDR: $network_external_subnet
+EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
+FLOATING_IP_START: 203.0.113.101
+FLOATING_IP_END: 203.0.113.200
+
+build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+build_in_image_name: cirros-0.3.3-x86_64-disk.img
+
+physical_device: /dev/sdb
+
+internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
+internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
+HA_VIP: "{{ internal_ip }}"
+odl_username: admin
+odl_password: admin
+odl_api_port: 8080
diff --git a/conf/templates/cobbler/CentOS-7-Minimal-1503-01-x86_64/profile.tmpl b/conf/templates/cobbler/CentOS-7-Minimal-1503-01-x86_64/profile.tmpl
new file mode 100644
index 00000000..cfa89004
--- /dev/null
+++ b/conf/templates/cobbler/CentOS-7-Minimal-1503-01-x86_64/profile.tmpl
@@ -0,0 +1,3 @@
+{
+    "repos": "$getVar('repo_name', '')"
+}
diff --git a/conf/templates/cobbler/CentOS-7-Minimal-1503-01-x86_64/system.tmpl b/conf/templates/cobbler/CentOS-7-Minimal-1503-01-x86_64/system.tmpl
new file mode 100644
index 00000000..8e4cbbbe
--- /dev/null
+++ b/conf/templates/cobbler/CentOS-7-Minimal-1503-01-x86_64/system.tmpl
@@ -0,0 +1,76 @@
+{
+    "name": "$hostname",
+    "hostname": "$hostname",
+    "profile": "$profile",
+    "gateway": "$gateway",
+    #import simplejson as json
+    #set nameservers = json.dumps($nameservers, encoding='utf-8')
+    "name_servers": $nameservers,
+    #set search_path = ' '.join($search_path)
+    "name_servers_search": "$search_path",
+    "proxy": "$getVar('http_proxy', '')",
+    "modify_interface":
+        #set networks = $networks
+        #set rekeyed = {}
+        #set promic_nic = ""
+        #for $nic, $val in $networks.iteritems():
+            #set ip_key = '-'.join(('ipaddress', $nic))
+            #set netmask_key = '-'.join(('netmask', $nic))
+            #set mgmt_key = '-'.join(('management', $nic))
+            #set static_key = '-'.join(('static', $nic))
+            #set $rekeyed[$ip_key] = $val.ip
+            #set $rekeyed[$netmask_key] = $val.netmask
+            #set $rekeyed[$mgmt_key] = $val.is_mgmt
+            #set $rekeyed[$static_key] = True
+
+            #set dns_key = '-'.join(('dnsname', $nic))
+            #if $val.is_mgmt
+                #set $rekeyed[$dns_key] = $dns
+            #else
+                #if '.' in $dns
+                    #set $dns_name, $dns_domain = $dns.split('.', 1)
+                    #set $dns_nic = '%s-%s.%s' % ($dns_name, $nic, $dns_domain)
+                #else
+                    #set $dns_nic = '%s-%s' % ($dns, $nic)
+                #end if
+                #set $rekeyed[$dns_key] = $dns_nic
+            #end if
+
+            #if $val.is_promiscuous:
+                #set promic_nic = $nic
+            #end if
+            #if $val.is_mgmt:
+                #set mac_key = '-'.join(('macaddress', $nic))
+                #set $rekeyed[$mac_key] = $mac
+            #end if
+        #end for
+        #set nic_info = json.dumps($rekeyed, encoding='utf-8')
+        $nic_info
+    ,
+    "ksmeta":{
+        #set partition_config = ''
+        #for k, v in $partition.iteritems():
+            #set path = ''
+            #if v['vol_percentage']:
+                #set $path = k + ' ' + str(v['vol_percentage']) + '%'
+            #else:
+                #set $path = k + ' ' + str(v['vol_size'])
+            #end if
+            #set partition_config = ';'.join((partition_config, $path))
+        #end for
+        #set partition_config = partition_config[1:]
+        #import crypt
+        #set $password = crypt.crypt($server_credentials.password, "az")
+        #set no_proxy = ','.join($getVar('no_proxy', []))
+        "username": "$server_credentials.username",
+        "password": "$password",
+        "promisc_nics": "$promic_nic",
+        "partition": "$partition_config",
+        "https_proxy": "$getVar('https_proxy', '')",
+        "ntp_server": "$ntp_server",
+        "timezone": "$timezone",
+        "ignore_proxy": "$no_proxy",
+        "local_repo": "$getVar('local_repo', '')",
+        "disk_num": "1"
+    }
+}
diff --git a/conf/templates/cobbler/ubuntu-14.04.3-server-x86_64/profile.tmpl b/conf/templates/cobbler/ubuntu-14.04.3-server-x86_64/profile.tmpl
new file mode 100644
index 00000000..cfa89004
--- /dev/null
+++ b/conf/templates/cobbler/ubuntu-14.04.3-server-x86_64/profile.tmpl
@@ -0,0 +1,3 @@
+{
+    "repos": "$getVar('repo_name', '')"
+}
diff --git a/conf/templates/cobbler/ubuntu-14.04.3-server-x86_64/system.tmpl b/conf/templates/cobbler/ubuntu-14.04.3-server-x86_64/system.tmpl
new file mode 100644
index 00000000..cfcc883e
--- /dev/null
+++ b/conf/templates/cobbler/ubuntu-14.04.3-server-x86_64/system.tmpl
@@ -0,0 +1,75 @@
+{
+    "name": "$hostname",
+    "hostname": "$hostname",
+    "profile": "$profile",
+    "gateway": "$gateway",
+    #import simplejson as json
+    #set nameservers = json.dumps($nameservers, encoding='utf-8')
+    "name_servers": $nameservers,
+    #set search_path = ' '.join($search_path)
+    "name_servers_search": "$search_path",
+    "proxy": "$getVar('http_proxy', '')",
+    "modify_interface":
+        #set networks = $networks
+        #set rekeyed = {}
+        #set promic_nic = ""
+        #for $nic, $val in $networks.iteritems():
+            #set ip_key = '-'.join(('ipaddress', $nic))
+            #set netmask_key = '-'.join(('netmask', $nic))
+            #set mgmt_key = '-'.join(('management', $nic))
+            #set static_key = '-'.join(('static', $nic))
+            #set $rekeyed[$ip_key] = $val.ip
+            #set $rekeyed[$netmask_key] = $val.netmask
+            #set $rekeyed[$static_key] = True
+
+            #set dns_key = '-'.join(('dnsname', $nic))
+            #if $val.is_mgmt
+                #set $rekeyed[$dns_key] = $dns
+            #else
+                #if '.' in $dns
+                    #set $dns_name, $dns_domain = $dns.split('.', 1)
+                    #set $dns_nic = '%s-%s.%s' % ($dns_name, $nic, $dns_domain)
+                #else
+                    #set $dns_nic = '%s-%s' % ($dns, $nic)
+                #end if
+                #set $rekeyed[$dns_key] = $dns_nic
+            #end if
+
+            #if $val.is_promiscuous:
+                #set promic_nic = $nic
+            #end if
+            #if $val.is_mgmt:
+                #set mac_key = '-'.join(('macaddress', $nic))
+                #set $rekeyed[$mac_key] = $mac
+            #end if
+        #end for
+        #set nic_info = json.dumps($rekeyed, encoding='utf-8')
+        $nic_info
+    ,
+    "ksmeta":{
+        #set partition_config = ''
+        #for k, v in $partition.iteritems():
+            #set path = ''
+            #if v['vol_percentage']:
+                #set $path = k + ' ' + str(v['vol_percentage']) + '%'
+            #else:
+                #set $path = k + ' ' + str(v['vol_size'])
+            #end if
+            #set partition_config = ';'.join((partition_config, $path))
+        #end for
+        #set partition_config = partition_config[1:]
+        #import crypt
+        #set $password = crypt.crypt($server_credentials.password, "az")
+        #set no_proxy = ','.join($getVar('no_proxy', []))
+        "username": "$server_credentials.username",
+        "password": "$password",
+        "promisc_nics": "$promic_nic",
+        "partition": "$partition_config",
+        "https_proxy": "$getVar('https_proxy', '')",
+        "ntp_server": "$ntp_server",
+        "timezone": "$timezone",
+        "ignore_proxy": "$no_proxy",
+        "local_repo": "$getVar('local_repo', '')",
+        "disk_num": "1"
+    }
+}