Remove f2s directory

Dmitry Shulyak 2015-12-01 10:36:18 +02:00
parent 3075df61de
commit f1a3b1afca
262 changed files with 0 additions and 16760 deletions

View File

@ -6,7 +6,6 @@ ADD bootstrap/playbooks/celery.yaml /celery.yaml
ADD resources /resources
ADD templates /templates
ADD run.sh /run.sh
ADD f2s /f2s
RUN apt-get update
# Install pip's dependency: setuptools:
@ -21,7 +20,6 @@ RUN pip install https://github.com/Mirantis/solar-agent/archive/master.zip
RUN ansible-playbook -v -i "localhost," -c local /celery.yaml --tags install
RUN pip install riak peewee
RUN pip install -U setuptools>=17.1
RUN pip install -U python-fuelclient
RUN apt-get install -y puppet

2 f2s/.gitignore vendored
View File

@ -1,2 +0,0 @@
fuel-library
tmp

View File

@ -1,109 +0,0 @@
# How to install on fuel master?
To use Solar on the Fuel master node we run it in a container, because the
master provides only Python 2.6. Solar itself also relies on several
services (Riak and Redis), which run as separate containers.
```
yum -y install git
git clone -b f2s https://github.com/Mirantis/solar.git
docker run --name riak -d -p 8087:8087 -p 8098:8098 tutum/riak
docker run --name redis -d -p 6379:6379 -e REDIS_PASS=**None** tutum/redis
docker run --name solar -d -v /root/solar/solar:/solar -v /root/solar/solard:/solard -v /root/solar/templates:/templates \
-v /root/solar/resources:/resources -v /root/solar/f2s:/f2s \
-v /var/lib/fuel:/var/lib/fuel -v /root/.config/fuel/fuel_client.yaml:/etc/fuel/client/config.yaml -v /etc/puppet/modules:/etc/puppet/modules \
-v /root/.ssh:/root/.ssh \
--link=riak:riak --link=redis:redis solarproject/solar-celery:f2s
```
# f2s.py
This script converts tasks.yaml plus the fuel-library actions into Solar
resources, VRs, and events; it exposes two commands, `t2r` (tasks to
resources) and `g2vr` (groups to VR templates).
1. Based on tasks.yaml a meta.yaml is generated for each task; see
f2s/resources/netconfig/meta.yaml for an example.
2. Based on hiera lookups we generate the inputs for each resource; the
patches used to capture those lookups can be found in f2s/patches.
3. VRs (f2s/vrs) are generated from the dependencies between tasks and roles.
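As a rough illustration, the sketch below (hypothetical task and input
values, mirroring `Task.meta()` from f2s.py further down) shows the kind of
meta.yaml the `t2r` conversion produces for a single puppet task:
```
import yaml

# a simplified tasks.yaml entry as fuel-library provides it
task = {'id': 'netconfig', 'type': 'puppet',
        'parameters': {'puppet_manifest': 'modular/netconfig/netconfig.pp'}}

# f2s maps it to a Solar resource meta.yaml; the inputs are normally
# discovered through the hiera lookup patches, here one is hard-coded
meta = {'id': task['id'],
        'handler': 'puppetv2',
        'version': '8.0',
        'actions': {'run': 'run.pp', 'update': 'run.pp'},
        'input': {'fqdn': {'value': None}}}

print(yaml.safe_dump(meta, default_flow_style=False))
```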
# fsclient.py
This script helps to create Solar resources from nailgun data.
Note that it should be run inside the solar container.
`./f2s/fsclient.py master 1`
Accepts a cluster id; prepares the transports for the master node and the
key-generation task for the given cluster.
`./f2s/fsclient.py nodes 1`
Prepares transports for the provided nodes; their ip and cluster id are
fetched from nailgun.
`./f2s/fsclient.py prep 1`
Creates tasks for syncing keys and fuel-library modules.
`./f2s/fsclient.py roles 1`
Based on the roles stored in nailgun, assigns vrs/<role>.yaml to the given
nodes. Right now this takes a while, so be patient.
# fetching data from nailgun
A special entity was added which allows fetching data from any source
*before* any actual deployment.
This entity provides a mechanism to specify a *manager* (or a list of them)
for a resource.
A manager accepts the resource inputs as JSON on stdin and writes its result
to stdout; that result is then used to update the Solar storage.
Examples can be found in f2s/resources/role_data/managers.
Data is fetched with the solar command
`solar res prefetch -n <resource name>`
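A minimal sketch of such a manager, assuming a purely illustrative
`example_key` (the shipped managers live in
f2s/resources/role_data/managers, e.g. globals.py):
```
#!/usr/bin/env python
import json
import sys

# the resource inputs arrive as JSON on stdin when
# `solar res prefetch -n <resource name>` runs the manager
inputs = json.load(sys.stdin)

# a real manager would query nailgun or compute derived data here;
# 'example_key' is purely illustrative
result = dict(inputs)
result['example_key'] = 'example-value'

# whatever is written to stdout is used to update the Solar storage
json.dump(result, sys.stdout)
```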
# tweaks
Several things need to be adjusted manually before you can use Solar
on the fuel master:
- provision a node with fuel
`fuel node --node 1 --provision`
- create the /var/lib/astute directory on the remote node
- install the repos using fuel
`fuel node --node 1 --tasks core_repos`
- configure hiera on the remote node and create the /etc/puppet/hieradata directory
```
:backends:
- yaml
#- json
:yaml:
:datadir: /etc/puppet/hieradata
:json:
:datadir: /etc/puppet/hieradata
:hierarchy:
- "%{resource_name}"
- resource
```
All of these things will eventually be automated by Solar.
# basic troubleshooting
If any Fuel plugin is installed, you should manually create a stanza for it
in `./f2s/resources/role_data/meta.yaml`, like:
```
input:
foo_plugin_name:
value: null
```
and regenerate the data from nailgun. To regenerate the deployment data as
Solar resources, run
```
solar res clear_all
```
and then repeat all of the fsclient.py steps and the nailgun data fetching step.

View File

@ -1,354 +0,0 @@
#!/usr/bin/env python
import os
from fnmatch import fnmatch
import shutil
from collections import OrderedDict
import click
import yaml
import networkx as nx
def ensure_dir(dir):
try:
os.makedirs(dir)
except OSError:
pass
CURDIR = os.path.dirname(os.path.realpath(__file__))
LIBRARY_PATH = os.path.join(CURDIR, 'fuel-library')
RESOURCE_TMP_WORKDIR = os.path.join(CURDIR, 'tmp/resources')
ensure_dir(RESOURCE_TMP_WORKDIR)
RESOURCE_DIR = os.path.join(CURDIR, 'resources')
VR_TMP_DIR = os.path.join(CURDIR, 'tmp/vrs')
ensure_dir(VR_TMP_DIR)
INPUTS_LOCATION = "/root/current/"
DEPLOYMENT_GROUP_PATH = os.path.join(LIBRARY_PATH,
'deployment', 'puppet', 'deployment_groups', 'tasks.yaml')
VALID_TASKS = ('puppet', 'skipped')
def clean_resources():
shutil.rmtree(RESOURCE_TMP_WORKDIR)
ensure_dir(RESOURCE_TMP_WORKDIR)
def clean_vr():
shutil.rmtree(VR_TMP_DIR)
ensure_dir(VR_TMP_DIR)
def ordered_dump(data, stream=None, Dumper=yaml.Dumper, **kwds):
class OrderedDumper(Dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
data.items())
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, stream, OrderedDumper, **kwds)
class Task(object):
def __init__(self, task_data, task_path):
self.data = task_data
self.src_path = task_path
self.name = self.data['id']
self.type = self.data['type']
def edges(self):
data = self.data
if 'required_for' in data:
for req in data['required_for']:
yield self.name, req
if 'requires' in data:
for req in data['requires']:
yield req, self.name
if 'groups' in data:
for req in data['groups']:
yield self.name, req
if 'tasks' in data:
for req in data['tasks']:
yield req, self.name
@property
def manifest(self):
if self.data['type'] != 'puppet':
return None
after_naily = self.data['parameters']['puppet_manifest'].split('osnailyfacter/')[-1]
return os.path.join(
LIBRARY_PATH, 'deployment', 'puppet', 'osnailyfacter',
after_naily)
@property
def spec_name(self):
splitted = self.data['parameters']['puppet_manifest'].split('/')
directory = splitted[-2]
name = splitted[-1].split('.')[0]
return "{}_{}_spec.rb'".format(directory, name)
@property
def dst_path(self):
return os.path.join(RESOURCE_TMP_WORKDIR, self.name)
@property
def actions_path(self):
return os.path.join(self.dst_path, 'actions')
@property
def meta_path(self):
return os.path.join(self.dst_path, 'meta.yaml')
def meta(self):
if self.data['type'] == 'skipped':
data = OrderedDict([('id', self.name),
('handler', 'none'),
('version', '8.0'),
('inputs', {})])
elif self.data['type'] == 'puppet':
data = OrderedDict([('id', self.name),
('handler', 'puppetv2'),
('version', '8.0'),
('actions', {
'run': 'run.pp',
'update': 'run.pp'}),
('input', self.inputs()),])
else:
raise NotImplementedError('Support for %s' % self.data['type'])
return ordered_dump(data, default_flow_style=False)
@property
def actions(self):
"""yield an iterable of src/dst
"""
if self.manifest is None:
return
yield self.manifest, os.path.join(self.actions_path, 'run.pp')
def inputs(self):
"""
Inputs prepared by
fuel_noop_tests.rb
identity = spec.split('/')[-1]
ENV["SPEC"] = identity
hiera.rb
File.open("/tmp/fuel_specs/#{ENV['SPEC']}", 'a') { |f| f << "- #{key}\n" }
"""
print self.spec_name
lookup_stack_path = os.path.join(
INPUTS_LOCATION, self.spec_name)
if not os.path.exists(lookup_stack_path):
return {}
with open(lookup_stack_path) as f:
data = yaml.safe_load(f) or []
data = data + ['puppet_modules']
return {key: {'value': None} for key
in set(data) if '::' not in key}
class RoleData(Task):
name = 'role_data'
def meta(self):
data = {'id': self.name,
'handler': 'puppetv2',
'version': '8.0',
'inputs': self.inputs(),
'manager': 'globals.py'}
return yaml.safe_dump(data, default_flow_style=False)
@property
def actions(self):
pass
class DGroup(object):
filtered = ['globals', 'hiera', 'deploy_start']
def __init__(self, name, tasks):
self.name = name
self.tasks = tasks
def resources(self):
yield OrderedDict(
[('id', RoleData.name+"{{index}}"),
('from', 'f2s/resources/'+RoleData.name),
('location', "{{node}}"),
('values', {'uid': '{{index}}',
'env': '{{env}}',
'puppet_modules': '/etc/puppet/modules'})])
for t, _, _ in self.tasks:
if t.name in self.filtered:
continue
yield OrderedDict(
[('id', t.name+"{{index}}"),
('from', 'f2s/resources/'+t.name),
('location', "{{node}}"),
('values_from', RoleData.name+"{{index}}")])
def events(self):
for t, inner, outer in self.tasks:
if t.name in self.filtered:
continue
yield OrderedDict([
('type', 'depends_on'),
('state', 'success'),
('parent_action', RoleData.name + '{{index}}.run'),
('depend_action', t.name + '{{index}}.run')])
for dep in set(inner):
if dep in self.filtered:
continue
yield OrderedDict([
('type', 'depends_on'),
('state', 'success'),
('parent_action', dep + '{{index}}.run'),
('depend_action', t.name + '{{index}}.run')])
for dep in set(outer):
if dep in self.filtered:
continue
yield OrderedDict([
('type', 'depends_on'),
('state', 'success'),
('parent', {
'with_tags': ['resource=' + dep],
'action': 'run'}),
('depend_action', t.name + '{{index}}.run')])
def meta(self):
data = OrderedDict([
('id', self.name),
('resources', list(self.resources())),
('events', list(self.events()))])
return ordered_dump(data, default_flow_style=False)
@property
def path(self):
return os.path.join(VR_TMP_DIR, self.name + '.yml')
def get_files(base_dir, file_pattern='*tasks.yaml'):
for root, _dirs, files in os.walk(base_dir):
for file_name in files:
if fnmatch(file_name, file_pattern):
yield root, file_name
def load_data(base, file_name):
with open(os.path.join(base, file_name)) as f:
return yaml.load(f)
def preview(task):
print 'PATH'
print task.dst_path
print 'META'
print task.meta()
print 'ACTIONS'
for action in task.actions:
print 'src=%s dst=%s' % action
def create(task):
ensure_dir(task.dst_path)
if task.actions_path:
ensure_dir(task.actions_path)
for src, dst in task.actions:
shutil.copyfile(src, dst)
with open(task.meta_path, 'w') as f:
f.write(task.meta())
def get_tasks():
for base, task_yaml in get_files(LIBRARY_PATH + '/deployment'):
for item in load_data(base, task_yaml):
yield Task(item, base)
def get_graph():
dg = nx.DiGraph()
for t in get_tasks():
dg.add_edges_from(list(t.edges()))
dg.add_node(t.name, t=t)
return dg
def dgroup_subgraph(dg, dgroup):
preds = [p for p in dg.predecessors(dgroup)
if dg.node[p]['t'].type == 'puppet']
return dg.subgraph(preds)
@click.group()
def main():
pass
@main.command(help='converts tasks into resources')
@click.argument('tasks', nargs=-1)
@click.option('-t', is_flag=True)
@click.option('-p', is_flag=True)
@click.option('-c', is_flag=True)
def t2r(tasks, t, p, c):
if c:
clean_resources()
for task in get_tasks():
if not task.type in VALID_TASKS:
continue
if task.name in tasks or tasks == ():
if p:
preview(task)
else:
create(task)
@main.command(help='convert groups into templates')
@click.argument('groups', nargs=-1)
@click.option('-c', is_flag=True)
def g2vr(groups, c):
if c:
clean_vr()
dg = get_graph()
dgroups = [n for n in dg if dg.node[n]['t'].type == 'group']
for group in dgroups:
if groups and group not in groups:
continue
ordered = []
dsub = dg.subgraph(dg.predecessors(group))
for t in nx.topological_sort(dsub):
inner_preds = []
outer_preds = []
for p in dg.predecessors(t):
if not dg.node[p]['t'].type in VALID_TASKS:
continue
if p in dsub:
inner_preds.append(p)
else:
outer_preds.append(p)
if dg.node[t]['t'].type in VALID_TASKS:
ordered.append((dg.node[t]['t'], inner_preds, outer_preds))
obj = DGroup(group, ordered)
with open(obj.path, 'w') as f:
f.write(obj.meta())
# based on inner/outer aggregation configure joins in events
if __name__ == '__main__':
main()

View File

@ -1,89 +0,0 @@
#!/usr/bin/env python
import os
import click
from solar.core.resource import virtual_resource as vr
from solar.dblayer.model import ModelMeta
@click.group()
def main():
pass
class NailgunSource(object):
def nodes(self, uids):
from fuelclient.objects.node import Node
nodes_obj = map(Node, uids)
return [(str(n.data['id']), str(n.data['ip']), str(n.data['cluster']))
for n in nodes_obj]
def roles(self, uid):
from fuelclient.objects.node import Node
from fuelclient.objects.environment import Environment
node = Node(uid)
env = Environment(node.data['cluster'])
facts = env.get_default_facts('deployment', [uid])
return [f['role'] for f in facts]
def master(self):
return 'master', '10.20.0.2'
class DumbSource(object):
def nodes(self, uids):
ip_mask = '10.0.0.%s'
return [(uid, ip_mask % uid, 1) for uid in uids]
def roles(self, uid):
return ['primary-controller']
def master(self):
return 'master', '0.0.0.0'
if os.environ.get('DEBUG_FSCLIENT'):
source = DumbSource()
else:
source = NailgunSource()
@main.command()
@click.argument('uids', nargs=-1)
def nodes(uids):
for uid, ip, env in source.nodes(uids):
vr.create('fuel_node', 'f2s/vrs/fuel_node.yaml',
{'index': uid, 'ip': ip})
@main.command()
@click.argument('env')
def master(env):
master = source.master()
vr.create('master', 'f2s/vrs/fuel_node.yaml',
{'index': master[0], 'ip': master[1]})
vr.create('genkeys', 'f2s/vrs/genkeys.yaml', {
'node': 'node'+master[0],
'index': env})
@main.command()
@click.argument('uids', nargs=-1)
def prep(uids):
for uid, ip, env in source.nodes(uids):
vr.create('prep', 'f2s/vrs/prep.yaml',
{'index': uid, 'env': env, 'node': 'node'+uid})
@main.command()
@click.argument('uids', nargs=-1)
def roles(uids):
for uid, ip, env in source.nodes(uids):
for role in source.roles(uid):
vr.create(role, 'f2s/vrs/'+role +'.yml',
{'index': uid, 'env': env, 'node': 'node'+uid})
if __name__ == '__main__':
main()
ModelMeta.session_end()

View File

@ -1,10 +0,0 @@
--- /tmp/noop/.bundled_gems/gems/hiera-1.3.4/lib/hiera.rb 2015-11-09 19:55:29.127004136 +0000
+++ /tmp/noop/.bundled_gems/gems/hiera-1.3.4/lib/hiera.rb 2015-11-09 14:15:54.372852787 +0000
@@ -57,6 +57,7 @@
# The order-override will insert as first in the hierarchy a data source
# of your choice.
def lookup(key, default, scope, order_override=nil, resolution_type=:priority)
+ File.open("/tmp/fuel_specs/#{ENV['SPEC']}", 'a') { |f| f << "- #{key}\n" }
Backend.lookup(key, default, scope, order_override, resolution_type)
end
end

View File

@ -1,15 +0,0 @@
--- fuel-library/utils/jenkins/fuel_noop_tests.rb 2015-11-09 19:51:53.000000000 +0000
+++ fuel-library/utils/jenkins/fuel_noop_tests.rb 2015-11-09 19:51:17.000000000 +0000
@@ -271,8 +271,10 @@
# @return [Array<TrueClass,FalseClass,NilClass>] success and empty report array
def self.rspec(spec)
inside_noop_tests_directory do
+ splitted = spec.split('/')
+ dir, name = splitted[-2], splitted[-1]
+ ENV["SPEC"] = "#{dir}_#{name}"
command = "rspec #{RSPEC_OPTIONS} #{spec}"
- command = 'bundle exec ' + command if options[:bundle]
+ command = "bundle exec " + command if options[:bundle]
if options[:filter_examples]
options[:filter_examples].each do |example|
command = command + " -e #{example}"

View File

@ -1,13 +0,0 @@
notice('MODULAR: apache.pp')
# adjustments to defaults for LP#1485644 for scale
sysctl::value { 'net.core.somaxconn': value => '4096' }
sysctl::value { 'net.ipv4.tcp_max_syn_backlog': value => '8192' }
class { 'osnailyfacter::apache':
purge_configs => true,
listen_ports => hiera_array('apache_ports', ['80', '8888']),
}
include ::osnailyfacter::apache_mpm

View File

@ -1,15 +0,0 @@
id: apache
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
apache_ports:
value: null
fqdn:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,16 +0,0 @@
notice('MODULAR: api-proxy.pp')
$max_header_size = hiera('max_header_size', '81900')
# Apache and listen ports
class { 'osnailyfacter::apache':
listen_ports => hiera_array('apache_ports', ['80', '8888']),
}
# API proxy vhost
class {'osnailyfacter::apache_api_proxy':
master_ip => hiera('master_ip'),
max_header_size => $max_header_size,
}
include ::tweaks::apache_wrappers

View File

@ -1,19 +0,0 @@
id: api-proxy
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
apache_ports:
value: null
fqdn:
value: null
master_ip:
value: null
max_header_size:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,59 +0,0 @@
notice('MODULAR: ceilometer/compute.pp')
$use_syslog = hiera('use_syslog', true)
$use_stderr = hiera('use_stderr', false)
$syslog_log_facility = hiera('syslog_log_facility_ceilometer', 'LOG_LOCAL0')
$rabbit_hash = hiera_hash('rabbit_hash')
$management_vip = hiera('management_vip')
$service_endpoint = hiera('service_endpoint')
$default_ceilometer_hash = {
'enabled' => false,
'db_password' => 'ceilometer',
'user_password' => 'ceilometer',
'metering_secret' => 'ceilometer',
'http_timeout' => '600',
'event_time_to_live' => '604800',
'metering_time_to_live' => '604800',
}
$region = hiera('region', 'RegionOne')
$ceilometer_hash = hiera_hash('ceilometer_hash', $default_ceilometer_hash)
$ceilometer_region = pick($ceilometer_hash['region'], $region)
$ceilometer_enabled = $ceilometer_hash['enabled']
$amqp_password = $rabbit_hash['password']
$amqp_user = $rabbit_hash['user']
$ceilometer_user_password = $ceilometer_hash['user_password']
$ceilometer_metering_secret = $ceilometer_hash['metering_secret']
$verbose = pick($ceilometer_hash['verbose'], hiera('verbose', true))
$debug = pick($ceilometer_hash['debug'], hiera('debug', false))
if ($ceilometer_enabled) {
class { 'openstack::ceilometer':
verbose => $verbose,
debug => $debug,
use_syslog => $use_syslog,
use_stderr => $use_stderr,
syslog_log_facility => $syslog_log_facility,
amqp_hosts => hiera('amqp_hosts',''),
amqp_user => $amqp_user,
amqp_password => $amqp_password,
keystone_user => $ceilometer_hash['user'],
keystone_tenant => $ceilometer_hash['tenant'],
keystone_region => $ceilometer_region,
keystone_host => $service_endpoint,
keystone_password => $ceilometer_user_password,
on_compute => true,
metering_secret => $ceilometer_metering_secret,
event_time_to_live => $ceilometer_hash['event_time_to_live'],
metering_time_to_live => $ceilometer_hash['metering_time_to_live'],
http_timeout => $ceilometer_hash['http_timeout'],
}
# We need to restart the nova-compute service in order to apply the new settings
include ::nova::params
service { 'nova-compute':
ensure => 'running',
name => $::nova::params::compute_service_name,
}
}

View File

@ -1,37 +0,0 @@
id: ceilometer-compute
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
amqp_hosts:
value: null
ceilometer:
value: null
ceilometer_hash:
value: null
debug:
value: null
fqdn:
value: null
management_vip:
value: null
puppet_modules:
value: null
rabbit_hash:
value: null
region:
value: null
role:
value: null
service_endpoint:
value: null
syslog_log_facility_ceilometer:
value: null
use_stderr:
value: null
use_syslog:
value: null
verbose:
value: null

View File

@ -1,111 +0,0 @@
notice('MODULAR: ceilometer/controller.pp')
$default_ceilometer_hash = {
'enabled' => false,
'db_password' => 'ceilometer',
'user_password' => 'ceilometer',
'metering_secret' => 'ceilometer',
'http_timeout' => '600',
'event_time_to_live' => '604800',
'metering_time_to_live' => '604800',
}
$ceilometer_hash = hiera_hash('ceilometer', $default_ceilometer_hash)
$verbose = pick($ceilometer_hash['verbose'], hiera('verbose', true))
$debug = pick($ceilometer_hash['debug'], hiera('debug', false))
$use_syslog = hiera('use_syslog', true)
$use_stderr = hiera('use_stderr', false)
$syslog_log_facility = hiera('syslog_log_facility_ceilometer', 'LOG_LOCAL0')
$nodes_hash = hiera('nodes')
$storage_hash = hiera('storage')
$rabbit_hash = hiera_hash('rabbit_hash')
$management_vip = hiera('management_vip')
$region = hiera('region', 'RegionOne')
$ceilometer_region = pick($ceilometer_hash['region'], $region)
$mongo_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('mongo_roles'))
$mongo_address_map = get_node_to_ipaddr_map_by_network_role($mongo_nodes, 'mongo/db')
$default_mongo_hash = {
'enabled' => false,
}
$mongo_hash = hiera_hash('mongo', $default_mongo_hash)
if $mongo_hash['enabled'] and $ceilometer_hash['enabled'] {
$exteranl_mongo_hash = hiera_hash('external_mongo')
$ceilometer_db_user = $exteranl_mongo_hash['mongo_user']
$ceilometer_db_password = $exteranl_mongo_hash['mongo_password']
$ceilometer_db_dbname = $exteranl_mongo_hash['mongo_db_name']
$external_mongo = true
} else {
$ceilometer_db_user = 'ceilometer'
$ceilometer_db_password = $ceilometer_hash['db_password']
$ceilometer_db_dbname = 'ceilometer'
$external_mongo = false
$exteranl_mongo_hash = {}
}
$ceilometer_enabled = $ceilometer_hash['enabled']
$ceilometer_user_password = $ceilometer_hash['user_password']
$ceilometer_metering_secret = $ceilometer_hash['metering_secret']
$ceilometer_db_type = 'mongodb'
$swift_rados_backend = $storage_hash['objects_ceph']
$amqp_password = $rabbit_hash['password']
$amqp_user = $rabbit_hash['user']
$rabbit_ha_queues = true
$service_endpoint = hiera('service_endpoint')
$ha_mode = pick($ceilometer_hash['ha_mode'], true)
prepare_network_config(hiera('network_scheme', {}))
$api_bind_address = get_network_role_property('ceilometer/api', 'ipaddr')
if $ceilometer_hash['enabled'] {
if $external_mongo {
$mongo_hosts = $exteranl_mongo_hash['hosts_ip']
if $exteranl_mongo_hash['mongo_replset'] {
$mongo_replicaset = $exteranl_mongo_hash['mongo_replset']
} else {
$mongo_replicaset = undef
}
} else {
$mongo_hosts = join(values($mongo_address_map), ',')
# MongoDB is always configured with a replica set
$mongo_replicaset = 'ceilometer'
}
}
###############################################################################
if ($ceilometer_enabled) {
class { 'openstack::ceilometer':
verbose => $verbose,
debug => $debug,
use_syslog => $use_syslog,
use_stderr => $use_stderr,
syslog_log_facility => $syslog_log_facility,
db_type => $ceilometer_db_type,
db_host => $mongo_hosts,
db_user => $ceilometer_db_user,
db_password => $ceilometer_db_password,
db_dbname => $ceilometer_db_dbname,
swift_rados_backend => $swift_rados_backend,
metering_secret => $ceilometer_metering_secret,
amqp_hosts => hiera('amqp_hosts',''),
amqp_user => $amqp_user,
amqp_password => $amqp_password,
rabbit_ha_queues => $rabbit_ha_queues,
keystone_host => $service_endpoint,
keystone_password => $ceilometer_user_password,
keystone_user => $ceilometer_hash['user'],
keystone_tenant => $ceilometer_hash['tenant'],
keystone_region => $ceilometer_region,
host => $api_bind_address,
ha_mode => $ha_mode,
on_controller => true,
ext_mongo => $external_mongo,
mongo_replicaset => $mongo_replicaset,
event_time_to_live => $ceilometer_hash['event_time_to_live'],
metering_time_to_live => $ceilometer_hash['metering_time_to_live'],
http_timeout => $ceilometer_hash['http_timeout'],
}
}

View File

@ -1,47 +0,0 @@
id: ceilometer-controller
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
ceilometer:
value: null
debug:
value: null
fqdn:
value: null
management_vip:
value: null
mongo:
value: null
mongo_roles:
value: null
network_metadata:
value: null
network_scheme:
value: null
nodes:
value: null
puppet_modules:
value: null
rabbit:
value: null
rabbit_hash:
value: null
region:
value: null
role:
value: null
service_endpoint:
value: null
storage:
value: null
syslog_log_facility_ceilometer:
value: null
use_stderr:
value: null
use_syslog:
value: null
verbose:
value: null

View File

@ -1,41 +0,0 @@
notice('MODULAR: ceilometer/keystone.pp')
$ceilometer_hash = hiera_hash('ceilometer', {})
$public_vip = hiera('public_vip')
$public_ssl_hash = hiera('public_ssl')
$public_address = $public_ssl_hash['services'] ? {
true => $public_ssl_hash['hostname'],
default => $public_vip,
}
$public_protocol = $public_ssl_hash['services'] ? {
true => 'https',
default => 'http',
}
$admin_address = hiera('management_vip')
$region = pick($ceilometer_hash['region'], hiera('region', 'RegionOne'))
$password = $ceilometer_hash['user_password']
$auth_name = pick($ceilometer_hash['auth_name'], 'ceilometer')
$configure_endpoint = pick($ceilometer_hash['configure_endpoint'], true)
$configure_user = pick($ceilometer_hash['configure_user'], true)
$configure_user_role = pick($ceilometer_hash['configure_user_role'], true)
$service_name = pick($ceilometer_hash['service_name'], 'ceilometer')
$tenant = pick($ceilometer_hash['tenant'], 'services')
validate_string($public_address)
validate_string($password)
$public_url = "${public_protocol}://${public_address}:8777"
$admin_url = "http://${admin_address}:8777"
class { '::ceilometer::keystone::auth':
password => $password,
auth_name => $auth_name,
configure_endpoint => $configure_endpoint,
configure_user => $configure_user,
configure_user_role => $configure_user_role,
service_name => $service_name,
public_url => $public_url,
internal_url => $admin_url,
admin_url => $admin_url,
region => $region,
}

View File

@ -1,23 +0,0 @@
id: ceilometer-keystone
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
ceilometer:
value: null
fqdn:
value: null
management_vip:
value: null
public_ssl:
value: null
public_vip:
value: null
puppet_modules:
value: null
region:
value: null
role:
value: null

View File

@ -1,20 +0,0 @@
notice('MODULAR: ceilometer/radosgw_user.pp')
$default_ceilometer_hash = {
'enabled' => false,
}
$ceilometer_hash = hiera_hash('ceilometer', $default_ceilometer_hash)
if $ceilometer_hash['enabled'] {
include ceilometer::params
ceilometer_radosgw_user { 'ceilometer':
caps => {'buckets' => 'read', 'usage' => 'read'},
} ~>
service { $::ceilometer::params::agent_central_service_name:
ensure => 'running',
enable => true,
provider => 'pacemaker',
}
}

View File

@ -1,17 +0,0 @@
id: ceilometer-radosgw-user
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
ceilometer:
value: null
fqdn:
value: null
puppet_modules:
value: null
role:
value: null
storage:
value: null

View File

@ -1,97 +0,0 @@
notice('MODULAR: ceph/ceph_compute.pp')
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
$storage_hash = hiera_hash('storage_hash', {})
$use_neutron = hiera('use_neutron')
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0')
$keystone_hash = hiera_hash('keystone_hash', {})
# Cinder settings
$cinder_pool = 'volumes'
# Glance settings
$glance_pool = 'images'
#Nova Compute settings
$compute_user = 'compute'
$compute_pool = 'compute'
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
} else {
$glance_backend = 'swift'
}
if ($storage_hash['volumes_ceph'] or
$storage_hash['images_ceph'] or
$storage_hash['objects_ceph'] or
$storage_hash['ephemeral_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
if $use_ceph {
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config(hiera_hash('network_scheme'))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')
class {'ceph':
primary_mon => $primary_mon,
mon_hosts => keys($mon_address_map),
mon_ip_addresses => values($mon_address_map),
cluster_node_address => $public_vip,
osd_pool_default_size => $storage_hash['osd_pool_size'],
osd_pool_default_pg_num => $storage_hash['pg_num'],
osd_pool_default_pgp_num => $storage_hash['pg_num'],
use_rgw => false,
glance_backend => $glance_backend,
rgw_pub_ip => $public_vip,
rgw_adm_ip => $management_vip,
rgw_int_ip => $management_vip,
cluster_network => $ceph_cluster_network,
public_network => $ceph_public_network,
use_syslog => $use_syslog,
syslog_log_level => hiera('syslog_log_level_ceph', 'info'),
syslog_log_facility => $syslog_log_facility_ceph,
rgw_keystone_admin_token => $keystone_hash['admin_token'],
ephemeral_ceph => $storage_hash['ephemeral_ceph']
}
service { $::ceph::params::service_nova_compute :}
ceph::pool {$compute_pool:
user => $compute_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}, allow rwx pool=${compute_pool}'",
keyring_owner => 'nova',
pg_num => $storage_hash['pg_num'],
pgp_num => $storage_hash['pg_num'],
}
include ceph::nova_compute
if ($storage_hash['ephemeral_ceph']) {
include ceph::ephemeral
Class['ceph::conf'] -> Class['ceph::ephemeral'] ~>
Service[$::ceph::params::service_nova_compute]
}
Class['ceph::conf'] ->
Ceph::Pool[$compute_pool] ->
Class['ceph::nova_compute'] ~>
Service[$::ceph::params::service_nova_compute]
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
}

View File

@ -1,37 +0,0 @@
id: ceph-compute
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
ceph_monitor_nodes:
value: null
ceph_primary_monitor_node:
value: null
fqdn:
value: null
keystone_hash:
value: null
management_vip:
value: null
network_scheme:
value: null
public_vip:
value: null
puppet_modules:
value: null
role:
value: null
storage:
value: null
storage_hash:
value: null
syslog_log_facility_ceph:
value: null
syslog_log_level_ceph:
value: null
use_neutron:
value: null
use_syslog:
value: null

View File

@ -1,95 +0,0 @@
notice('MODULAR: ceph/mon.pp')
$storage_hash = hiera('storage', {})
$use_neutron = hiera('use_neutron')
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0')
$keystone_hash = hiera('keystone', {})
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
} else {
$glance_backend = 'swift'
}
if ($storage_hash['volumes_ceph'] or
$storage_hash['images_ceph'] or
$storage_hash['objects_ceph'] or
$storage_hash['ephemeral_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
if $use_ceph {
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config(hiera_hash('network_scheme'))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')
$mon_addr = get_network_role_property('ceph/public', 'ipaddr')
class {'ceph':
primary_mon => $primary_mon,
mon_hosts => keys($mon_address_map),
mon_ip_addresses => values($mon_address_map),
mon_addr => $mon_addr,
cluster_node_address => $public_vip,
osd_pool_default_size => $storage_hash['osd_pool_size'],
osd_pool_default_pg_num => $storage_hash['pg_num'],
osd_pool_default_pgp_num => $storage_hash['pg_num'],
use_rgw => false,
glance_backend => $glance_backend,
rgw_pub_ip => $public_vip,
rgw_adm_ip => $management_vip,
rgw_int_ip => $management_vip,
cluster_network => $ceph_cluster_network,
public_network => $ceph_public_network,
use_syslog => $use_syslog,
syslog_log_level => hiera('syslog_log_level_ceph', 'info'),
syslog_log_facility => $syslog_log_facility_ceph,
rgw_keystone_admin_token => $keystone_hash['admin_token'],
ephemeral_ceph => $storage_hash['ephemeral_ceph']
}
if ($storage_hash['volumes_ceph']) {
include ::cinder::params
service { 'cinder-volume':
ensure => 'running',
name => $::cinder::params::volume_service,
hasstatus => true,
hasrestart => true,
}
service { 'cinder-backup':
ensure => 'running',
name => $::cinder::params::backup_service,
hasstatus => true,
hasrestart => true,
}
Class['ceph'] ~> Service['cinder-volume']
Class['ceph'] ~> Service['cinder-backup']
}
if ($storage_hash['images_ceph']) {
include ::glance::params
service { 'glance-api':
ensure => 'running',
name => $::glance::params::api_service_name,
hasstatus => true,
hasrestart => true,
}
Class['ceph'] ~> Service['glance-api']
}
}

View File

@ -1,35 +0,0 @@
id: ceph-mon
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
ceph_monitor_nodes:
value: null
ceph_primary_monitor_node:
value: null
fqdn:
value: null
keystone:
value: null
management_vip:
value: null
network_scheme:
value: null
public_vip:
value: null
puppet_modules:
value: null
role:
value: null
storage:
value: null
syslog_log_facility_ceph:
value: null
syslog_log_level_ceph:
value: null
use_neutron:
value: null
use_syslog:
value: null

View File

@ -1,103 +0,0 @@
notice('MODULAR: ceph/radosgw.pp')
$storage_hash = hiera('storage', {})
$use_neutron = hiera('use_neutron')
$public_vip = hiera('public_vip')
$keystone_hash = hiera('keystone', {})
$management_vip = hiera('management_vip')
$service_endpoint = hiera('service_endpoint')
$public_ssl_hash = hiera('public_ssl')
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
if ($storage_hash['volumes_ceph'] or
$storage_hash['images_ceph'] or
$storage_hash['objects_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
if $use_ceph and $storage_hash['objects_ceph'] {
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config(hiera_hash('network_scheme'))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')
$rgw_ip_address = get_network_role_property('ceph/radosgw', 'ipaddr')
# Apache and listen ports
class { 'osnailyfacter::apache':
listen_ports => hiera_array('apache_ports', ['80', '8888']),
}
if ($::osfamily == 'Debian'){
apache::mod {'rewrite': }
apache::mod {'fastcgi': }
}
include ::tweaks::apache_wrappers
include ceph::params
$haproxy_stats_url = "http://${service_endpoint}:10000/;csv"
haproxy_backend_status { 'keystone-admin' :
name => 'keystone-2',
count => '200',
step => '6',
url => $haproxy_stats_url,
}
haproxy_backend_status { 'keystone-public' :
name => 'keystone-1',
count => '200',
step => '6',
url => $haproxy_stats_url,
}
Haproxy_backend_status['keystone-admin'] -> Class ['ceph::keystone']
Haproxy_backend_status['keystone-public'] -> Class ['ceph::keystone']
class { 'ceph::radosgw':
# SSL
use_ssl => false,
public_ssl => $public_ssl_hash['services'],
# Ceph
primary_mon => $primary_mon,
pub_ip => $public_vip,
adm_ip => $management_vip,
int_ip => $management_vip,
# RadosGW settings
rgw_host => $::hostname,
rgw_ip => $rgw_ip_address,
rgw_port => '6780',
swift_endpoint_port => '8080',
rgw_keyring_path => '/etc/ceph/keyring.radosgw.gateway',
rgw_socket_path => '/tmp/radosgw.sock',
rgw_log_file => '/var/log/ceph/radosgw.log',
rgw_data => '/var/lib/ceph/radosgw',
rgw_dns_name => "*.${::domain}",
rgw_print_continue => true,
#rgw Keystone settings
rgw_use_pki => false,
rgw_use_keystone => true,
rgw_keystone_url => "${service_endpoint}:35357",
rgw_keystone_admin_token => $keystone_hash['admin_token'],
rgw_keystone_token_cache_size => '10',
rgw_keystone_accepted_roles => '_member_, Member, admin, swiftoperator',
rgw_keystone_revocation_interval => '1000000',
rgw_nss_db_path => '/etc/ceph/nss',
#rgw Log settings
use_syslog => hiera('use_syslog', true),
syslog_facility => hiera('syslog_log_facility_ceph', 'LOG_LOCAL0'),
syslog_level => hiera('syslog_log_level_ceph', 'info'),
}
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
}

View File

@ -1,29 +0,0 @@
id: ceph-radosgw
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
ceph_monitor_nodes:
value: null
fqdn:
value: null
keystone:
value: null
management_vip:
value: null
public_ssl:
value: null
public_vip:
value: null
puppet_modules:
value: null
role:
value: null
service_endpoint:
value: null
storage:
value: null
use_neutron:
value: null

View File

@ -1,80 +0,0 @@
notice('MODULAR: ceph/ceph_pools')
$storage_hash = hiera('storage', {})
$osd_pool_default_pg_num = $storage_hash['pg_num']
$osd_pool_default_pgp_num = $storage_hash['pg_num']
# Cinder settings
$cinder_user = 'volumes'
$cinder_pool = 'volumes'
# Cinder Backup settings
$cinder_backup_user = 'backups'
$cinder_backup_pool = 'backups'
# Glance settings
$glance_user = 'images'
$glance_pool = 'images'
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
# DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279
ceph::pool {$glance_pool:
user => $glance_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${glance_pool}'",
keyring_owner => 'glance',
pg_num => $osd_pool_default_pg_num,
pgp_num => $osd_pool_default_pg_num,
}
ceph::pool {$cinder_pool:
user => $cinder_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}'",
keyring_owner => 'cinder',
pg_num => $osd_pool_default_pg_num,
pgp_num => $osd_pool_default_pg_num,
}
ceph::pool {$cinder_backup_pool:
user => $cinder_backup_user,
acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_backup_pool}, allow rx pool=${cinder_pool}'",
keyring_owner => 'cinder',
pg_num => $osd_pool_default_pg_num,
pgp_num => $osd_pool_default_pg_num,
}
Ceph::Pool[$glance_pool] -> Ceph::Pool[$cinder_pool] -> Ceph::Pool[$cinder_backup_pool]
if ($storage_hash['volumes_ceph']) {
include ::cinder::params
service { 'cinder-volume':
ensure => 'running',
name => $::cinder::params::volume_service,
hasstatus => true,
hasrestart => true,
}
Ceph::Pool[$cinder_pool] ~> Service['cinder-volume']
service { 'cinder-backup':
ensure => 'running',
name => $::cinder::params::backup_service,
hasstatus => true,
hasrestart => true,
}
Ceph::Pool[$cinder_backup_pool] ~> Service['cinder-backup']
}
if ($storage_hash['images_ceph']) {
include ::glance::params
service { 'glance-api':
ensure => 'running',
name => $::glance::params::api_service_name,
hasstatus => true,
hasrestart => true,
}
Ceph::Pool[$glance_pool] ~> Service['glance-api']
}

View File

@ -1,15 +0,0 @@
id: ceph_create_pools
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
puppet_modules:
value: null
role:
value: null
storage:
value: null

View File

@ -1,53 +0,0 @@
notice('MODULAR: cinder/db.pp')
$cinder_hash = hiera_hash('cinder', {})
$mysql_hash = hiera_hash('mysql_hash', {})
$management_vip = hiera('management_vip', undef)
$database_vip = hiera('database_vip', undef)
$mysql_root_user = pick($mysql_hash['root_user'], 'root')
$mysql_db_create = pick($mysql_hash['db_create'], true)
$mysql_root_password = $mysql_hash['root_password']
$db_user = pick($cinder_hash['db_user'], 'cinder')
$db_name = pick($cinder_hash['db_name'], 'cinder')
$db_password = pick($cinder_hash['db_password'], $mysql_root_password)
$db_host = pick($cinder_hash['db_host'], $database_vip)
$db_create = pick($cinder_hash['db_create'], $mysql_db_create)
$db_root_user = pick($cinder_hash['root_user'], $mysql_root_user)
$db_root_password = pick($cinder_hash['root_password'], $mysql_root_password)
$allowed_hosts = [ $::hostname, 'localhost', '127.0.0.1', '%' ]
validate_string($mysql_root_user)
if $db_create {
class { 'galera::client':
custom_setup_class => hiera('mysql_custom_setup_class', 'galera'),
}
class { 'cinder::db::mysql':
user => $db_user,
password => $db_password,
dbname => $db_name,
allowed_hosts => $allowed_hosts,
}
class { 'osnailyfacter::mysql_access':
db_host => $db_host,
db_user => $db_root_user,
db_password => $db_root_password,
}
Class['galera::client'] ->
Class['osnailyfacter::mysql_access'] ->
Class['cinder::db::mysql']
}
class mysql::config {}
include mysql::config
class mysql::server {}
include mysql::server

View File

@ -1,23 +0,0 @@
id: cinder-db
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
cinder:
value: null
database_vip:
value: null
fqdn:
value: null
management_vip:
value: null
mysql_custom_setup_class:
value: null
mysql_hash:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,51 +0,0 @@
notice('MODULAR: cinder/keystone.pp')
$cinder_hash = hiera_hash('cinder', {})
$public_ssl_hash = hiera('public_ssl')
$public_vip = hiera('public_vip')
$public_address = $public_ssl_hash['services'] ? {
true => $public_ssl_hash['hostname'],
default => $public_vip,
}
$public_protocol = $public_ssl_hash['services'] ? {
true => 'https',
default => 'http',
}
$admin_protocol = 'http'
$admin_address = hiera('management_vip')
$region = pick($cinder_hash['region'], hiera('region', 'RegionOne'))
$password = $cinder_hash['user_password']
$auth_name = pick($cinder_hash['auth_name'], 'cinder')
$configure_endpoint = pick($cinder_hash['configure_endpoint'], true)
$configure_user = pick($cinder_hash['configure_user'], true)
$configure_user_role = pick($cinder_hash['configure_user_role'], true)
$service_name = pick($cinder_hash['service_name'], 'cinder')
$tenant = pick($cinder_hash['tenant'], 'services')
$port = '8776'
$public_url = "${public_protocol}://${public_address}:${port}/v1/%(tenant_id)s"
$admin_url = "${admin_protocol}://${admin_address}:${port}/v1/%(tenant_id)s"
$public_url_v2 = "${public_protocol}://${public_address}:${port}/v2/%(tenant_id)s"
$admin_url_v2 = "${admin_protocol}://${admin_address}:${port}/v2/%(tenant_id)s"
validate_string($public_address)
validate_string($password)
class { '::cinder::keystone::auth':
password => $password,
auth_name => $auth_name,
configure_endpoint => $configure_endpoint,
configure_user => $configure_user,
configure_user_role => $configure_user_role,
service_name => $service_name,
public_url => $public_url,
internal_url => $admin_url,
admin_url => $admin_url,
public_url_v2 => $public_url_v2,
internal_url_v2 => $admin_url_v2,
admin_url_v2 => $admin_url_v2,
region => $region,
}

View File

@ -1,23 +0,0 @@
id: cinder-keystone
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
cinder:
value: null
fqdn:
value: null
management_vip:
value: null
public_ssl:
value: null
public_vip:
value: null
puppet_modules:
value: null
region:
value: null
role:
value: null

View File

@ -1,20 +0,0 @@
notice('MODULAR: cluster-haproxy.pp')
$network_scheme = hiera('network_scheme', {})
$management_vip = hiera('management_vip')
$database_vip = hiera('database_vip', '')
$service_endpoint = hiera('service_endpoint', '')
$primary_controller = hiera('primary_controller')
$haproxy_hash = hiera_hash('haproxy', {})
#FIXME(mattymo): Replace with only VIPs for roles assigned to this node
$stats_ipaddresses = delete_undef_values([$management_vip, $database_vip, $service_endpoint, '127.0.0.1'])
class { 'cluster::haproxy':
haproxy_maxconn => '16000',
haproxy_bufsize => '32768',
primary_controller => $primary_controller,
debug => pick($haproxy_hash['debug'], hiera('debug', false)),
other_networks => direct_networks($network_scheme['endpoints']),
stats_ipaddresses => $stats_ipaddresses
}

View File

@ -1,27 +0,0 @@
id: cluster-haproxy
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
database_vip:
value: null
debug:
value: null
fqdn:
value: null
haproxy:
value: null
management_vip:
value: null
network_scheme:
value: null
primary_controller:
value: null
puppet_modules:
value: null
role:
value: null
service_endpoint:
value: null

View File

@ -1,7 +0,0 @@
notice('MODULAR: cluster-vrouter.pp')
$network_scheme = hiera('network_scheme', {})
class { 'cluster::vrouter_ocf':
other_networks => direct_networks($network_scheme['endpoints']),
}

View File

@ -1,15 +0,0 @@
id: cluster-vrouter
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
network_scheme:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,49 +0,0 @@
notice('MODULAR: cluster.pp')
if !(hiera('role') in hiera('corosync_roles')) {
fail('The node role is not in corosync roles')
}
prepare_network_config(hiera_hash('network_scheme'))
$corosync_nodes = corosync_nodes(
get_nodes_hash_by_roles(
hiera_hash('network_metadata'),
hiera('corosync_roles')
),
'mgmt/corosync'
)
class { 'cluster':
internal_address => get_network_role_property('mgmt/corosync', 'ipaddr'),
corosync_nodes => $corosync_nodes,
}
pcmk_nodes { 'pacemaker' :
nodes => $corosync_nodes,
add_pacemaker_nodes => false,
}
Service <| title == 'corosync' |> {
subscribe => File['/etc/corosync/service.d'],
require => File['/etc/corosync/corosync.conf'],
}
Service['corosync'] -> Pcmk_nodes<||>
Pcmk_nodes<||> -> Service<| provider == 'pacemaker' |>
# Sometimes during the first start pacemaker cannot connect to corosync
# via IPC because the pacemaker and corosync processes run under different users
if($::operatingsystem == 'Ubuntu') {
$pacemaker_run_uid = 'hacluster'
$pacemaker_run_gid = 'haclient'
file {'/etc/corosync/uidgid.d/pacemaker':
content =>"uidgid {
uid: ${pacemaker_run_uid}
gid: ${pacemaker_run_gid}
}"
}
File['/etc/corosync/corosync.conf'] -> File['/etc/corosync/uidgid.d/pacemaker'] -> Service <| title == 'corosync' |>
}

View File

@ -1,19 +0,0 @@
id: cluster
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
corosync_roles:
value: null
fqdn:
value: null
network_metadata:
value: null
network_scheme:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,20 +0,0 @@
notice('MODULAR: cluster/health.pp')
if !(hiera('role') in hiera('corosync_roles')) {
fail('The node role is not in corosync roles')
}
# load the mounted filesystems from our custom fact, remove boot
$mount_points = delete(split($::mounts, ','), '/boot')
$disks = hiera('corosync_disks', $mount_points)
$min_disk_free = hiera('corosync_min_disk_space', '512M')
$disk_unit = hiera('corosync_disk_unit', 'M')
$monitor_interval = hiera('corosync_disk_monitor_interval', '15s')
class { 'cluster::sysinfo':
disks => $disks,
min_disk_free => $min_disk_free,
disk_unit => $disk_unit,
monitor_interval => $monitor_interval,
}

View File

@ -1,27 +0,0 @@
id: cluster_health
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
corosync_disk_monitor:
value: null
corosync_disk_monitor_interval:
value: null
corosync_disk_unit:
value: null
corosync_disks:
value: null
corosync_min_disk_space:
value: null
corosync_monitor_interval:
value: null
corosync_roles:
value: null
fqdn:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,11 +0,0 @@
notice('MODULAR: configure_default_route.pp')
$network_scheme = hiera('network_scheme')
$management_vrouter_vip = hiera('management_vrouter_vip')
prepare_network_config($network_scheme)
$management_int = get_network_role_property('management', 'interface')
$fw_admin_int = get_network_role_property('fw-admin', 'interface')
$ifconfig = configure_default_route($network_scheme, $management_vrouter_vip, $fw_admin_int, $management_int )
notice ($ifconfig)

View File

@ -1,17 +0,0 @@
id: configure_default_route
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
management_vrouter_vip:
value: null
network_scheme:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,5 +0,0 @@
notice('MODULAR: connectivity_tests.pp')
# Pull the list of repos from hiera
$repo_setup = hiera('repo_setup')
# test that the repos are accessible
url_available($repo_setup['repos'])

View File

@ -1,15 +0,0 @@
id: connectivity_tests
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
puppet_modules:
value: null
repo_setup:
value: null
role:
value: null

View File

@ -1,79 +0,0 @@
notice('MODULAR: conntrackd.pp')
prepare_network_config(hiera('network_scheme', {}))
$vrouter_name = hiera('vrouter_name', 'pub')
case $operatingsystem {
Centos: { $conntrackd_package = 'conntrack-tools' }
Ubuntu: { $conntrackd_package = 'conntrackd' }
}
### CONNTRACKD for CentOS 6 doesn't work under namespaces ##
if $operatingsystem == 'Ubuntu' {
$bind_address = get_network_role_property('mgmt/vip', 'ipaddr')
$mgmt_bridge = get_network_role_property('mgmt/vip', 'interface')
package { $conntrackd_package:
ensure => installed,
} ->
file { '/etc/conntrackd/conntrackd.conf':
content => template('cluster/conntrackd.conf.erb'),
} ->
cs_resource {'p_conntrackd':
ensure => present,
primitive_class => 'ocf',
provided_by => 'fuel',
primitive_type => 'ns_conntrackd',
metadata => {
'migration-threshold' => 'INFINITY',
'failure-timeout' => '180s'
},
parameters => {
'bridge' => $mgmt_bridge,
},
complex_type => 'master',
ms_metadata => {
'notify' => 'true',
'ordered' => 'false',
'interleave' => 'true',
'clone-node-max' => '1',
'master-max' => '1',
'master-node-max' => '1',
'target-role' => 'Master'
},
operations => {
'monitor' => {
'interval' => '30',
'timeout' => '60'
},
'monitor:Master' => {
'role' => 'Master',
'interval' => '27',
'timeout' => '60'
},
},
}
cs_colocation { "conntrackd-with-${vrouter_name}-vip":
primitives => [ 'master_p_conntrackd:Master', "vip__vrouter_${vrouter_name}" ],
}
File['/etc/conntrackd/conntrackd.conf'] -> Cs_resource['p_conntrackd'] -> Service['p_conntrackd'] -> Cs_colocation["conntrackd-with-${vrouter_name}-vip"]
service { 'p_conntrackd':
ensure => 'running',
enable => true,
provider => 'pacemaker',
}
# Workaround to ensure log is rotated properly
file { '/etc/logrotate.d/conntrackd':
content => template('openstack/95-conntrackd.conf.erb'),
}
Package[$conntrackd_package] -> File['/etc/logrotate.d/conntrackd']
}

View File

@ -1,17 +0,0 @@
id: conntrackd
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
network_scheme:
value: null
puppet_modules:
value: null
role:
value: null
vrouter_name:
value: null

View File

@ -1,49 +0,0 @@
notice('MODULAR: controller.pp')
# Pulling hiera
$primary_controller = hiera('primary_controller')
$neutron_mellanox = hiera('neutron_mellanox', false)
$use_neutron = hiera('use_neutron', false)
# Do the stuff
if $neutron_mellanox {
$mellanox_mode = $neutron_mellanox['plugin']
} else {
$mellanox_mode = 'disabled'
}
if $primary_controller {
if ($mellanox_mode == 'ethernet') {
$test_vm_pkg = 'cirros-testvm-mellanox'
} else {
$test_vm_pkg = 'cirros-testvm'
}
package { 'cirros-testvm' :
ensure => 'installed',
name => $test_vm_pkg,
}
}
Exec { logoutput => true }
if ($::mellanox_mode == 'ethernet') {
$ml2_eswitch = $neutron_mellanox['ml2_eswitch']
class { 'mellanox_openstack::controller':
eswitch_vnic_type => $ml2_eswitch['vnic_type'],
eswitch_apply_profile_patch => $ml2_eswitch['apply_profile_patch'],
}
}
# NOTE(bogdando) for nodes with pacemaker, we should use OCF instead of monit
# BP https://blueprints.launchpad.net/mos/+spec/include-openstackclient
package { 'python-openstackclient' :
ensure => installed,
}
# Reduce swappiness on controllers, see LP#1413702
sysctl::value { 'vm.swappiness':
value => '10'
}
# vim: set ts=2 sw=2 et :

View File

@ -1,19 +0,0 @@
id: controller_remaining_tasks
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
neutron_mellanox:
value: null
primary_controller:
value: null
puppet_modules:
value: null
role:
value: null
use_neutron:
value: null

View File

@ -1,132 +0,0 @@
notice('MODULAR: database.pp')
prepare_network_config(hiera('network_scheme', {}))
$use_syslog = hiera('use_syslog', true)
$primary_controller = hiera('primary_controller')
$mysql_hash = hiera_hash('mysql', {})
$management_vip = hiera('management_vip')
$database_vip = hiera('database_vip', $management_vip)
$network_scheme = hiera('network_scheme', {})
$mgmt_iface = get_network_role_property('mgmt/database', 'interface')
$direct_networks = split(direct_networks($network_scheme['endpoints'], $mgmt_iface, 'netmask'), ' ')
$access_networks = flatten(['localhost', '127.0.0.1', '240.0.0.0/255.255.0.0', $direct_networks])
$haproxy_stats_port = '10000'
$haproxy_stats_url = "http://${database_vip}:${haproxy_stats_port}/;csv"
$mysql_database_password = $mysql_hash['root_password']
$enabled = pick($mysql_hash['enabled'], true)
$galera_node_address = get_network_role_property('mgmt/database', 'ipaddr')
$galera_nodes = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('database_nodes'), 'mgmt/database'))
$galera_primary_controller = hiera('primary_database', $primary_controller)
$mysql_bind_address = '0.0.0.0'
$galera_cluster_name = 'openstack'
$mysql_skip_name_resolve = true
$custom_setup_class = hiera('mysql_custom_setup_class', 'galera')
# Get galera gcache factor based on cluster node's count
$galera_gcache_factor = count(unique(filter_hash(hiera('nodes', []), 'uid')))
$status_user = 'clustercheck'
$status_password = $mysql_hash['wsrep_password']
$backend_port = '3307'
$backend_timeout = '10'
#############################################################################
validate_string($status_password)
validate_string($mysql_database_password)
validate_string($status_password)
if $enabled {
if $custom_setup_class {
file { '/etc/mysql/my.cnf':
ensure => absent,
require => Class['mysql::server']
}
$config_hash_real = {
'config_file' => '/etc/my.cnf'
}
} else {
$config_hash_real = { }
}
if '/var/lib/mysql' in split($::mounts, ',') {
$ignore_db_dirs = ['lost+found']
} else {
$ignore_db_dirs = []
}
class { 'mysql::server':
bind_address => '0.0.0.0',
etc_root_password => true,
root_password => $mysql_database_password,
old_root_password => '',
galera_cluster_name => $galera_cluster_name,
primary_controller => $galera_primary_controller,
galera_node_address => $galera_node_address,
galera_nodes => $galera_nodes,
galera_gcache_factor => $galera_gcache_factor,
enabled => $enabled,
custom_setup_class => $custom_setup_class,
mysql_skip_name_resolve => $mysql_skip_name_resolve,
use_syslog => $use_syslog,
config_hash => $config_hash_real,
ignore_db_dirs => $ignore_db_dirs,
}
class { 'osnailyfacter::mysql_user':
password => $mysql_database_password,
access_networks => $access_networks,
}
exec { 'initial_access_config':
command => '/bin/ln -sf /etc/mysql/conf.d/password.cnf /root/.my.cnf',
}
if ($custom_setup_class == 'percona_packages' and $::osfamily == 'RedHat') {
# This is a workaround to prevent the conflict between the
# MySQL-shared-wsrep package (included as a dependency for MySQL-python) and
# the Percona shared package Percona-XtraDB-Cluster-shared-56. They both
# provide the libmysql client libraries. Since we are requiring the
# installation of the Percona package here before mysql::python, the python
# client is happy and the server installation won't fail due to the
# installation of our shared package
package { 'Percona-XtraDB-Cluster-shared-56':
ensure => 'present',
before => Class['mysql::python'],
}
}
$management_networks = get_routable_networks_for_network_role($network_scheme, 'mgmt/database', ' ')
class { 'openstack::galera::status':
status_user => $status_user,
status_password => $status_password,
status_allow => $galera_node_address,
backend_host => $galera_node_address,
backend_port => $backend_port,
backend_timeout => $backend_timeout,
only_from => "127.0.0.1 240.0.0.2 ${management_networks}",
}
haproxy_backend_status { 'mysql':
name => 'mysqld',
url => $haproxy_stats_url,
}
class { 'osnailyfacter::mysql_access':
db_password => $mysql_database_password,
}
Class['mysql::server'] ->
Class['osnailyfacter::mysql_user'] ->
Exec['initial_access_config'] ->
Class['openstack::galera::status'] ->
Haproxy_backend_status['mysql'] ->
Class['osnailyfacter::mysql_access']
}

View File

@ -1,33 +0,0 @@
id: database
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
database_nodes:
value: null
database_vip:
value: null
fqdn:
value: null
management_vip:
value: null
mysql:
value: null
mysql_custom_setup_class:
value: null
network_scheme:
value: null
nodes:
value: null
primary_controller:
value: null
primary_database:
value: null
puppet_modules:
value: null
role:
value: null
use_syslog:
value: null

View File

@ -1,41 +0,0 @@
notice('MODULAR: service_token_off.pp')
####################################################################
# Used as a singular post-deployment action to disable admin_token
#
$keystone_params = hiera_hash('keystone_hash', {})
if $keystone_params['service_token_off'] {
include ::keystone::params
include ::tweaks::apache_wrappers
keystone_config {
'DEFAULT/admin_token': ensure => absent;
}
# Get paste.ini source
$keystone_paste_ini = $::keystone::params::paste_config ? {
undef => '/etc/keystone/keystone-paste.ini',
default => $::keystone::params::paste_config,
}
# Remove admin_token_auth middleware from public/admin/v3 pipelines
exec { 'remove_admin_token_auth_middleware':
path => ['/bin', '/usr/bin'],
command => "sed -i.dist 's/ admin_token_auth//' $keystone_paste_ini",
onlyif => "fgrep -q ' admin_token_auth' $keystone_paste_ini",
}
service { 'httpd':
ensure => 'running',
name => $::tweaks::apache_wrappers::service_name,
enable => true,
}
# Restart the service so that changes take effect
Keystone_config<||> ~> Service['httpd']
Exec['remove_admin_token_auth_middleware'] ~> Service['httpd']
}

View File

@ -1,15 +0,0 @@
id: disable_keystone_service_token
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
keystone_hash:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,8 +0,0 @@
notice('MODULAR: dns-client.pp')
$management_vip = hiera('management_vrouter_vip')
class { 'osnailyfacter::resolvconf':
management_vip => $management_vip,
}

View File

@ -1,15 +0,0 @@
id: dns-client
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
management_vrouter_vip:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,16 +0,0 @@
notice('MODULAR: dns-server.pp')
$dns_servers = hiera('external_dns')
$primary_controller = hiera('primary_controller')
$master_ip = hiera('master_ip')
$management_vrouter_vip = hiera('management_vrouter_vip')
class { 'osnailyfacter::dnsmasq':
external_dns => strip(split($dns_servers['dns_list'], ',')),
master_ip => $master_ip,
management_vrouter_vip => $management_vrouter_vip,
} ->
class { 'cluster::dns_ocf':
primary_controller => $primary_controller,
}

View File

@ -1,21 +0,0 @@
id: dns-server
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
external_dns:
value: null
fqdn:
value: null
management_vrouter_vip:
value: null
master_ip:
value: null
primary_controller:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,33 +0,0 @@
notice('MODULAR: dump_rabbitmq_definitions.pp')
$definitions_dump_file = '/etc/rabbitmq/definitions'
$original_definitions_dump_file = '/etc/rabbitmq/definitions.full'
$rabbit_hash = hiera_hash('rabbit_hash',
{
'user' => false,
'password' => false,
}
)
$rabbit_enabled = pick($rabbit_hash['enabled'], true)
if ($rabbit_enabled) {
$rabbit_api_endpoint = 'http://localhost:15672/api/definitions'
$rabbit_credentials = "${rabbit_hash['user']}:${rabbit_hash['password']}"
exec { 'rabbitmq-dump-definitions':
path => ['/usr/bin', '/usr/sbin', '/sbin', '/bin'],
command => "curl -u ${rabbit_credentials} ${rabbit_api_endpoint} -o ${original_definitions_dump_file}",
}->
exec { 'rabbitmq-dump-clean':
path => ['/usr/bin', '/usr/sbin', '/sbin', '/bin'],
command => "rabbitmq-dump-clean.py < ${original_definitions_dump_file} > ${definitions_dump_file}",
}
file { [$definitions_dump_file, $original_definitions_dump_file]:
ensure => file,
owner => 'root',
group => 'root',
mode => '0600',
}
}

View File

@ -1,15 +0,0 @@
id: dump_rabbitmq_definitions
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
puppet_modules:
value: null
rabbit_hash:
value: null
role:
value: null

View File

@ -1,10 +0,0 @@
include cinder::params
$volume_service = $::cinder::params::volume_service
service { $volume_service:
ensure => running,
enable => true,
hasstatus => true,
hasrestart => true,
}

View File

@ -1,13 +0,0 @@
id: enable_cinder_volume_service
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,10 +0,0 @@
include nova::params
$compute_service_name = $::nova::params::compute_service_name
service { $compute_service_name:
ensure => running,
enable => true,
hasstatus => true,
hasrestart => true,
}

View File

@ -1,13 +0,0 @@
id: enable_nova_compute_service
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,17 +0,0 @@
include ::ceph::params
$radosgw_service = $::ceph::params::service_radosgw
# ensure the service is running and will start on boot
service { $radosgw_service:
ensure => running,
enable => true,
}
# The Ubuntu upstart script is incompatible with the upstart provider
# This will force the service to fall back to the debian init script
if ($::operatingsystem == 'Ubuntu') {
Service['radosgw'] {
provider => 'debian'
}
}

View File

@ -1,13 +0,0 @@
id: enable_rados
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,133 +0,0 @@
notice('MODULAR: firewall.pp')
$network_scheme = hiera_hash('network_scheme')
$ironic_hash = hiera_hash('ironic', {})
# Workaround for fuel bug with firewall
firewall {'003 remote rabbitmq ':
sport => [ 4369, 5672, 41055, 55672, 61613 ],
source => hiera('master_ip'),
proto => 'tcp',
action => 'accept',
require => Class['openstack::firewall'],
}
firewall {'004 remote puppet ':
sport => [ 8140 ],
source => hiera('master_ip'),
proto => 'tcp',
action => 'accept',
require => Class['openstack::firewall'],
}
# allow local rabbitmq admin traffic for LP#1383258
firewall {'005 local rabbitmq admin':
sport => [ 15672 ],
iniface => 'lo',
proto => 'tcp',
action => 'accept',
require => Class['openstack::firewall'],
}
# reject all non-local rabbitmq admin traffic for LP#1450443
firewall {'006 reject non-local rabbitmq admin':
sport => [ 15672 ],
proto => 'tcp',
action => 'drop',
require => Class['openstack::firewall'],
}
# allow connections from haproxy namespace
firewall {'030 allow connections from haproxy namespace':
source => '240.0.0.2',
action => 'accept',
require => Class['openstack::firewall'],
}
prepare_network_config(hiera_hash('network_scheme'))
class { 'openstack::firewall' :
nova_vnc_ip_range => get_routable_networks_for_network_role($network_scheme, 'nova/api'),
nova_api_ip_range => get_network_role_property('nova/api', 'network'),
libvirt_network => get_network_role_property('management', 'network'),
keystone_network => get_network_role_property('keystone/api', 'network'),
iscsi_ip => get_network_role_property('cinder/iscsi', 'ipaddr'),
}
if $ironic_hash['enabled'] {
$nodes_hash = hiera('nodes', {})
$roles = node_roles($nodes_hash, hiera('uid'))
$network_metadata = hiera_hash('network_metadata', {})
$baremetal_int = get_network_role_property('ironic/baremetal', 'interface')
$baremetal_vip = $network_metadata['vips']['baremetal']['ipaddr']
$baremetal_ipaddr = get_network_role_property('ironic/baremetal', 'ipaddr')
$baremetal_network = get_network_role_property('ironic/baremetal', 'network')
firewallchain { 'baremetal:filter:IPv4':
ensure => present,
} ->
firewall { '999 drop all baremetal':
chain => 'baremetal',
action => 'drop',
proto => 'all',
} ->
firewall {'00 baremetal-filter':
proto => 'all',
iniface => $baremetal_int,
jump => 'baremetal',
require => Class['openstack::firewall'],
}
if member($roles, 'controller') or member($roles, 'primary-controller') {
firewall { '100 allow baremetal ping from VIP':
chain => 'baremetal',
source => $baremetal_vip,
destination => $baremetal_ipaddr,
proto => 'icmp',
icmp => 'echo-request',
action => 'accept',
}
firewall { '207 ironic-api' :
dport => '6385',
proto => 'tcp',
action => 'accept',
}
}
if member($roles, 'ironic') {
firewall { '101 allow baremetal-related':
chain => 'baremetal',
source => $baremetal_network,
destination => $baremetal_ipaddr,
proto => 'all',
state => ['RELATED', 'ESTABLISHED'],
action => 'accept',
}
firewall { '102 allow baremetal-rsyslog':
chain => 'baremetal',
source => $baremetal_network,
destination => $baremetal_ipaddr,
proto => 'udp',
dport => '514',
action => 'accept',
}
firewall { '103 allow baremetal-TFTP':
chain => 'baremetal',
source => $baremetal_network,
destination => $baremetal_ipaddr,
proto => 'udp',
dport => '69',
action => 'accept',
}
k_mod {'nf_conntrack_tftp':
ensure => 'present'
}
file_line {'nf_conntrack_tftp_on_boot':
path => '/etc/modules',
line => 'nf_conntrack_tftp',
}
}
}

View File

@ -1,19 +0,0 @@
id: firewall
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
ironic:
value: null
master_ip:
value: null
network_scheme:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,10 +0,0 @@
notice('MODULAR: fuel_pkgs.pp')
$fuel_packages = [
'fuel-ha-utils',
'fuel-misc',
]
package { $fuel_packages :
ensure => 'latest',
}

View File

@ -1,13 +0,0 @@
id: fuel_pkgs
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,49 +0,0 @@
notice('MODULAR: generate_vms.pp')
$libvirt_dir = '/etc/libvirt/qemu'
$template_dir = '/var/lib/nova'
$packages = ['qemu-utils', 'qemu-kvm', 'libvirt-bin', 'xmlstarlet']
$libvirt_service_name = 'libvirtd'
$vms = hiera_array('vms_conf')
define vm_config {
$details = $name
$id = $details['id']
file { "${template_dir}/template_${id}_vm.xml":
owner => 'root',
group => 'root',
content => template('osnailyfacter/vm_libvirt.erb'),
}
}
package { $packages:
ensure => 'installed',
}
service { $libvirt_service_name:
ensure => 'running',
require => Package[$packages],
before => Exec['generate_vms'],
}
file { "${libvirt_dir}/autostart":
ensure => 'directory',
require => Package[$packages],
}
file { "${template_dir}":
ensure => 'directory',
}
vm_config { $vms:
before => Exec['generate_vms'],
require => File["${template_dir}"],
}
exec { 'generate_vms':
command => "/usr/bin/generate_vms.sh ${libvirt_dir} ${template_dir}",
path => ['/usr/sbin', '/usr/bin' , '/sbin', '/bin'],
require => [File["${template_dir}"], File["${libvirt_dir}/autostart"]],
}

View File

@ -1,13 +0,0 @@
id: generate_vms
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,39 +0,0 @@
#!/bin/sh
cluster_id={{uid}}
open_ssl_keys='{{ssl|join(' ')}}'
ssh_keys='{{ ssh|join(' ') }}'
keys_path={{path}}
BASE_PATH=$keys_path/$cluster_id/
function generate_open_ssl_keys {
for i in $open_ssl_keys
do
local dir_path=$BASE_PATH$i/
local key_path=$dir_path$i.key
mkdir -p $dir_path
if [ ! -f $key_path ]; then
openssl rand -base64 741 > $key_path 2>&1
else
echo "Key $key_path already exists"
fi
done
}
function generate_ssh_keys {
for i in $ssh_keys
do
local dir_path=$BASE_PATH$i/
local key_path=$dir_path$i
mkdir -p $dir_path
if [ ! -f $key_path ]; then
ssh-keygen -b 2048 -t rsa -N '' -f $key_path 2>&1
else
echo "Key $key_path already exists"
fi
done
}
generate_open_ssl_keys
generate_ssh_keys

View File

@ -1,20 +0,0 @@
id: genkeys
handler: shell
version: 0.0.1
input:
uid:
schema: str!
value:
path:
schema: str!
value: /var/lib/fuel/keys/
ssl:
schema: []
value:
- mongo
ssh:
schema: []
value:
- neutron
- nova
- mysql
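For reference, with the defaults above the Jinja-templated script from the previous hunk would render roughly as follows (a sketch: `uid` has no default and is shown as a placeholder, and the exact whitespace produced by the shell handler may differ):

```sh
#!/bin/sh
cluster_id=<uid>                  # supplied per cluster; no default in meta.yaml
open_ssl_keys='mongo'
ssh_keys='neutron nova mysql'
keys_path=/var/lib/fuel/keys/
BASE_PATH=$keys_path/$cluster_id/
# generate_open_ssl_keys and generate_ssh_keys then create one key per name
# under /var/lib/fuel/keys/<uid>/<name>/, skipping keys that already exist
```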

View File

@ -1,53 +0,0 @@
notice('MODULAR: glance/db.pp')
$glance_hash = hiera_hash('glance', {})
$mysql_hash = hiera_hash('mysql', {})
$management_vip = hiera('management_vip')
$database_vip = hiera('database_vip')
$mysql_root_user = pick($mysql_hash['root_user'], 'root')
$mysql_db_create = pick($mysql_hash['db_create'], true)
$mysql_root_password = $mysql_hash['root_password']
$db_user = pick($glance_hash['db_user'], 'glance')
$db_name = pick($glance_hash['db_name'], 'glance')
$db_password = pick($glance_hash['db_password'], $mysql_root_password)
$db_host = pick($glance_hash['db_host'], $database_vip)
$db_create = pick($glance_hash['db_create'], $mysql_db_create)
$db_root_user = pick($glance_hash['root_user'], $mysql_root_user)
$db_root_password = pick($glance_hash['root_password'], $mysql_root_password)
$allowed_hosts = [ hiera('node_name'), 'localhost', '127.0.0.1', '%' ]
validate_string($mysql_root_user)
validate_string($database_vip)
if $db_create {
class { 'galera::client':
custom_setup_class => hiera('mysql_custom_setup_class', 'galera'),
}
class { 'glance::db::mysql':
user => $db_user,
password => $db_password,
dbname => $db_name,
allowed_hosts => $allowed_hosts,
}
class { 'osnailyfacter::mysql_access':
db_host => $db_host,
db_user => $db_root_user,
db_password => $db_root_password,
}
Class['galera::client'] ->
Class['osnailyfacter::mysql_access'] ->
Class['glance::db::mysql']
}
class mysql::config {}
include mysql::config
class mysql::server {}
include mysql::server

View File

@ -1,25 +0,0 @@
id: glance-db
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
database_vip:
value: null
fqdn:
value: null
glance:
value: null
management_vip:
value: null
mysql:
value: null
mysql_custom_setup_class:
value: null
node_name:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,42 +0,0 @@
notice('MODULAR: glance/keystone.pp')
$glance_hash = hiera_hash('glance', {})
$public_vip = hiera('public_vip')
$public_ssl_hash = hiera('public_ssl')
$admin_address = hiera('management_vip')
$region = pick($glance_hash['region'], hiera('region', 'RegionOne'))
$password = $glance_hash['user_password']
$auth_name = pick($glance_hash['auth_name'], 'glance')
$configure_endpoint = pick($glance_hash['configure_endpoint'], true)
$configure_user = pick($glance_hash['configure_user'], true)
$configure_user_role = pick($glance_hash['configure_user_role'], true)
$service_name = pick($glance_hash['service_name'], 'glance')
$tenant = pick($glance_hash['tenant'], 'services')
$public_address = $public_ssl_hash['services'] ? {
true => $public_ssl_hash['hostname'],
default => $public_vip,
}
$public_protocol = $public_ssl_hash['services'] ? {
true => 'https',
default => 'http',
}
$public_url = "${public_protocol}://${public_address}:9292"
$admin_url = "http://${admin_address}:9292"
validate_string($public_address)
validate_string($password)
class { '::glance::keystone::auth':
password => $password,
auth_name => $auth_name,
configure_endpoint => $configure_endpoint,
configure_user => $configure_user,
configure_user_role => $configure_user_role,
service_name => $service_name,
public_url => $public_url,
admin_url => $admin_url,
internal_url => $admin_url,
region => $region,
}

View File

@ -1,23 +0,0 @@
id: glance-keystone
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
glance:
value: null
management_vip:
value: null
public_ssl:
value: null
public_vip:
value: null
puppet_modules:
value: null
region:
value: null
role:
value: null

View File

@ -1,128 +0,0 @@
notice('MODULAR: glance.pp')
$network_scheme = hiera_hash('network_scheme', {})
$network_metadata = hiera_hash('network_metadata', {})
prepare_network_config($network_scheme)
$glance_hash = hiera_hash('glance', {})
$verbose = pick($glance_hash['verbose'], hiera('verbose', true))
$debug = pick($glance_hash['debug'], hiera('debug', false))
$management_vip = hiera('management_vip')
$database_vip = hiera('database_vip')
$service_endpoint = hiera('service_endpoint')
$storage_hash = hiera('storage')
$use_syslog = hiera('use_syslog', true)
$use_stderr = hiera('use_stderr', false)
$syslog_log_facility = hiera('syslog_log_facility_glance')
$rabbit_hash = hiera_hash('rabbit_hash', {})
$max_pool_size = hiera('max_pool_size')
$max_overflow = hiera('max_overflow')
$ceilometer_hash = hiera_hash('ceilometer', {})
$region = hiera('region','RegionOne')
$glance_endpoint = $management_vip
$service_workers = pick($glance_hash['glance_workers'], min(max($::processorcount, 2), 16))
$db_type = 'mysql'
$db_host = pick($glance_hash['db_host'], $database_vip)
$api_bind_address = get_network_role_property('glance/api', 'ipaddr')
$enabled = true
$max_retries = '-1'
$idle_timeout = '3600'
$auth_uri = "http://${service_endpoint}:5000/"
$rabbit_password = $rabbit_hash['password']
$rabbit_user = $rabbit_hash['user']
$rabbit_hosts = split(hiera('amqp_hosts',''), ',')
$rabbit_virtual_host = '/'
$glance_db_user = pick($glance_hash['db_user'], 'glance')
$glance_db_dbname = pick($glance_hash['db_name'], 'glance')
$glance_db_password = $glance_hash['db_password']
$glance_user = pick($glance_hash['user'],'glance')
$glance_user_password = $glance_hash['user_password']
$glance_tenant = pick($glance_hash['tenant'],'services')
$glance_vcenter_host = $glance_hash['vc_host']
$glance_vcenter_user = $glance_hash['vc_user']
$glance_vcenter_password = $glance_hash['vc_password']
$glance_vcenter_datacenter = $glance_hash['vc_datacenter']
$glance_vcenter_datastore = $glance_hash['vc_datastore']
$glance_vcenter_image_dir = $glance_hash['vc_image_dir']
$glance_vcenter_api_retry_count = '20'
$glance_image_cache_max_size = $glance_hash['image_cache_max_size']
$glance_pipeline = pick($glance_hash['pipeline'], 'keystone')
$glance_large_object_size = pick($glance_hash['large_object_size'], '5120')
$rados_connect_timeout = '30'
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
$glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ]
$glance_show_image_direct_url = pick($glance_hash['show_image_direct_url'], true)
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
$glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ]
$glance_show_image_direct_url = pick($glance_hash['show_image_direct_url'], true)
} else {
$glance_backend = 'swift'
$glance_known_stores = [ 'glance.store.swift.Store', 'glance.store.http.Store' ]
$swift_store_large_object_size = $glance_large_object_size
$glance_show_image_direct_url = pick($glance_hash['show_image_direct_url'], false)
}
###############################################################################
class { 'openstack::glance':
verbose => $verbose,
debug => $debug,
db_type => $db_type,
db_host => $db_host,
glance_db_user => $glance_db_user,
glance_db_dbname => $glance_db_dbname,
glance_db_password => $glance_db_password,
glance_user => $glance_user,
glance_user_password => $glance_user_password,
glance_tenant => $glance_tenant,
glance_vcenter_host => $glance_vcenter_host,
glance_vcenter_user => $glance_vcenter_user,
glance_vcenter_password => $glance_vcenter_password,
glance_vcenter_datacenter => $glance_vcenter_datacenter,
glance_vcenter_datastore => $glance_vcenter_datastore,
glance_vcenter_image_dir => $glance_vcenter_image_dir,
glance_vcenter_api_retry_count => $glance_vcenter_api_retry_count,
auth_uri => $auth_uri,
keystone_host => $service_endpoint,
region => $region,
bind_host => $api_bind_address,
enabled => $enabled,
glance_backend => $glance_backend,
registry_host => $glance_endpoint,
use_syslog => $use_syslog,
use_stderr => $use_stderr,
show_image_direct_url => $glance_show_image_direct_url,
swift_store_large_object_size => $swift_store_large_object_size,
pipeline => $glance_pipeline,
syslog_log_facility => $syslog_log_facility,
glance_image_cache_max_size => $glance_image_cache_max_size,
max_retries => $max_retries,
max_pool_size => $max_pool_size,
max_overflow => $max_overflow,
idle_timeout => $idle_timeout,
rabbit_password => $rabbit_password,
rabbit_userid => $rabbit_user,
rabbit_hosts => $rabbit_hosts,
rabbit_virtual_host => $rabbit_virtual_host,
known_stores => $glance_known_stores,
ceilometer => $ceilometer_hash[enabled],
service_workers => $service_workers,
rados_connect_timeout => $rados_connect_timeout,
}
####### Disable upstart startup on install #######
if($::operatingsystem == 'Ubuntu') {
tweaks::ubuntu_service_override { 'glance-api':
package_name => 'glance-api',
}
tweaks::ubuntu_service_override { 'glance-registry':
package_name => 'glance-registry',
}
}

View File

@ -1,49 +0,0 @@
id: glance
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
amqp_hosts:
value: null
ceilometer:
value: null
database_vip:
value: null
debug:
value: null
fqdn:
value: null
glance:
value: null
management_vip:
value: null
max_overflow:
value: null
max_pool_size:
value: null
network_metadata:
value: null
network_scheme:
value: null
puppet_modules:
value: null
rabbit_hash:
value: null
region:
value: null
role:
value: null
service_endpoint:
value: null
storage:
value: null
syslog_log_facility_glance:
value: null
use_stderr:
value: null
use_syslog:
value: null
verbose:
value: null

View File

@ -1,293 +0,0 @@
notice('MODULAR: globals.pp')
$service_token_off = false
$globals_yaml_file = '/etc/hiera/globals.yaml'
# remove cached globals values before anything else
remove_file($globals_yaml_file)
$network_scheme = hiera_hash('network_scheme', {})
if empty($network_scheme) {
fail("Network_scheme not given in the astute.yaml")
}
$network_metadata = hiera_hash('network_metadata', {})
if empty($network_metadata) {
fail("Network_metadata not given in the astute.yaml")
}
$node_name = regsubst(hiera('fqdn', $::hostname), '\..*$', '')
$node = $network_metadata['nodes'][$node_name]
if empty($node) {
fail("Node hostname is not defined in the astute.yaml")
}
prepare_network_config($network_scheme)
# DEPRECATED
$nodes_hash = hiera('nodes', {})
$deployment_mode = hiera('deployment_mode', 'ha_compact')
$roles = $node['node_roles']
$storage_hash = hiera('storage', {})
$syslog_hash = hiera('syslog', {})
$base_syslog_hash = hiera('base_syslog', {})
$sahara_hash = hiera('sahara', {})
$murano_hash = hiera('murano', {})
$heat_hash = hiera_hash('heat', {})
$vcenter_hash = hiera('vcenter', {})
$nova_hash = hiera_hash('nova', {})
$mysql_hash = hiera('mysql', {})
$rabbit_hash = hiera_hash('rabbit', {})
$glance_hash = hiera_hash('glance', {})
$swift_hash = hiera('swift', {})
$cinder_hash = hiera_hash('cinder', {})
$ceilometer_hash = hiera('ceilometer',{})
$access_hash = hiera_hash('access', {})
$mp_hash = hiera('mp', {})
$keystone_hash = merge({'service_token_off' => $service_token_off},
hiera_hash('keystone', {}))
$node_role = hiera('role')
$dns_nameservers = hiera('dns_nameservers', [])
$use_ceilometer = $ceilometer_hash['enabled']
$use_neutron = hiera('quantum', false)
$use_ovs = hiera('use_ovs', $use_neutron)
$verbose = true
$debug = hiera('debug', false)
$use_monit = false
$master_ip = hiera('master_ip')
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_glance = hiera('syslog_log_facility_glance', 'LOG_LOCAL2')
$syslog_log_facility_cinder = hiera('syslog_log_facility_cinder', 'LOG_LOCAL3')
$syslog_log_facility_neutron = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4')
$syslog_log_facility_nova = hiera('syslog_log_facility_nova','LOG_LOCAL6')
$syslog_log_facility_keystone = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7')
$syslog_log_facility_murano = hiera('syslog_log_facility_murano', 'LOG_LOCAL0')
$syslog_log_facility_heat = hiera('syslog_log_facility_heat','LOG_LOCAL0')
$syslog_log_facility_sahara = hiera('syslog_log_facility_sahara','LOG_LOCAL0')
$syslog_log_facility_ceilometer = hiera('syslog_log_facility_ceilometer','LOG_LOCAL0')
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0')
$nova_report_interval = hiera('nova_report_interval', 60)
$nova_service_down_time = hiera('nova_service_down_time', 180)
$apache_ports = hiera_array('apache_ports', ['80', '8888', '5000', '35357'])
$openstack_version = hiera('openstack_version',
{
'keystone' => 'installed',
'glance' => 'installed',
'horizon' => 'installed',
'nova' => 'installed',
'novncproxy' => 'installed',
'cinder' => 'installed',
}
)
$nova_rate_limits = hiera('nova_rate_limits',
{
'POST' => 100000,
'POST_SERVERS' => 100000,
'PUT' => 1000,
'GET' => 100000,
'DELETE' => 100000
}
)
$cinder_rate_limits = hiera('cinder_rate_limits',
{
'POST' => 100000,
'POST_SERVERS' => 100000,
'PUT' => 100000,
'GET' => 100000,
'DELETE' => 100000
}
)
$default_gateway = get_default_gateways()
$public_vip = $network_metadata['vips']['public']['ipaddr']
$management_vip = $network_metadata['vips']['management']['ipaddr']
$public_vrouter_vip = $network_metadata['vips']['vrouter_pub']['ipaddr']
$management_vrouter_vip = $network_metadata['vips']['vrouter']['ipaddr']
$database_vip = is_hash($network_metadata['vips']['database']) ? {
true => pick($network_metadata['vips']['database']['ipaddr'], $management_vip),
default => $management_vip
}
$service_endpoint = is_hash($network_metadata['vips']['service_endpoint']) ? {
true => pick($network_metadata['vips']['service_endpoint']['ipaddr'], $management_vip),
default => $management_vip
}
if $use_neutron {
$novanetwork_params = {}
$neutron_config = hiera_hash('quantum_settings')
$network_provider = 'neutron'
$neutron_db_password = $neutron_config['database']['passwd']
$neutron_user_password = $neutron_config['keystone']['admin_password']
$neutron_metadata_proxy_secret = $neutron_config['metadata']['metadata_proxy_shared_secret']
$base_mac = $neutron_config['L2']['base_mac']
$management_network_range = get_network_role_property('mgmt/vip', 'network')
} else {
$neutron_config = {}
$novanetwork_params = hiera('novanetwork_parameters')
$network_size = $novanetwork_params['network_size']
$num_networks = $novanetwork_params['num_networks']
$network_provider = 'nova'
if ( $novanetwork_params['network_manager'] == 'FlatDHCPManager') {
$private_int = get_network_role_property('novanetwork/fixed', 'interface')
} else {
$private_int = get_network_role_property('novanetwork/vlan', 'interface')
$vlan_start = $novanetwork_params['vlan_start']
$network_config = {
'vlan_start' => $vlan_start,
}
}
$network_manager = "nova.network.manager.${novanetwork_params['network_manager']}"
$management_network_range = hiera('management_network_range')
}
if $node_role == 'primary-controller' {
$primary_controller = true
} else {
$primary_controller = false
}
$controllers_hash = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
$mountpoints = filter_hash($mp_hash,'point')
# AMQP configuration
$queue_provider = hiera('queue_provider','rabbitmq')
$rabbit_ha_queues = true
if !$rabbit_hash['user'] {
$rabbit_hash['user'] = 'nova'
}
$amqp_port = hiera('amqp_ports', '5673')
if hiera('amqp_hosts', false) {
# using pre-defined in astute.yaml RabbitMQ servers
$amqp_hosts = hiera('amqp_hosts')
} else {
# using RabbitMQ servers on controllers
# todo(sv): switch from 'controller' nodes to 'rmq' nodes as soon as they are implemented as an additional node role
$controllers_with_amqp_server = get_node_to_ipaddr_map_by_network_role($controllers_hash, 'mgmt/messaging')
$amqp_nodes = ipsort(values($controllers_with_amqp_server))
# amqp_hosts() randomizes the order of RMQ endpoints and puts the local one first
$amqp_hosts = amqp_hosts($amqp_nodes, $amqp_port, get_network_role_property('mgmt/messaging', 'ipaddr'))
}
# MySQL and SQLAlchemy backend configuration
$custom_mysql_setup_class = hiera('custom_mysql_setup_class', 'galera')
$max_pool_size = hiera('max_pool_size', min($::processorcount * 5 + 0, 30 + 0))
$max_overflow = hiera('max_overflow', min($::processorcount * 5 + 0, 60 + 0))
$max_retries = hiera('max_retries', '-1')
$idle_timeout = hiera('idle_timeout','3600')
$nova_db_password = $nova_hash['db_password']
$sql_connection = "mysql://nova:${nova_db_password}@${database_vip}/nova?read_timeout=60"
$mirror_type = hiera('mirror_type', 'external')
$multi_host = hiera('multi_host', true)
# Determine who should get the volume service
if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) {
$manage_volumes = 'iscsi'
} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) {
$manage_volumes = 'vmdk'
} elsif ($storage_hash['volumes_ceph']) {
$manage_volumes = 'ceph'
} else {
$manage_volumes = false
}
# Define ceph-related variables
$ceph_primary_monitor_node = get_nodes_hash_by_roles($network_metadata, ['primary-controller'])
$ceph_monitor_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
$ceph_rgw_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
# Determine who should be the default backend
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
$glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ]
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
$glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ]
} else {
$glance_backend = 'file'
$glance_known_stores = false
}
# Define ceilometer-related variables:
# todo: use special node-roles instead of controllers in the future
$ceilometer_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
# Define memcached-related variables:
$memcache_roles = hiera('memcache_roles', ['primary-controller', 'controller'])
# Define node roles, that will carry corosync/pacemaker
$corosync_roles = hiera('corosync_roles', ['primary-controller', 'controller'])
# Define cinder-related variables
# todo: use special node-roles instead of controllers in the future
$cinder_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
# Define horizon-related variables:
# todo: use special node-roles instead of controllers in the future
$horizon_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
# Define swift-related variables
# todo(sv): use special node-roles instead of controllers in the future
$swift_master_role = 'primary-controller'
$swift_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
$swift_proxies = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
$swift_proxy_caches = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) # memcache for swift
$is_primary_swift_proxy = $primary_controller
# Define murano-related variables
$murano_roles = ['primary-controller', 'controller']
# Define heat-related variables:
$heat_roles = ['primary-controller', 'controller']
# Define sahara-related variable
$sahara_roles = ['primary-controller', 'controller']
# Define ceilometer-related parameters
if !$ceilometer_hash['event_time_to_live'] { $ceilometer_hash['event_time_to_live'] = '604800'}
if !$ceilometer_hash['metering_time_to_live'] { $ceilometer_hash['metering_time_to_live'] = '604800' }
if !$ceilometer_hash['http_timeout'] { $ceilometer_hash['http_timeout'] = '600' }
# Define database-related variables:
# todo: use special node-roles instead of controllers in the future
$database_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
# Define Nova-API variables:
# todo: use special node-roles instead of controllers in the future
$nova_api_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
# Define mongo-related variables
$mongo_roles = ['primary-mongo', 'mongo']
# Define neutron-related variables:
# todo: use special node-roles instead of controllers in the future
$neutron_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
# Define Ironic-related variables:
$ironic_api_nodes = $controllers_hash
# Change nova_hash to add vnc port to it
# TODO(sbog): change this when we get rid of global hashes
$public_ssl_hash = hiera('public_ssl')
if $public_ssl_hash['services'] {
$nova_hash['vncproxy_protocol'] = 'https'
} else {
$nova_hash['vncproxy_protocol'] = 'http'
}
# save all these global variables into the hiera yaml file for later use
# by other manifests via the hiera function
file { $globals_yaml_file :
ensure => 'present',
mode => '0644',
owner => 'root',
group => 'root',
content => template('osnailyfacter/globals_yaml.erb')
}

View File

@ -1,127 +0,0 @@
id: globals
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
access:
value: null
amqp_hosts:
value: null
amqp_ports:
value: null
apache_ports:
value: null
base_syslog:
value: null
ceilometer:
value: null
cinder:
value: null
cinder_rate_limits:
value: null
corosync_roles:
value: null
custom_mysql_setup_class:
value: null
debug:
value: null
deployment_mode:
value: null
dns_nameservers:
value: null
fqdn:
value: null
glance:
value: null
heat:
value: null
idle_timeout:
value: null
keystone:
value: null
master_ip:
value: null
max_overflow:
value: null
max_pool_size:
value: null
max_retries:
value: null
memcache_roles:
value: null
mirror_type:
value: null
mp:
value: null
multi_host:
value: null
murano:
value: null
mysql:
value: null
network_metadata:
value: null
network_scheme:
value: null
nodes:
value: null
nova:
value: null
nova_rate_limits:
value: null
nova_report_interval:
value: null
nova_service_down_time:
value: null
openstack_version:
value: null
public_ssl:
value: null
puppet_modules:
value: null
quantum:
value: null
quantum_settings:
value: null
queue_provider:
value: null
rabbit:
value: null
role:
value: null
sahara:
value: null
storage:
value: null
swift:
value: null
syslog:
value: null
syslog_log_facility_ceilometer:
value: null
syslog_log_facility_ceph:
value: null
syslog_log_facility_cinder:
value: null
syslog_log_facility_glance:
value: null
syslog_log_facility_heat:
value: null
syslog_log_facility_keystone:
value: null
syslog_log_facility_murano:
value: null
syslog_log_facility_neutron:
value: null
syslog_log_facility_nova:
value: null
syslog_log_facility_sahara:
value: null
use_ovs:
value: null
use_syslog:
value: null
vcenter:
value: null

View File

@ -1,53 +0,0 @@
notice('MODULAR: heat/db.pp')
$heat_hash = hiera_hash('heat', {})
$mysql_hash = hiera_hash('mysql', {})
$management_vip = hiera('management_vip', undef)
$database_vip = hiera('database_vip', undef)
$mysql_root_user = pick($mysql_hash['root_user'], 'root')
$mysql_db_create = pick($mysql_hash['db_create'], true)
$mysql_root_password = $mysql_hash['root_password']
$db_user = pick($heat_hash['db_user'], 'heat')
$db_name = pick($heat_hash['db_name'], 'heat')
$db_password = pick($heat_hash['db_password'], $mysql_root_password)
$db_host = pick($heat_hash['db_host'], $database_vip)
$db_create = pick($heat_hash['db_create'], $mysql_db_create)
$db_root_user = pick($heat_hash['root_user'], $mysql_root_user)
$db_root_password = pick($heat_hash['root_password'], $mysql_root_password)
$allowed_hosts = [ $::hostname, 'localhost', '127.0.0.1', '%' ]
validate_string($mysql_root_user)
if $db_create {
class { 'galera::client':
custom_setup_class => hiera('mysql_custom_setup_class', 'galera'),
}
class { 'heat::db::mysql':
user => $db_user,
password => $db_password,
dbname => $db_name,
allowed_hosts => $allowed_hosts,
}
class { 'osnailyfacter::mysql_access':
db_host => $db_host,
db_user => $db_root_user,
db_password => $db_root_password,
}
Class['galera::client'] ->
Class['osnailyfacter::mysql_access'] ->
Class['heat::db::mysql']
}
class mysql::config {}
include mysql::config
class mysql::server {}
include mysql::server

View File

@ -1,23 +0,0 @@
id: heat-db
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
database_vip:
value: null
fqdn:
value: null
heat:
value: null
management_vip:
value: null
mysql:
value: null
mysql_custom_setup_class:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,59 +0,0 @@
notice('MODULAR: heat/keystone.pp')
$heat_hash = hiera_hash('heat', {})
$public_vip = hiera('public_vip')
$admin_address = hiera('management_vip')
$region = pick($heat_hash['region'], hiera('region', 'RegionOne'))
$public_ssl_hash = hiera('public_ssl')
$public_address = $public_ssl_hash['services'] ? {
true => $public_ssl_hash['hostname'],
default => $public_vip,
}
$public_protocol = $public_ssl_hash['services'] ? {
true => 'https',
default => 'http',
}
$password = $heat_hash['user_password']
$auth_name = pick($heat_hash['auth_name'], 'heat')
$configure_endpoint = pick($heat_hash['configure_endpoint'], true)
$configure_user = pick($heat_hash['configure_user'], true)
$configure_user_role = pick($heat_hash['configure_user_role'], true)
$service_name = pick($heat_hash['service_name'], 'heat')
$tenant = pick($heat_hash['tenant'], 'services')
validate_string($public_address)
validate_string($password)
$public_url = "${public_protocol}://${public_address}:8004/v1/%(tenant_id)s"
$admin_url = "http://${admin_address}:8004/v1/%(tenant_id)s"
$public_url_cfn = "${public_protocol}://${public_address}:8000/v1"
$admin_url_cfn = "http://${admin_address}:8000/v1"
class { '::heat::keystone::auth' :
password => $password,
auth_name => $auth_name,
region => $region,
  tenant => $tenant,
  email => "${auth_name}@localhost",
  configure_endpoint => $configure_endpoint,
trusts_delegated_roles => $trusts_delegated_roles,
public_url => $public_url,
internal_url => $admin_url,
admin_url => $admin_url,
}
class { '::heat::keystone::auth_cfn' :
password => $password,
auth_name => "${auth_name}-cfn",
service_type => 'cloudformation',
region => $region,
  tenant => $tenant,
  email => "${auth_name}-cfn@localhost",
  configure_endpoint => $configure_endpoint,
public_url => $public_url_cfn,
internal_url => $admin_url_cfn,
admin_url => $admin_url_cfn,
}

View File

@ -1,23 +0,0 @@
id: heat-keystone
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
heat:
value: null
management_vip:
value: null
public_ssl:
value: null
public_vip:
value: null
puppet_modules:
value: null
region:
value: null
role:
value: null

View File

@ -1,169 +0,0 @@
notice('MODULAR: heat.pp')
prepare_network_config(hiera('network_scheme', {}))
$management_vip = hiera('management_vip')
$heat_hash = hiera_hash('heat', {})
$rabbit_hash = hiera_hash('rabbit_hash', {})
$max_retries = hiera('max_retries')
$max_pool_size = hiera('max_pool_size')
$max_overflow = hiera('max_overflow')
$idle_timeout = hiera('idle_timeout')
$service_endpoint = hiera('service_endpoint')
$debug = pick($heat_hash['debug'], hiera('debug', false))
$verbose = pick($heat_hash['verbose'], hiera('verbose', true))
$use_stderr = hiera('use_stderr', false)
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_heat = hiera('syslog_log_facility_heat')
$deployment_mode = hiera('deployment_mode')
$bind_address = get_network_role_property('heat/api', 'ipaddr')
$database_password = $heat_hash['db_password']
$keystone_user = pick($heat_hash['user'], 'heat')
$keystone_tenant = pick($heat_hash['tenant'], 'services')
$db_host = pick($heat_hash['db_host'], hiera('database_vip'))
$database_user = pick($heat_hash['db_user'], 'heat')
$database_name = hiera('heat_db_name', 'heat')
$read_timeout = '60'
$sql_connection = "mysql://${database_user}:${database_password}@${db_host}/${database_name}?read_timeout=${read_timeout}"
$region = hiera('region', 'RegionOne')
$public_ssl_hash = hiera('public_ssl')
$public_ip = hiera('public_vip')
$public_protocol = pick($public_ssl_hash['services'], false) ? {
true => 'https',
default => 'http',
}
$public_address = pick($public_ssl_hash['services'], false) ? {
true => pick($public_ssl_hash['hostname']),
default => $public_ip,
}
$auth_uri = "${public_protocol}://${public_address}:5000/v2.0/"
$identity_uri = "http://${service_endpoint}:35357/"
####### Disable upstart startup on install #######
if $::operatingsystem == 'Ubuntu' {
tweaks::ubuntu_service_override { 'heat-api-cloudwatch':
package_name => 'heat-api-cloudwatch',
}
tweaks::ubuntu_service_override { 'heat-api-cfn':
package_name => 'heat-api-cfn',
}
tweaks::ubuntu_service_override { 'heat-api':
package_name => 'heat-api',
}
tweaks::ubuntu_service_override { 'heat-engine':
package_name => 'heat-engine',
}
Tweaks::Ubuntu_service_override['heat-api'] -> Service['heat-api']
Tweaks::Ubuntu_service_override['heat-api-cfn'] -> Service['heat-api-cfn']
Tweaks::Ubuntu_service_override['heat-api-cloudwatch'] -> Service['heat-api-cloudwatch']
Tweaks::Ubuntu_service_override['heat-engine'] -> Service['heat-engine']
}
class { 'openstack::heat' :
external_ip => $management_vip,
keystone_auth => pick($heat_hash['keystone_auth'], true),
api_bind_host => $bind_address,
api_cfn_bind_host => $bind_address,
api_cloudwatch_bind_host => $bind_address,
auth_uri => $auth_uri,
identity_uri => $identity_uri,
keystone_user => $keystone_user,
keystone_password => $heat_hash['user_password'],
keystone_tenant => $keystone_tenant,
keystone_ec2_uri => "http://${service_endpoint}:5000/v2.0",
region => $region,
public_ssl => $public_ssl_hash['services'],
rpc_backend => 'rabbit',
amqp_hosts => split(hiera('amqp_hosts',''), ','),
amqp_user => $rabbit_hash['user'],
amqp_password => $rabbit_hash['password'],
sql_connection => $sql_connection,
db_host => $db_host,
db_password => $database_password,
max_retries => $max_retries,
max_pool_size => $max_pool_size,
max_overflow => $max_overflow,
idle_timeout => $idle_timeout,
debug => $debug,
verbose => $verbose,
use_syslog => $use_syslog,
use_stderr => $use_stderr,
syslog_log_facility => $syslog_log_facility_heat,
auth_encryption_key => $heat_hash['auth_encryption_key'],
}
if hiera('heat_ha_engine', true){
if ($deployment_mode == 'ha') or ($deployment_mode == 'ha_compact') {
include ::heat_ha::engine
}
}
#------------------------------
class heat::docker_resource (
$enabled = true,
$package_name = 'heat-docker',
) {
if $enabled {
package { 'heat-docker':
ensure => installed,
name => $package_name,
}
Package['heat-docker'] ~> Service<| title == 'heat-engine' |>
}
}
if $::osfamily == 'RedHat' {
$docker_resource_package_name = 'openstack-heat-docker'
} elsif $::osfamily == 'Debian' {
$docker_resource_package_name = 'heat-docker'
}
class { 'heat::docker_resource' :
package_name => $docker_resource_package_name,
}
$haproxy_stats_url = "http://${service_endpoint}:10000/;csv"
haproxy_backend_status { 'keystone-admin' :
name => 'keystone-2',
count => '200',
step => '6',
url => $haproxy_stats_url,
}
class { 'heat::keystone::domain' :
auth_url => "http://${service_endpoint}:35357/v2.0",
keystone_admin => $keystone_user,
keystone_password => $heat_hash['user_password'],
keystone_tenant => $keystone_tenant,
domain_name => 'heat',
domain_admin => 'heat_admin',
domain_password => $heat_hash['user_password'],
}
Class['heat'] ->
Haproxy_backend_status['keystone-admin'] ->
Class['heat::keystone::domain'] ~>
Service<| title == 'heat-engine' |>
######################
exec { 'wait_for_heat_config' :
command => 'sync && sleep 3',
provider => 'shell',
}
Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api']
Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api-cfn']
Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api-cloudwatch']
Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-engine']
######################
class mysql::server {}
class mysql::config {}
include mysql::server
include mysql::config

View File

@ -1,55 +0,0 @@
id: heat
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
amqp_hosts:
value: null
database_vip:
value: null
debug:
value: null
deployment_mode:
value: null
fqdn:
value: null
heat:
value: null
heat_db_name:
value: null
heat_ha_engine:
value: null
idle_timeout:
value: null
management_vip:
value: null
max_overflow:
value: null
max_pool_size:
value: null
max_retries:
value: null
network_scheme:
value: null
public_ssl:
value: null
puppet_modules:
value: null
rabbit_hash:
value: null
region:
value: null
role:
value: null
service_endpoint:
value: null
syslog_log_facility_heat:
value: null
use_stderr:
value: null
use_syslog:
value: null
verbose:
value: null

View File

@ -1,75 +0,0 @@
notice('MODULAR: hiera.pp')
$deep_merge_package_name = $::osfamily ? {
/RedHat/ => 'rubygem-deep_merge',
/Debian/ => 'ruby-deep-merge',
}
$data_dir = '/etc/hiera'
$data = [
'override/node/%{::fqdn}',
'override/class/%{calling_class}',
'override/module/%{calling_module}',
'override/plugins',
'override/common',
'class/%{calling_class}',
'module/%{calling_module}',
'nodes',
'globals',
'astute'
]
$astute_data_file = '/etc/astute.yaml'
$hiera_main_config = '/etc/hiera.yaml'
$hiera_puppet_config = '/etc/puppet/hiera.yaml'
$hiera_data_file = "${data_dir}/astute.yaml"
File {
owner => 'root',
group => 'root',
mode => '0644',
}
$hiera_config_content = inline_template('
---
:backends:
- yaml
:hierarchy:
<% @data.each do |name| -%>
- <%= name %>
<% end -%>
:yaml:
  :datadir: <%= @data_dir %>
:merge_behavior: deeper
:logger: noop
')
file { 'hiera_data_dir' :
ensure => 'directory',
path => $data_dir,
}
file { 'hiera_config' :
ensure => 'present',
path => $hiera_main_config,
content => $hiera_config_content,
}
file { 'hiera_data_astute' :
ensure => 'symlink',
path => $hiera_data_file,
target => $astute_data_file,
}
file { 'hiera_puppet_config' :
ensure => 'symlink',
path => $hiera_puppet_config,
target => $hiera_main_config,
}
# needed to support the 'deeper' merge_behavior setting for hiera
package { 'rubygem-deep_merge':
ensure => present,
name => $deep_merge_package_name,
}
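For clarity, with the $data and $data_dir values defined above, the inline template produces an /etc/hiera.yaml roughly like the following (a sketch; exact whitespace may differ from the rendered file):

```yaml
---
:backends:
  - yaml
:hierarchy:
  - override/node/%{::fqdn}
  - override/class/%{calling_class}
  - override/module/%{calling_module}
  - override/plugins
  - override/common
  - class/%{calling_class}
  - module/%{calling_module}
  - nodes
  - globals
  - astute
:yaml:
  :datadir: /etc/hiera
:merge_behavior: deeper
:logger: noop
```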

View File

@ -1,11 +0,0 @@
id: hiera
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
puppet_modules:
value: null

View File

@ -1,79 +0,0 @@
notice('MODULAR: horizon.pp')
prepare_network_config(hiera('network_scheme', {}))
$horizon_hash = hiera_hash('horizon', {})
$service_endpoint = hiera('service_endpoint')
$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles'))
$memcache_address_map = get_node_to_ipaddr_map_by_network_role($memcache_nodes, 'mgmt/memcache')
$bind_address = get_network_role_property('horizon', 'ipaddr')
$neutron_advanced_config = hiera_hash('neutron_advanced_configuration', {})
$public_ssl = hiera('public_ssl')
$ssl_no_verify = $public_ssl['horizon']
if $horizon_hash['secret_key'] {
$secret_key = $horizon_hash['secret_key']
} else {
$secret_key = 'dummy_secret_key'
}
$neutron_dvr = pick($neutron_advanced_config['neutron_dvr'], false)
$keystone_scheme = 'http'
$keystone_host = $service_endpoint
$keystone_port = '5000'
$keystone_api = 'v2.0'
$keystone_url = "${keystone_scheme}://${keystone_host}:${keystone_port}/${keystone_api}"
$neutron_options = {'enable_distributed_router' => $neutron_dvr}
class { 'openstack::horizon':
secret_key => $secret_key,
cache_server_ip => ipsort(values($memcache_address_map)),
package_ensure => hiera('horizon_package_ensure', 'installed'),
bind_address => $bind_address,
cache_server_port => hiera('memcache_server_port', '11211'),
cache_backend => 'django.core.cache.backends.memcached.MemcachedCache',
cache_options => {'SOCKET_TIMEOUT' => 1,'SERVER_RETRIES' => 1,'DEAD_RETRY' => 1},
neutron => hiera('use_neutron'),
keystone_url => $keystone_url,
use_ssl => hiera('horizon_use_ssl', false),
ssl_no_verify => $ssl_no_verify,
verbose => pick($horizon_hash['verbose'], hiera('verbose', true)),
debug => pick($horizon_hash['debug'], hiera('debug')),
use_syslog => hiera('use_syslog', true),
nova_quota => hiera('nova_quota'),
servername => hiera('public_vip'),
neutron_options => $neutron_options,
}
$haproxy_stats_url = "http://${service_endpoint}:10000/;csv"
haproxy_backend_status { 'keystone-admin' :
name => 'keystone-2',
count => '30',
step => '3',
url => $haproxy_stats_url,
}
haproxy_backend_status { 'keystone-public' :
name => 'keystone-1',
count => '30',
step => '3',
url => $haproxy_stats_url,
}
Class['openstack::horizon'] -> Haproxy_backend_status['keystone-admin']
Class['openstack::horizon'] -> Haproxy_backend_status['keystone-public']
# TODO(aschultz): remove this if openstack-dashboard stops installing
# openstack-dashboard-apache
if $::osfamily == 'Debian' {
# LP#1513252 - remove this package if it's installed by the
# openstack-dashboard package installation.
package { 'openstack-dashboard-apache':
ensure => 'absent',
require => Package['openstack-dashboard']
} ~> Service[$::apache::params::service_name]
}
include ::tweaks::apache_wrappers

View File

@ -1,47 +0,0 @@
id: horizon
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
apache_ports:
value: null
debug:
value: null
fqdn:
value: null
horizon:
value: null
horizon_package_ensure:
value: null
horizon_use_ssl:
value: null
memcache_roles:
value: null
memcache_server_port:
value: null
network_metadata:
value: null
network_scheme:
value: null
neutron_advanced_configuration:
value: null
nova_quota:
value: null
public_ssl:
value: null
public_vip:
value: null
puppet_modules:
value: null
role:
value: null
service_endpoint:
value: null
use_neutron:
value: null
use_syslog:
value: null
verbose:
value: null

View File

@ -1,5 +0,0 @@
notice('MODULAR: hosts.pp')
class { "l23network::hosts_file":
nodes => hiera('nodes'),
}

View File

@ -1,13 +0,0 @@
id: hosts
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
nodes:
value: null
puppet_modules:
value: null

View File

@ -1,61 +0,0 @@
notice('MODULAR: ironic/ironic.pp')
$ironic_hash = hiera_hash('ironic', {})
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$network_metadata = hiera_hash('network_metadata', {})
$database_vip = hiera('database_vip')
$keystone_endpoint = hiera('service_endpoint')
$neutron_endpoint = hiera('neutron_endpoint', $management_vip)
$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292")
$debug = hiera('debug', false)
$verbose = hiera('verbose', true)
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_USER')
$rabbit_hash = hiera_hash('rabbit_hash', {})
$rabbit_ha_queues = hiera('rabbit_ha_queues')
$amqp_hosts = hiera('amqp_hosts')
$amqp_port = hiera('amqp_port', '5673')
$rabbit_hosts = split($amqp_hosts, ',')
$neutron_config = hiera_hash('quantum_settings')
$db_host = pick($ironic_hash['db_host'], $database_vip)
$db_user = pick($ironic_hash['db_user'], 'ironic')
$db_name = pick($ironic_hash['db_name'], 'ironic')
$db_password = pick($ironic_hash['db_password'], 'ironic')
$database_connection = "mysql://${db_user}:${db_password}@${db_host}/${db_name}?charset=utf8&read_timeout=60"
$ironic_tenant = pick($ironic_hash['tenant'],'services')
$ironic_user = pick($ironic_hash['auth_name'],'ironic')
$ironic_user_password = pick($ironic_hash['user_password'],'ironic')
prepare_network_config(hiera('network_scheme', {}))
$baremetal_vip = $network_metadata['vips']['baremetal']['ipaddr']
class { 'ironic':
verbose => $verbose,
debug => $debug,
rabbit_hosts => $rabbit_hosts,
rabbit_port => $amqp_port,
rabbit_userid => $rabbit_hash['user'],
rabbit_password => $rabbit_hash['password'],
amqp_durable_queues => $rabbit_ha_queues,
use_syslog => $use_syslog,
log_facility => $syslog_log_facility_ironic,
database_connection => $database_connection,
glance_api_servers => $glance_api_servers,
}
class { 'ironic::client': }
class { 'ironic::api':
host_ip => get_network_role_property('ironic/api', 'ipaddr'),
auth_host => $keystone_endpoint,
admin_tenant_name => $ironic_tenant,
admin_user => $ironic_user,
admin_password => $ironic_user_password,
neutron_url => "http://${neutron_endpoint}:9696",
}

View File

@ -1,11 +0,0 @@
id: ironic-api
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
ironic:
value: null
puppet_modules:
value: null

View File

@ -1,98 +0,0 @@
#####################################################################################
### ironic-compute is an additional compute role with compute_driver=ironic.   ###
### It can't be assigned to the same node as nova-compute, and it doesn't      ###
### include the openstack::compute class; it is configured separately.         ###
#####################################################################################
notice('MODULAR: ironic/ironic-compute.pp')
$ironic_hash = hiera_hash('ironic', {})
$nova_hash = hiera_hash('nova', {})
$management_vip = hiera('management_vip')
$database_vip = hiera('database_vip')
$service_endpoint = hiera('service_endpoint')
$neutron_endpoint = hiera('neutron_endpoint', $management_vip)
$ironic_endpoint = hiera('ironic_endpoint', $management_vip)
$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292")
$debug = hiera('debug', false)
$verbose = hiera('verbose', true)
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_LOCAL0')
$syslog_log_facility_nova = hiera('syslog_log_facility_nova', 'LOG_LOCAL6')
$amqp_hosts = hiera('amqp_hosts')
$rabbit_hash = hiera_hash('rabbit_hash')
$nova_report_interval = hiera('nova_report_interval')
$nova_service_down_time = hiera('nova_service_down_time')
$neutron_config = hiera_hash('quantum_settings')
$ironic_tenant = pick($ironic_hash['tenant'],'services')
$ironic_user = pick($ironic_hash['auth_name'],'ironic')
$ironic_user_password = pick($ironic_hash['user_password'],'ironic')
$db_host = pick($nova_hash['db_host'], $database_vip)
$db_user = pick($nova_hash['db_user'], 'nova')
$db_name = pick($nova_hash['db_name'], 'nova')
$db_password = pick($nova_hash['db_password'], 'nova')
$database_connection = "mysql://${db_user}:${db_password}@${db_host}/${db_name}?read_timeout=60"
$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles'))
$cache_server_ip = ipsort(values(get_node_to_ipaddr_map_by_network_role($memcache_nodes,'mgmt/memcache')))
$cache_server_port = hiera('memcache_server_port', '11211')
$memcached_addresses = suffix($cache_server_ip, inline_template(":<%= @cache_server_port %>"))
$notify_on_state_change = 'vm_and_task_state'
class { '::nova':
install_utilities => false,
ensure_package => installed,
database_connection => $database_connection,
rpc_backend => 'nova.openstack.common.rpc.impl_kombu',
#FIXME(bogdando) we have to split amqp_hosts until all modules are synced
rabbit_hosts => split($amqp_hosts, ','),
rabbit_userid => $rabbit_hash['user'],
rabbit_password => $rabbit_hash['password'],
image_service => 'nova.image.glance.GlanceImageService',
glance_api_servers => $glance_api_servers,
verbose => $verbose,
debug => $debug,
use_syslog => $use_syslog,
log_facility => $syslog_log_facility_nova,
state_path => $nova_hash['state_path'],
report_interval => $nova_report_interval,
service_down_time => $nova_service_down_time,
notify_on_state_change => $notify_on_state_change,
memcached_servers => $memcached_addresses,
}
class { '::nova::compute':
ensure_package => installed,
enabled => true,
vnc_enabled => false,
force_config_drive => $nova_hash['force_config_drive'],
#NOTE(bogdando) default became true in 4.0.0 puppet-nova (was false)
neutron_enabled => true,
default_availability_zone => $nova_hash['default_availability_zone'],
default_schedule_zone => $nova_hash['default_schedule_zone'],
reserved_host_memory => '0',
}
class { 'nova::compute::ironic':
admin_url => "http://${service_endpoint}:35357/v2.0",
admin_user => $ironic_user,
admin_tenant_name => $ironic_tenant,
admin_passwd => $ironic_user_password,
api_endpoint => "http://${ironic_endpoint}:6385/v1",
}
class { 'nova::network::neutron':
neutron_admin_password => $neutron_config['keystone']['admin_password'],
neutron_url => "http://${neutron_endpoint}:9696",
neutron_admin_auth_url => "http://${service_endpoint}:35357/v2.0",
}
file { '/etc/nova/nova-compute.conf':
ensure => absent,
require => Package['nova-compute'],
} ~> Service['nova-compute']

View File

@ -1,13 +0,0 @@
id: ironic-compute
handler: puppetv2
version: '8.0'
actions:
run: run.pp
update: run.pp
input:
fqdn:
value: null
puppet_modules:
value: null
role:
value: null

View File

@ -1,121 +0,0 @@
notice('MODULAR: ironic/ironic-conductor.pp')
$network_scheme = hiera('network_scheme', {})
prepare_network_config($network_scheme)
$baremetal_address = get_network_role_property('ironic/baremetal', 'ipaddr')
$ironic_hash = hiera_hash('ironic', {})
$management_vip = hiera('management_vip')
$network_metadata = hiera_hash('network_metadata', {})
$baremetal_vip = $network_metadata['vips']['baremetal']['ipaddr']
$database_vip = hiera('database_vip')
$service_endpoint = hiera('service_endpoint')
$neutron_endpoint = hiera('neutron_endpoint', $management_vip)
$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292")
$amqp_hosts = hiera('amqp_hosts')
$rabbit_hosts = split($amqp_hosts, ',')
$debug = hiera('debug', false)
$verbose = hiera('verbose', true)
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_USER')
$rabbit_hash = hiera_hash('rabbit_hash')
$rabbit_ha_queues = hiera('rabbit_ha_queues')
$ironic_tenant = pick($ironic_hash['tenant'],'services')
$ironic_user = pick($ironic_hash['auth_name'],'ironic')
$ironic_user_password = pick($ironic_hash['user_password'],'ironic')
$ironic_swift_tempurl_key = pick($ironic_hash['swift_tempurl_key'],'ironic')
$db_host = pick($ironic_hash['db_host'], $database_vip)
$db_user = pick($ironic_hash['db_user'], 'ironic')
$db_name = pick($ironic_hash['db_name'], 'ironic')
$db_password = pick($ironic_hash['db_password'], 'ironic')
$database_connection = "mysql://${db_user}:${db_password}@${db_host}/${db_name}?charset=utf8&read_timeout=60"
$tftp_root = '/var/lib/ironic/tftpboot'
package { 'ironic-fa-deploy':
  ensure => 'present',
}

class { '::ironic':
  verbose             => $verbose,
  debug               => $debug,
  enabled_drivers     => ['fuel_ssh', 'fuel_ipmitool', 'fake'],
  rabbit_hosts        => $rabbit_hosts,
  rabbit_userid       => $rabbit_hash['user'],
  rabbit_password     => $rabbit_hash['password'],
  amqp_durable_queues => $rabbit_ha_queues,
  use_syslog          => $use_syslog,
  log_facility        => $syslog_log_facility_ironic,
  database_connection => $database_connection,
  glance_api_servers  => $glance_api_servers,
}

class { '::ironic::client': }
class { '::ironic::conductor': }

class { '::ironic::drivers::pxe':
  tftp_server      => $baremetal_address,
  tftp_root        => $tftp_root,
  tftp_master_path => "${tftp_root}/master_images",
}

ironic_config {
  'neutron/url': value => "http://${neutron_endpoint}:9696";
  'keystone_authtoken/auth_uri': value => "http://${service_endpoint}:5000/";
  'keystone_authtoken/auth_host': value => $service_endpoint;
  'keystone_authtoken/admin_tenant_name': value => $ironic_tenant;
  'keystone_authtoken/admin_user': value => $ironic_user;
  'keystone_authtoken/admin_password': value => $ironic_user_password, secret => true;
  'glance/swift_temp_url_key': value => $ironic_swift_tempurl_key;
  'glance/swift_endpoint_url': value => "http://${baremetal_vip}:8080";
  'conductor/api_url': value => "http://${baremetal_vip}:6385";
}

file { $tftp_root:
  ensure  => directory,
  owner   => 'ironic',
  group   => 'ironic',
  mode    => '0755',
  require => Class['ironic'],
}

file { "${tftp_root}/pxelinux.0":
  ensure  => present,
  source  => '/usr/lib/syslinux/pxelinux.0',
  require => Package['syslinux'],
}

file { "${tftp_root}/map-file":
  content => "r ^([^/]) ${tftp_root}/\\1",
}

class { '::tftp':
  username  => 'ironic',
  directory => $tftp_root,
  options   => "--map-file ${tftp_root}/map-file",
  inetd     => false,
  require   => File["${tftp_root}/map-file"],
}

package { 'syslinux':
  ensure => 'present',
}

package { 'ipmitool':
  ensure => 'present',
  before => Class['::ironic::conductor'],
}

file { "/etc/ironic/fuel_key":
  ensure  => present,
  source  => '/var/lib/astute/ironic/ironic',
  owner   => 'ironic',
  group   => 'ironic',
  mode    => '0600',
  require => Class['ironic'],
}


@ -1,13 +0,0 @@
id: ironic-conductor
handler: puppetv2
version: '8.0'
actions:
  run: run.pp
  update: run.pp
input:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null


@ -1,51 +0,0 @@
notice('MODULAR: ironic/db.pp')
$ironic_hash = hiera_hash('ironic', {})
$mysql_hash = hiera_hash('mysql', {})
$database_vip = hiera('database_vip')
$mysql_root_user = pick($mysql_hash['root_user'], 'root')
$mysql_db_create = pick($mysql_hash['db_create'], true)
$mysql_root_password = $mysql_hash['root_password']
$db_user = pick($ironic_hash['db_user'], 'ironic')
$db_name = pick($ironic_hash['db_name'], 'ironic')
$db_password = pick($ironic_hash['db_password'], $mysql_root_password)
$db_host = pick($ironic_hash['db_host'], $database_vip)
$db_create = pick($ironic_hash['db_create'], $mysql_db_create)
$db_root_user = pick($ironic_hash['root_user'], $mysql_root_user)
$db_root_password = pick($ironic_hash['root_password'], $mysql_root_password)
$allowed_hosts = [ hiera('node_name'), 'localhost', '127.0.0.1', '%' ]
validate_string($mysql_root_user)
validate_string($database_vip)
if $db_create {

  class { 'galera::client':
    custom_setup_class => hiera('mysql_custom_setup_class', 'galera'),
  }

  class { 'ironic::db::mysql':
    user          => $db_user,
    password      => $db_password,
    dbname        => $db_name,
    allowed_hosts => $allowed_hosts,
  }

  class { 'osnailyfacter::mysql_access':
    db_host     => $db_host,
    db_user     => $db_root_user,
    db_password => $db_root_password,
  }

  Class['galera::client'] ->
  Class['osnailyfacter::mysql_access'] ->
  Class['ironic::db::mysql']
}
# Empty stub definitions: any 'include mysql::config' / 'include mysql::server'
# triggered by the classes above resolves to these no-ops instead of pulling in
# the full mysql module on this node.
class mysql::config {}
include mysql::config
class mysql::server {}
include mysql::server


@ -1,23 +0,0 @@
id: ironic-db
handler: puppetv2
version: '8.0'
actions:
  run: run.pp
  update: run.pp
input:
  database_vip:
    value: null
  fqdn:
    value: null
  ironic:
    value: null
  mysql:
    value: null
  mysql_custom_setup_class:
    value: null
  node_name:
    value: null
  puppet_modules:
    value: null
  role:
    value: null


@ -1,39 +0,0 @@
notice('MODULAR: ironic/keystone.pp')
$ironic_hash = hiera_hash('ironic', {})
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$public_ssl_hash = hiera('public_ssl')
$ironic_tenant = pick($ironic_hash['tenant'],'services')
$ironic_user = pick($ironic_hash['auth_name'],'ironic')
$ironic_user_password = pick($ironic_hash['user_password'],'ironic')
$configure_endpoint = pick($ironic_hash['configure_endpoint'], true)
$configure_user = pick($ironic_hash['configure_user'], true)
$configure_user_role = pick($ironic_hash['configure_user_role'], true)
$service_name = pick($ironic_hash['service_name'], 'ironic')
$public_address = $public_ssl_hash['services'] ? {
  true    => $public_ssl_hash['hostname'],
  default => $public_vip,
}
$public_protocol = $public_ssl_hash['services'] ? {
  true    => 'https',
  default => 'http',
}
$region = hiera('region', 'RegionOne')
$public_url = "${public_protocol}://${public_address}:6385"
$admin_url = "http://${management_vip}:6385"
$internal_url = "http://${management_vip}:6385"
class { 'ironic::keystone::auth':
  password            => $ironic_user_password,
  region              => $region,
  public_url          => $public_url,
  internal_url        => $internal_url,
  admin_url           => $admin_url,
  configure_endpoint  => $configure_endpoint,
  configure_user      => $configure_user,
  configure_user_role => $configure_user_role,
  service_name        => $service_name,
}
