Save generated resources with inputs
parent 88ddec81d0
commit 1d3409e381
f2s/resources/apache/actions/run.pp (new file, 13 lines)
@@ -0,0 +1,13 @@
notice('MODULAR: apache.pp')

# adjustments to defaults for LP#1485644 for scale
sysctl::value { 'net.core.somaxconn':           value => '4096' }
sysctl::value { 'net.ipv4.tcp_max_syn_backlog': value => '8192' }

class { 'osnailyfacter::apache':
  purge_configs => true,
  listen_ports  => hiera_array('apache_ports', ['80', '8888']),
}

include ::osnailyfacter::apache_mpm
f2s/resources/apache/meta.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
id: apache
handler: puppetv2
version: '8.0'
inputs:
  apache_ports:
    value: null
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
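Note: each key under "inputs:" in these meta.yaml files mirrors deployment data that the paired actions/run.pp reads via hiera, and "value: null" defers resolution to the deployment graph at run time (this reading follows from the manifests; it is not stated in the commit). A minimal sketch of the correspondence, reusing the lookup and default from the manifest above (the notice line is illustrative only):

    $apache_ports = hiera_array('apache_ports', ['80', '8888'])
    $ports_csv    = join($apache_ports, ', ')  # join() is from puppetlabs-stdlib
    notice("apache will listen on: ${ports_csv}")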
f2s/resources/api-proxy/actions/run.pp (new file, 16 lines)
@@ -0,0 +1,16 @@
notice('MODULAR: api-proxy.pp')

$max_header_size = hiera('max_header_size', '81900')

# Apache and listen ports
class { 'osnailyfacter::apache':
  listen_ports => hiera_array('apache_ports', ['80', '8888']),
}

# API proxy vhost
class { 'osnailyfacter::apache_api_proxy':
  master_ip       => hiera('master_ip'),
  max_header_size => $max_header_size,
}

include ::tweaks::apache_wrappers
f2s/resources/api-proxy/meta.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
id: api-proxy
handler: puppetv2
version: '8.0'
inputs:
  apache_ports:
    value: null
  fqdn:
    value: null
  master_ip:
    value: null
  max_header_size:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
f2s/resources/ceilometer-compute/actions/run.pp (new file, 59 lines)
@@ -0,0 +1,59 @@
notice('MODULAR: ceilometer/compute.pp')

$use_syslog          = hiera('use_syslog', true)
$use_stderr          = hiera('use_stderr', false)
$syslog_log_facility = hiera('syslog_log_facility_ceilometer', 'LOG_LOCAL0')
$rabbit_hash         = hiera_hash('rabbit_hash')
$management_vip      = hiera('management_vip')
$service_endpoint    = hiera('service_endpoint')

$default_ceilometer_hash = {
  'enabled'               => false,
  'db_password'           => 'ceilometer',
  'user_password'         => 'ceilometer',
  'metering_secret'       => 'ceilometer',
  'http_timeout'          => '600',
  'event_time_to_live'    => '604800',
  'metering_time_to_live' => '604800',
}

$region                     = hiera('region', 'RegionOne')
$ceilometer_hash            = hiera_hash('ceilometer_hash', $default_ceilometer_hash)
$ceilometer_region          = pick($ceilometer_hash['region'], $region)
$ceilometer_enabled         = $ceilometer_hash['enabled']
$amqp_password              = $rabbit_hash['password']
$amqp_user                  = $rabbit_hash['user']
$ceilometer_user_password   = $ceilometer_hash['user_password']
$ceilometer_metering_secret = $ceilometer_hash['metering_secret']
$verbose                    = pick($ceilometer_hash['verbose'], hiera('verbose', true))
$debug                      = pick($ceilometer_hash['debug'], hiera('debug', false))

if ($ceilometer_enabled) {
  class { 'openstack::ceilometer':
    verbose               => $verbose,
    debug                 => $debug,
    use_syslog            => $use_syslog,
    use_stderr            => $use_stderr,
    syslog_log_facility   => $syslog_log_facility,
    amqp_hosts            => hiera('amqp_hosts', ''),
    amqp_user             => $amqp_user,
    amqp_password         => $amqp_password,
    keystone_user         => $ceilometer_hash['user'],
    keystone_tenant       => $ceilometer_hash['tenant'],
    keystone_region       => $ceilometer_region,
    keystone_host         => $service_endpoint,
    keystone_password     => $ceilometer_user_password,
    on_compute            => true,
    metering_secret       => $ceilometer_metering_secret,
    event_time_to_live    => $ceilometer_hash['event_time_to_live'],
    metering_time_to_live => $ceilometer_hash['metering_time_to_live'],
    http_timeout          => $ceilometer_hash['http_timeout'],
  }

  # We need to restart the nova-compute service in order to apply new settings
  include ::nova::params
  service { 'nova-compute':
    ensure => 'running',
    name   => $::nova::params::compute_service_name,
  }
}
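Note: pick() (puppetlabs-stdlib) returns the first argument that is neither undef nor empty, which is how a per-service hash entry overrides a global hiera default throughout these manifests. A small illustrative sketch (values are made up):

    $ceilometer_hash = { 'debug' => undef }
    # the undef hash entry is skipped, so the hiera default wins:
    $debug = pick($ceilometer_hash['debug'], hiera('debug', false))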
f2s/resources/ceilometer-compute/meta.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
id: ceilometer-compute
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
f2s/resources/ceilometer-controller/actions/run.pp (new file, 111 lines)
@@ -0,0 +1,111 @@
notice('MODULAR: ceilometer/controller.pp')

$default_ceilometer_hash = {
  'enabled'               => false,
  'db_password'           => 'ceilometer',
  'user_password'         => 'ceilometer',
  'metering_secret'       => 'ceilometer',
  'http_timeout'          => '600',
  'event_time_to_live'    => '604800',
  'metering_time_to_live' => '604800',
}

$ceilometer_hash     = hiera_hash('ceilometer', $default_ceilometer_hash)
$verbose             = pick($ceilometer_hash['verbose'], hiera('verbose', true))
$debug               = pick($ceilometer_hash['debug'], hiera('debug', false))
$use_syslog          = hiera('use_syslog', true)
$use_stderr          = hiera('use_stderr', false)
$syslog_log_facility = hiera('syslog_log_facility_ceilometer', 'LOG_LOCAL0')
$nodes_hash          = hiera('nodes')
$storage_hash        = hiera('storage')
$rabbit_hash         = hiera_hash('rabbit_hash')
$management_vip      = hiera('management_vip')
$region              = hiera('region', 'RegionOne')
$ceilometer_region   = pick($ceilometer_hash['region'], $region)
$mongo_nodes         = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('mongo_roles'))
$mongo_address_map   = get_node_to_ipaddr_map_by_network_role($mongo_nodes, 'mongo/db')

$default_mongo_hash = {
  'enabled' => false,
}

$mongo_hash = hiera_hash('mongo', $default_mongo_hash)

if $mongo_hash['enabled'] and $ceilometer_hash['enabled'] {
  $external_mongo_hash    = hiera_hash('external_mongo')
  $ceilometer_db_user     = $external_mongo_hash['mongo_user']
  $ceilometer_db_password = $external_mongo_hash['mongo_password']
  $ceilometer_db_dbname   = $external_mongo_hash['mongo_db_name']
  $external_mongo         = true
} else {
  $ceilometer_db_user     = 'ceilometer'
  $ceilometer_db_password = $ceilometer_hash['db_password']
  $ceilometer_db_dbname   = 'ceilometer'
  $external_mongo         = false
  $external_mongo_hash    = {}
}

$ceilometer_enabled         = $ceilometer_hash['enabled']
$ceilometer_user_password   = $ceilometer_hash['user_password']
$ceilometer_metering_secret = $ceilometer_hash['metering_secret']
$ceilometer_db_type         = 'mongodb'
$swift_rados_backend        = $storage_hash['objects_ceph']
$amqp_password              = $rabbit_hash['password']
$amqp_user                  = $rabbit_hash['user']
$rabbit_ha_queues           = true
$service_endpoint           = hiera('service_endpoint')
$ha_mode                    = pick($ceilometer_hash['ha_mode'], true)

prepare_network_config(hiera('network_scheme', {}))
$api_bind_address = get_network_role_property('ceilometer/api', 'ipaddr')

if $ceilometer_hash['enabled'] {
  if $external_mongo {
    $mongo_hosts = $external_mongo_hash['hosts_ip']
    if $external_mongo_hash['mongo_replset'] {
      $mongo_replicaset = $external_mongo_hash['mongo_replset']
    } else {
      $mongo_replicaset = undef
    }
  } else {
    $mongo_hosts = join(values($mongo_address_map), ',')
    # MongoDB is always configured with a replica set
    $mongo_replicaset = 'ceilometer'
  }
}

###############################################################################

if ($ceilometer_enabled) {
  class { 'openstack::ceilometer':
    verbose               => $verbose,
    debug                 => $debug,
    use_syslog            => $use_syslog,
    use_stderr            => $use_stderr,
    syslog_log_facility   => $syslog_log_facility,
    db_type               => $ceilometer_db_type,
    db_host               => $mongo_hosts,
    db_user               => $ceilometer_db_user,
    db_password           => $ceilometer_db_password,
    db_dbname             => $ceilometer_db_dbname,
    swift_rados_backend   => $swift_rados_backend,
    metering_secret       => $ceilometer_metering_secret,
    amqp_hosts            => hiera('amqp_hosts', ''),
    amqp_user             => $amqp_user,
    amqp_password         => $amqp_password,
    rabbit_ha_queues      => $rabbit_ha_queues,
    keystone_host         => $service_endpoint,
    keystone_password     => $ceilometer_user_password,
    keystone_user         => $ceilometer_hash['user'],
    keystone_tenant       => $ceilometer_hash['tenant'],
    keystone_region       => $ceilometer_region,
    host                  => $api_bind_address,
    ha_mode               => $ha_mode,
    on_controller         => true,
    ext_mongo             => $external_mongo,
    mongo_replicaset      => $mongo_replicaset,
    event_time_to_live    => $ceilometer_hash['event_time_to_live'],
    metering_time_to_live => $ceilometer_hash['metering_time_to_live'],
    http_timeout          => $ceilometer_hash['http_timeout'],
  }
}
f2s/resources/ceilometer-controller/meta.yaml (new file, 44 lines)
@@ -0,0 +1,44 @@
id: ceilometer-controller
handler: puppetv2
version: '8.0'
inputs:
  ceilometer:
    value: null
  debug:
    value: null
  fqdn:
    value: null
  management_vip:
    value: null
  mongo:
    value: null
  mongo_roles:
    value: null
  network_metadata:
    value: null
  network_scheme:
    value: null
  nodes:
    value: null
  puppet_modules:
    value: null
  rabbit:
    value: null
  rabbit_hash:
    value: null
  region:
    value: null
  role:
    value: null
  service_endpoint:
    value: null
  storage:
    value: null
  syslog_log_facility_ceilometer:
    value: null
  use_stderr:
    value: null
  use_syslog:
    value: null
  verbose:
    value: null
f2s/resources/ceilometer-keystone/actions/run.pp (new file, 41 lines)
@@ -0,0 +1,41 @@
notice('MODULAR: ceilometer/keystone.pp')

$ceilometer_hash = hiera_hash('ceilometer', {})
$public_vip      = hiera('public_vip')
$public_ssl_hash = hiera('public_ssl')
$public_address  = $public_ssl_hash['services'] ? {
  true    => $public_ssl_hash['hostname'],
  default => $public_vip,
}
$public_protocol = $public_ssl_hash['services'] ? {
  true    => 'https',
  default => 'http',
}
$admin_address       = hiera('management_vip')
$region              = pick($ceilometer_hash['region'], hiera('region', 'RegionOne'))
$password            = $ceilometer_hash['user_password']
$auth_name           = pick($ceilometer_hash['auth_name'], 'ceilometer')
$configure_endpoint  = pick($ceilometer_hash['configure_endpoint'], true)
$configure_user      = pick($ceilometer_hash['configure_user'], true)
$configure_user_role = pick($ceilometer_hash['configure_user_role'], true)
$service_name        = pick($ceilometer_hash['service_name'], 'ceilometer')
$tenant              = pick($ceilometer_hash['tenant'], 'services')

validate_string($public_address)
validate_string($password)

$public_url = "${public_protocol}://${public_address}:8777"
$admin_url  = "http://${admin_address}:8777"

class { '::ceilometer::keystone::auth':
  password            => $password,
  auth_name           => $auth_name,
  configure_endpoint  => $configure_endpoint,
  configure_user      => $configure_user,
  configure_user_role => $configure_user_role,
  service_name        => $service_name,
  public_url          => $public_url,
  internal_url        => $admin_url,
  admin_url           => $admin_url,
  region              => $region,
}
f2s/resources/ceilometer-keystone/meta.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
id: ceilometer-keystone
handler: puppetv2
version: '8.0'
inputs:
  ceilometer:
    value: null
  fqdn:
    value: null
  management_vip:
    value: null
  public_ssl:
    value: null
  public_vip:
    value: null
  puppet_modules:
    value: null
  region:
    value: null
  role:
    value: null
f2s/resources/ceilometer-radosgw-user/actions/run.pp (new file, 20 lines)
@@ -0,0 +1,20 @@
notice('MODULAR: ceilometer/radosgw_user.pp')

$default_ceilometer_hash = {
  'enabled' => false,
}

$ceilometer_hash = hiera_hash('ceilometer', $default_ceilometer_hash)

if $ceilometer_hash['enabled'] {
  include ceilometer::params

  ceilometer_radosgw_user { 'ceilometer':
    caps => {'buckets' => 'read', 'usage' => 'read'},
  } ~>
  service { $::ceilometer::params::agent_central_service_name:
    ensure   => 'running',
    enable   => true,
    provider => 'pacemaker',
  }
}
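Note: the ~> chaining arrow both orders the two resources and propagates a refresh, so the central agent is restarted whenever the radosgw user resource changes. The same idiom with generic resources (names are illustrative):

    exec { 'write-config':
      command => '/bin/echo reconfigured > /tmp/example.conf',
    } ~>
    service { 'example-daemon':
      ensure => 'running',
    }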
f2s/resources/ceilometer-radosgw-user/meta.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
id: ceilometer-radosgw-user
handler: puppetv2
version: '8.0'
inputs:
  ceilometer:
    value: null
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
  storage:
    value: null
f2s/resources/ceph-compute/actions/run.pp (new file, 97 lines)
@@ -0,0 +1,97 @@
notice('MODULAR: ceph/ceph_compute.pp')

$mon_address_map          = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
$storage_hash             = hiera_hash('storage_hash', {})
$use_neutron              = hiera('use_neutron')
$public_vip               = hiera('public_vip')
$management_vip           = hiera('management_vip')
$use_syslog               = hiera('use_syslog', true)
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph', 'LOG_LOCAL0')
$keystone_hash            = hiera_hash('keystone_hash', {})
# Cinder settings
$cinder_pool = 'volumes'
# Glance settings
$glance_pool = 'images'
# Nova Compute settings
$compute_user = 'compute'
$compute_pool = 'compute'

if ($storage_hash['images_ceph']) {
  $glance_backend = 'ceph'
} elsif ($storage_hash['images_vcenter']) {
  $glance_backend = 'vmware'
} else {
  $glance_backend = 'swift'
}

if ($storage_hash['volumes_ceph'] or
    $storage_hash['images_ceph'] or
    $storage_hash['objects_ceph'] or
    $storage_hash['ephemeral_ceph']
) {
  $use_ceph = true
} else {
  $use_ceph = false
}

if $use_ceph {
  $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
  $primary_mons              = keys($ceph_primary_monitor_node)
  $primary_mon               = $ceph_primary_monitor_node[$primary_mons[0]]['name']

  prepare_network_config(hiera_hash('network_scheme'))
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network  = get_network_role_property('ceph/public', 'network')

  class { 'ceph':
    primary_mon              => $primary_mon,
    mon_hosts                => keys($mon_address_map),
    mon_ip_addresses         => values($mon_address_map),
    cluster_node_address     => $public_vip,
    osd_pool_default_size    => $storage_hash['osd_pool_size'],
    osd_pool_default_pg_num  => $storage_hash['pg_num'],
    osd_pool_default_pgp_num => $storage_hash['pg_num'],
    use_rgw                  => false,
    glance_backend           => $glance_backend,
    rgw_pub_ip               => $public_vip,
    rgw_adm_ip               => $management_vip,
    rgw_int_ip               => $management_vip,
    cluster_network          => $ceph_cluster_network,
    public_network           => $ceph_public_network,
    use_syslog               => $use_syslog,
    syslog_log_level         => hiera('syslog_log_level_ceph', 'info'),
    syslog_log_facility      => $syslog_log_facility_ceph,
    rgw_keystone_admin_token => $keystone_hash['admin_token'],
    ephemeral_ceph           => $storage_hash['ephemeral_ceph'],
  }

  service { $::ceph::params::service_nova_compute: }

  ceph::pool { $compute_pool:
    user          => $compute_user,
    acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}, allow rwx pool=${compute_pool}'",
    keyring_owner => 'nova',
    pg_num        => $storage_hash['pg_num'],
    pgp_num       => $storage_hash['pg_num'],
  }

  include ceph::nova_compute

  if ($storage_hash['ephemeral_ceph']) {
    include ceph::ephemeral
    Class['ceph::conf'] -> Class['ceph::ephemeral'] ~>
    Service[$::ceph::params::service_nova_compute]
  }

  Class['ceph::conf'] ->
  Ceph::Pool[$compute_pool] ->
  Class['ceph::nova_compute'] ~>
  Service[$::ceph::params::service_nova_compute]

  Exec { path => [ '/bin/', '/sbin/', '/usr/bin/', '/usr/sbin/' ],
    cwd => '/root',
  }
}
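Note: the capitalized Exec { path => ..., cwd => ... } near the end is a resource default, not a resource declaration: it supplies path and cwd to every exec declared in this scope, regardless of where it appears in the file. A minimal sketch of the effect (resource name and command are illustrative):

    Exec { path => ['/bin', '/usr/bin'] }

    # picks up the default path, so the bare command name resolves:
    exec { 'touch-marker':
      command => 'touch /tmp/ceph-compute.done',
    }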
f2s/resources/ceph-compute/meta.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
id: ceph-compute
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
f2s/resources/ceph-mon/actions/run.pp (new file, 95 lines)
@@ -0,0 +1,95 @@
notice('MODULAR: ceph/mon.pp')

$storage_hash             = hiera('storage', {})
$use_neutron              = hiera('use_neutron')
$public_vip               = hiera('public_vip')
$management_vip           = hiera('management_vip')
$use_syslog               = hiera('use_syslog', true)
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph', 'LOG_LOCAL0')
$keystone_hash            = hiera('keystone', {})
$mon_address_map          = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')

if ($storage_hash['images_ceph']) {
  $glance_backend = 'ceph'
} elsif ($storage_hash['images_vcenter']) {
  $glance_backend = 'vmware'
} else {
  $glance_backend = 'swift'
}

if ($storage_hash['volumes_ceph'] or
    $storage_hash['images_ceph'] or
    $storage_hash['objects_ceph'] or
    $storage_hash['ephemeral_ceph']
) {
  $use_ceph = true
} else {
  $use_ceph = false
}

if $use_ceph {
  $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
  $primary_mons              = keys($ceph_primary_monitor_node)
  $primary_mon               = $ceph_primary_monitor_node[$primary_mons[0]]['name']

  prepare_network_config(hiera_hash('network_scheme'))
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network  = get_network_role_property('ceph/public', 'network')
  $mon_addr             = get_network_role_property('ceph/public', 'ipaddr')

  class { 'ceph':
    primary_mon              => $primary_mon,
    mon_hosts                => keys($mon_address_map),
    mon_ip_addresses         => values($mon_address_map),
    mon_addr                 => $mon_addr,
    cluster_node_address     => $public_vip,
    osd_pool_default_size    => $storage_hash['osd_pool_size'],
    osd_pool_default_pg_num  => $storage_hash['pg_num'],
    osd_pool_default_pgp_num => $storage_hash['pg_num'],
    use_rgw                  => false,
    glance_backend           => $glance_backend,
    rgw_pub_ip               => $public_vip,
    rgw_adm_ip               => $management_vip,
    rgw_int_ip               => $management_vip,
    cluster_network          => $ceph_cluster_network,
    public_network           => $ceph_public_network,
    use_syslog               => $use_syslog,
    syslog_log_level         => hiera('syslog_log_level_ceph', 'info'),
    syslog_log_facility      => $syslog_log_facility_ceph,
    rgw_keystone_admin_token => $keystone_hash['admin_token'],
    ephemeral_ceph           => $storage_hash['ephemeral_ceph'],
  }

  if ($storage_hash['volumes_ceph']) {
    include ::cinder::params
    service { 'cinder-volume':
      ensure     => 'running',
      name       => $::cinder::params::volume_service,
      hasstatus  => true,
      hasrestart => true,
    }

    service { 'cinder-backup':
      ensure     => 'running',
      name       => $::cinder::params::backup_service,
      hasstatus  => true,
      hasrestart => true,
    }

    Class['ceph'] ~> Service['cinder-volume']
    Class['ceph'] ~> Service['cinder-backup']
  }

  if ($storage_hash['images_ceph']) {
    include ::glance::params
    service { 'glance-api':
      ensure     => 'running',
      name       => $::glance::params::api_service_name,
      hasstatus  => true,
      hasrestart => true,
    }

    Class['ceph'] ~> Service['glance-api']
  }

}
f2s/resources/ceph-mon/meta.yaml (new file, 32 lines)
@@ -0,0 +1,32 @@
id: ceph-mon
handler: puppetv2
version: '8.0'
inputs:
  ceph_monitor_nodes:
    value: null
  ceph_primary_monitor_node:
    value: null
  fqdn:
    value: null
  keystone:
    value: null
  management_vip:
    value: null
  network_scheme:
    value: null
  public_vip:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
  storage:
    value: null
  syslog_log_facility_ceph:
    value: null
  syslog_log_level_ceph:
    value: null
  use_neutron:
    value: null
  use_syslog:
    value: null
f2s/resources/ceph-radosgw/actions/run.pp (new file, 103 lines)
@@ -0,0 +1,103 @@
notice('MODULAR: ceph/radosgw.pp')

$storage_hash     = hiera('storage', {})
$use_neutron      = hiera('use_neutron')
$public_vip       = hiera('public_vip')
$keystone_hash    = hiera('keystone', {})
$management_vip   = hiera('management_vip')
$service_endpoint = hiera('service_endpoint')
$public_ssl_hash  = hiera('public_ssl')
$mon_address_map  = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')

if ($storage_hash['volumes_ceph'] or
    $storage_hash['images_ceph'] or
    $storage_hash['objects_ceph']
) {
  $use_ceph = true
} else {
  $use_ceph = false
}

if $use_ceph and $storage_hash['objects_ceph'] {
  $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
  $primary_mons              = keys($ceph_primary_monitor_node)
  $primary_mon               = $ceph_primary_monitor_node[$primary_mons[0]]['name']

  prepare_network_config(hiera_hash('network_scheme'))
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network  = get_network_role_property('ceph/public', 'network')
  $rgw_ip_address       = get_network_role_property('ceph/radosgw', 'ipaddr')

  # Apache and listen ports
  class { 'osnailyfacter::apache':
    listen_ports => hiera_array('apache_ports', ['80', '8888']),
  }
  if ($::osfamily == 'Debian') {
    apache::mod { 'rewrite': }
    apache::mod { 'fastcgi': }
  }
  include ::tweaks::apache_wrappers
  include ceph::params

  $haproxy_stats_url = "http://${service_endpoint}:10000/;csv"

  haproxy_backend_status { 'keystone-admin':
    name  => 'keystone-2',
    count => '200',
    step  => '6',
    url   => $haproxy_stats_url,
  }

  haproxy_backend_status { 'keystone-public':
    name  => 'keystone-1',
    count => '200',
    step  => '6',
    url   => $haproxy_stats_url,
  }

  Haproxy_backend_status['keystone-admin']  -> Class['ceph::keystone']
  Haproxy_backend_status['keystone-public'] -> Class['ceph::keystone']

  class { 'ceph::radosgw':
    # SSL
    use_ssl    => false,
    public_ssl => $public_ssl_hash['services'],

    # Ceph
    primary_mon => $primary_mon,
    pub_ip      => $public_vip,
    adm_ip      => $management_vip,
    int_ip      => $management_vip,

    # RadosGW settings
    rgw_host            => $::hostname,
    rgw_ip              => $rgw_ip_address,
    rgw_port            => '6780',
    swift_endpoint_port => '8080',
    rgw_keyring_path    => '/etc/ceph/keyring.radosgw.gateway',
    rgw_socket_path     => '/tmp/radosgw.sock',
    rgw_log_file        => '/var/log/ceph/radosgw.log',
    rgw_data            => '/var/lib/ceph/radosgw',
    rgw_dns_name        => "*.${::domain}",
    rgw_print_continue  => true,

    # RadosGW Keystone settings
    rgw_use_pki                      => false,
    rgw_use_keystone                 => true,
    rgw_keystone_url                 => "${service_endpoint}:35357",
    rgw_keystone_admin_token         => $keystone_hash['admin_token'],
    rgw_keystone_token_cache_size    => '10',
    rgw_keystone_accepted_roles      => '_member_, Member, admin, swiftoperator',
    rgw_keystone_revocation_interval => '1000000',
    rgw_nss_db_path                  => '/etc/ceph/nss',

    # RadosGW log settings
    use_syslog      => hiera('use_syslog', true),
    syslog_facility => hiera('syslog_log_facility_ceph', 'LOG_LOCAL0'),
    syslog_level    => hiera('syslog_log_level_ceph', 'info'),
  }

  Exec { path => [ '/bin/', '/sbin/', '/usr/bin/', '/usr/sbin/' ],
    cwd => '/root',
  }
}
f2s/resources/ceph-radosgw/meta.yaml (new file, 26 lines)
@@ -0,0 +1,26 @@
id: ceph-radosgw
handler: puppetv2
version: '8.0'
inputs:
  ceph_monitor_nodes:
    value: null
  fqdn:
    value: null
  keystone:
    value: null
  management_vip:
    value: null
  public_ssl:
    value: null
  public_vip:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
  service_endpoint:
    value: null
  storage:
    value: null
  use_neutron:
    value: null
f2s/resources/ceph_create_pools/actions/run.pp (new file, 80 lines)
@@ -0,0 +1,80 @@
notice('MODULAR: ceph/ceph_pools')

$storage_hash             = hiera('storage', {})
$osd_pool_default_pg_num  = $storage_hash['pg_num']
$osd_pool_default_pgp_num = $storage_hash['pg_num']
# Cinder settings
$cinder_user = 'volumes'
$cinder_pool = 'volumes'
# Cinder Backup settings
$cinder_backup_user = 'backups'
$cinder_backup_pool = 'backups'
# Glance settings
$glance_user = 'images'
$glance_pool = 'images'

Exec { path => [ '/bin/', '/sbin/', '/usr/bin/', '/usr/sbin/' ],
  cwd => '/root',
}

# DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279
ceph::pool { $glance_pool:
  user          => $glance_user,
  acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${glance_pool}'",
  keyring_owner => 'glance',
  pg_num        => $osd_pool_default_pg_num,
  pgp_num       => $osd_pool_default_pg_num,
}

ceph::pool { $cinder_pool:
  user          => $cinder_user,
  acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}'",
  keyring_owner => 'cinder',
  pg_num        => $osd_pool_default_pg_num,
  pgp_num       => $osd_pool_default_pg_num,
}

ceph::pool { $cinder_backup_pool:
  user          => $cinder_backup_user,
  acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_backup_pool}, allow rx pool=${cinder_pool}'",
  keyring_owner => 'cinder',
  pg_num        => $osd_pool_default_pg_num,
  pgp_num       => $osd_pool_default_pg_num,
}

Ceph::Pool[$glance_pool] -> Ceph::Pool[$cinder_pool] -> Ceph::Pool[$cinder_backup_pool]

if ($storage_hash['volumes_ceph']) {
  include ::cinder::params
  service { 'cinder-volume':
    ensure     => 'running',
    name       => $::cinder::params::volume_service,
    hasstatus  => true,
    hasrestart => true,
  }

  Ceph::Pool[$cinder_pool] ~> Service['cinder-volume']

  service { 'cinder-backup':
    ensure     => 'running',
    name       => $::cinder::params::backup_service,
    hasstatus  => true,
    hasrestart => true,
  }

  Ceph::Pool[$cinder_backup_pool] ~> Service['cinder-backup']
}

if ($storage_hash['images_ceph']) {
  include ::glance::params
  service { 'glance-api':
    ensure     => 'running',
    name       => $::glance::params::api_service_name,
    hasstatus  => true,
    hasrestart => true,
  }

  Ceph::Pool[$glance_pool] ~> Service['glance-api']
}
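Note: two different chaining arrows do the work above: -> only orders pool creation, while ~> additionally sends a refresh so the consuming service restarts when its pool resource changes. Sketch of the distinction (the exec names are illustrative; the pool/service pair is the one declared above):

    Exec['step-one'] -> Exec['step-two']              # ordering only
    Ceph::Pool['volumes'] ~> Service['cinder-volume'] # ordering plus restart on change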
f2s/resources/ceph_create_pools/meta.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
id: ceph_create_pools
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
  storage:
    value: null
f2s/resources/cinder-db/actions/run.pp (new file, 53 lines)
@@ -0,0 +1,53 @@
notice('MODULAR: cinder/db.pp')

$cinder_hash    = hiera_hash('cinder', {})
$mysql_hash     = hiera_hash('mysql_hash', {})
$management_vip = hiera('management_vip', undef)
$database_vip   = hiera('database_vip', undef)

$mysql_root_user     = pick($mysql_hash['root_user'], 'root')
$mysql_db_create     = pick($mysql_hash['db_create'], true)
$mysql_root_password = $mysql_hash['root_password']

$db_user     = pick($cinder_hash['db_user'], 'cinder')
$db_name     = pick($cinder_hash['db_name'], 'cinder')
$db_password = pick($cinder_hash['db_password'], $mysql_root_password)

$db_host          = pick($cinder_hash['db_host'], $database_vip)
$db_create        = pick($cinder_hash['db_create'], $mysql_db_create)
$db_root_user     = pick($cinder_hash['root_user'], $mysql_root_user)
$db_root_password = pick($cinder_hash['root_password'], $mysql_root_password)

$allowed_hosts = [ $::hostname, 'localhost', '127.0.0.1', '%' ]

validate_string($mysql_root_user)

if $db_create {

  class { 'galera::client':
    custom_setup_class => hiera('mysql_custom_setup_class', 'galera'),
  }

  class { 'cinder::db::mysql':
    user          => $db_user,
    password      => $db_password,
    dbname        => $db_name,
    allowed_hosts => $allowed_hosts,
  }

  class { 'osnailyfacter::mysql_access':
    db_host     => $db_host,
    db_user     => $db_root_user,
    db_password => $db_root_password,
  }

  Class['galera::client'] ->
  Class['osnailyfacter::mysql_access'] ->
  Class['cinder::db::mysql']

}

class mysql::config {}
include mysql::config
class mysql::server {}
include mysql::server
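Note: the empty mysql::config and mysql::server declarations at the bottom look odd but are presumably deliberate: defining stub classes lets relationships elsewhere that reference Class['mysql::server'] resolve even on nodes where the real MySQL module is never applied. That reading is an interpretation, not something the commit documents:

    # stub, so e.g. "require => Class['mysql::server']" elsewhere still resolves
    class mysql::server {}
    include mysql::server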
f2s/resources/cinder-db/meta.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
id: cinder-db
handler: puppetv2
version: '8.0'
inputs:
  cinder:
    value: null
  database_vip:
    value: null
  fqdn:
    value: null
  management_vip:
    value: null
  mysql_custom_setup_class:
    value: null
  mysql_hash:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
f2s/resources/cinder-keystone/actions/run.pp (new file, 51 lines)
@@ -0,0 +1,51 @@
notice('MODULAR: cinder/keystone.pp')

$cinder_hash     = hiera_hash('cinder', {})
$public_ssl_hash = hiera('public_ssl')
$public_vip      = hiera('public_vip')
$public_address  = $public_ssl_hash['services'] ? {
  true    => $public_ssl_hash['hostname'],
  default => $public_vip,
}
$public_protocol = $public_ssl_hash['services'] ? {
  true    => 'https',
  default => 'http',
}
$admin_protocol = 'http'
$admin_address  = hiera('management_vip')
$region         = pick($cinder_hash['region'], hiera('region', 'RegionOne'))

$password            = $cinder_hash['user_password']
$auth_name           = pick($cinder_hash['auth_name'], 'cinder')
$configure_endpoint  = pick($cinder_hash['configure_endpoint'], true)
$configure_user      = pick($cinder_hash['configure_user'], true)
$configure_user_role = pick($cinder_hash['configure_user_role'], true)
$service_name        = pick($cinder_hash['service_name'], 'cinder')
$tenant              = pick($cinder_hash['tenant'], 'services')

$port = '8776'

$public_url = "${public_protocol}://${public_address}:${port}/v1/%(tenant_id)s"
$admin_url  = "${admin_protocol}://${admin_address}:${port}/v1/%(tenant_id)s"

$public_url_v2 = "${public_protocol}://${public_address}:${port}/v2/%(tenant_id)s"
$admin_url_v2  = "${admin_protocol}://${admin_address}:${port}/v2/%(tenant_id)s"

validate_string($public_address)
validate_string($password)

class { '::cinder::keystone::auth':
  password            => $password,
  auth_name           => $auth_name,
  configure_endpoint  => $configure_endpoint,
  configure_user      => $configure_user,
  configure_user_role => $configure_user_role,
  service_name        => $service_name,
  public_url          => $public_url,
  internal_url        => $admin_url,
  admin_url           => $admin_url,
  public_url_v2       => $public_url_v2,
  internal_url_v2     => $admin_url_v2,
  admin_url_v2        => $admin_url_v2,
  region              => $region,
}
f2s/resources/cinder-keystone/meta.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
id: cinder-keystone
handler: puppetv2
version: '8.0'
inputs:
  cinder:
    value: null
  fqdn:
    value: null
  management_vip:
    value: null
  public_ssl:
    value: null
  public_vip:
    value: null
  puppet_modules:
    value: null
  region:
    value: null
  role:
    value: null
f2s/resources/cluster-haproxy/actions/run.pp (new file, 20 lines)
@@ -0,0 +1,20 @@
notice('MODULAR: cluster-haproxy.pp')

$network_scheme     = hiera('network_scheme', {})
$management_vip     = hiera('management_vip')
$database_vip       = hiera('database_vip', '')
$service_endpoint   = hiera('service_endpoint', '')
$primary_controller = hiera('primary_controller')
$haproxy_hash       = hiera_hash('haproxy', {})

#FIXME(mattymo): Replace with only VIPs for roles assigned to this node
$stats_ipaddresses = delete_undef_values([$management_vip, $database_vip, $service_endpoint, '127.0.0.1'])

class { 'cluster::haproxy':
  haproxy_maxconn    => '16000',
  haproxy_bufsize    => '32768',
  primary_controller => $primary_controller,
  debug              => pick($haproxy_hash['debug'], hiera('debug', false)),
  other_networks     => direct_networks($network_scheme['endpoints']),
  stats_ipaddresses  => $stats_ipaddresses,
}
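Note: delete_undef_values() (puppetlabs-stdlib) strips undef entries from a list, keeping the stats listener list valid when some of the VIPs are not defined. Illustrative example:

    $addresses = delete_undef_values(['10.20.0.2', undef, '127.0.0.1'])
    # $addresses is now ['10.20.0.2', '127.0.0.1']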
f2s/resources/cluster-haproxy/meta.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
id: cluster-haproxy
handler: puppetv2
version: '8.0'
inputs:
  database_vip:
    value: null
  debug:
    value: null
  fqdn:
    value: null
  haproxy:
    value: null
  management_vip:
    value: null
  network_scheme:
    value: null
  primary_controller:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
  service_endpoint:
    value: null
f2s/resources/cluster-vrouter/actions/run.pp (new file, 7 lines)
@@ -0,0 +1,7 @@
notice('MODULAR: cluster-vrouter.pp')

$network_scheme = hiera('network_scheme', {})

class { 'cluster::vrouter_ocf':
  other_networks => direct_networks($network_scheme['endpoints']),
}
f2s/resources/cluster-vrouter/meta.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
id: cluster-vrouter
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  network_scheme:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
f2s/resources/cluster/actions/run.pp (new file, 49 lines)
@@ -0,0 +1,49 @@
notice('MODULAR: cluster.pp')

if !(hiera('role') in hiera('corosync_roles')) {
  fail('The node role is not in corosync roles')
}

prepare_network_config(hiera_hash('network_scheme'))

$corosync_nodes = corosync_nodes(
  get_nodes_hash_by_roles(
    hiera_hash('network_metadata'),
    hiera('corosync_roles')
  ),
  'mgmt/corosync'
)

class { 'cluster':
  internal_address => get_network_role_property('mgmt/corosync', 'ipaddr'),
  corosync_nodes   => $corosync_nodes,
}

pcmk_nodes { 'pacemaker':
  nodes               => $corosync_nodes,
  add_pacemaker_nodes => false,
}

Service <| title == 'corosync' |> {
  subscribe => File['/etc/corosync/service.d'],
  require   => File['/etc/corosync/corosync.conf'],
}

Service['corosync'] -> Pcmk_nodes<||>
Pcmk_nodes<||> -> Service<| provider == 'pacemaker' |>

# Sometimes during the first start pacemaker cannot connect to corosync
# over IPC because the pacemaker and corosync processes run as different users
if ($::operatingsystem == 'Ubuntu') {
  $pacemaker_run_uid = 'hacluster'
  $pacemaker_run_gid = 'haclient'

  file { '/etc/corosync/uidgid.d/pacemaker':
    content => "uidgid {
  uid: ${pacemaker_run_uid}
  gid: ${pacemaker_run_gid}
}"
  }

  File['/etc/corosync/corosync.conf'] -> File['/etc/corosync/uidgid.d/pacemaker'] -> Service <| title == 'corosync' |>
}
f2s/resources/cluster/meta.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
id: cluster
handler: puppetv2
version: '8.0'
inputs:
  corosync_roles:
    value: null
  fqdn:
    value: null
  network_metadata:
    value: null
  network_scheme:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
f2s/resources/cluster_health/actions/run.pp (new file, 20 lines)
@@ -0,0 +1,20 @@
notice('MODULAR: cluster/health.pp')

if !(hiera('role') in hiera('corosync_roles')) {
  fail('The node role is not in corosync roles')
}

# load the mounted filesystems from our custom fact, remove /boot
$mount_points = delete(split($::mounts, ','), '/boot')

$disks            = hiera('corosync_disks', $mount_points)
$min_disk_free    = hiera('corosync_min_disk_space', '512M')
$disk_unit        = hiera('corosync_disk_unit', 'M')
$monitor_interval = hiera('corosync_disk_monitor_interval', '15s')

class { 'cluster::sysinfo':
  disks            => $disks,
  min_disk_free    => $min_disk_free,
  disk_unit        => $disk_unit,
  monitor_interval => $monitor_interval,
}
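Note: $::mounts appears to be a custom fact (shipped with the Fuel modules) exposing mount points as one comma-separated string; split() turns it into an array and delete() (stdlib) drops /boot. A walk-through with a made-up fact value:

    # hypothetical fact value: '/,/boot,/var/lib/mysql'
    $mount_points = delete(split('/,/boot,/var/lib/mysql', ','), '/boot')
    # $mount_points is now ['/', '/var/lib/mysql']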
f2s/resources/cluster_health/meta.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
id: cluster_health
handler: puppetv2
version: '8.0'
inputs:
  corosync_disk_monitor:
    value: null
  corosync_disk_monitor_interval:
    value: null
  corosync_disk_unit:
    value: null
  corosync_disks:
    value: null
  corosync_min_disk_space:
    value: null
  corosync_monitor_interval:
    value: null
  corosync_roles:
    value: null
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
f2s/resources/configure_default_route/actions/run.pp (new file, 11 lines)
@@ -0,0 +1,11 @@
notice('MODULAR: configure_default_route.pp')

$network_scheme         = hiera('network_scheme')
$management_vrouter_vip = hiera('management_vrouter_vip')

prepare_network_config($network_scheme)
$management_int = get_network_role_property('management', 'interface')
$fw_admin_int   = get_network_role_property('fw-admin', 'interface')
$ifconfig       = configure_default_route($network_scheme, $management_vrouter_vip, $fw_admin_int, $management_int)

notice($ifconfig)
f2s/resources/configure_default_route/meta.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
id: configure_default_route
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
f2s/resources/connectivity_tests/actions/run.pp (new file, 5 lines)
@@ -0,0 +1,5 @@
notice('MODULAR: connectivity_tests.pp')
# Pull the list of repos from hiera
$repo_setup = hiera('repo_setup')
# test that the repos are accessible
url_available($repo_setup['repos'])
f2s/resources/connectivity_tests/meta.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
id: connectivity_tests
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  repo_setup:
    value: null
  role:
    value: null
f2s/resources/conntrackd/actions/run.pp (new file, 79 lines)
@@ -0,0 +1,79 @@
notice('MODULAR: conntrackd.pp')

prepare_network_config(hiera('network_scheme', {}))
$vrouter_name = hiera('vrouter_name', 'pub')

case $operatingsystem {
  Centos: { $conntrackd_package = 'conntrack-tools' }
  Ubuntu: { $conntrackd_package = 'conntrackd' }
}

### CONNTRACKD for CentOS 6 doesn't work under namespaces ##

if $operatingsystem == 'Ubuntu' {
  $bind_address = get_network_role_property('mgmt/vip', 'ipaddr')
  $mgmt_bridge  = get_network_role_property('mgmt/vip', 'interface')

  package { $conntrackd_package:
    ensure => installed,
  } ->

  file { '/etc/conntrackd/conntrackd.conf':
    content => template('cluster/conntrackd.conf.erb'),
  } ->

  cs_resource { 'p_conntrackd':
    ensure          => present,
    primitive_class => 'ocf',
    provided_by     => 'fuel',
    primitive_type  => 'ns_conntrackd',
    metadata        => {
      'migration-threshold' => 'INFINITY',
      'failure-timeout'     => '180s'
    },
    parameters      => {
      'bridge' => $mgmt_bridge,
    },
    complex_type    => 'master',
    ms_metadata     => {
      'notify'          => 'true',
      'ordered'         => 'false',
      'interleave'      => 'true',
      'clone-node-max'  => '1',
      'master-max'      => '1',
      'master-node-max' => '1',
      'target-role'     => 'Master'
    },
    operations      => {
      'monitor' => {
        'interval' => '30',
        'timeout'  => '60'
      },
      'monitor:Master' => {
        'role'     => 'Master',
        'interval' => '27',
        'timeout'  => '60'
      },
    },
  }

  cs_colocation { "conntrackd-with-${vrouter_name}-vip":
    primitives => [ 'master_p_conntrackd:Master', "vip__vrouter_${vrouter_name}" ],
  }

  File['/etc/conntrackd/conntrackd.conf'] -> Cs_resource['p_conntrackd'] -> Service['p_conntrackd'] -> Cs_colocation["conntrackd-with-${vrouter_name}-vip"]

  service { 'p_conntrackd':
    ensure   => 'running',
    enable   => true,
    provider => 'pacemaker',
  }

  # Workaround to ensure log is rotated properly
  file { '/etc/logrotate.d/conntrackd':
    content => template('openstack/95-conntrackd.conf.erb'),
  }

  Package[$conntrackd_package] -> File['/etc/logrotate.d/conntrackd']
}
f2s/resources/conntrackd/meta.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
id: conntrackd
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  network_scheme:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
  vrouter_name:
    value: null
f2s/resources/controller_remaining_tasks/actions/run.pp (new file, 49 lines)
@@ -0,0 +1,49 @@
notice('MODULAR: controller.pp')

# Pulling hiera
$primary_controller = hiera('primary_controller')
$neutron_mellanox   = hiera('neutron_mellanox', false)
$use_neutron        = hiera('use_neutron', false)

# Do the stuff
if $neutron_mellanox {
  $mellanox_mode = $neutron_mellanox['plugin']
} else {
  $mellanox_mode = 'disabled'
}

if $primary_controller {
  if ($mellanox_mode == 'ethernet') {
    $test_vm_pkg = 'cirros-testvm-mellanox'
  } else {
    $test_vm_pkg = 'cirros-testvm'
  }
  package { 'cirros-testvm':
    ensure => 'installed',
    name   => $test_vm_pkg,
  }
}

Exec { logoutput => true }

if ($mellanox_mode == 'ethernet') {
  $ml2_eswitch = $neutron_mellanox['ml2_eswitch']
  class { 'mellanox_openstack::controller':
    eswitch_vnic_type           => $ml2_eswitch['vnic_type'],
    eswitch_apply_profile_patch => $ml2_eswitch['apply_profile_patch'],
  }
}

# NOTE(bogdando) for nodes with pacemaker, we should use OCF instead of monit

# BP https://blueprints.launchpad.net/mos/+spec/include-openstackclient
package { 'python-openstackclient':
  ensure => installed,
}

# Reduce swappiness on controllers, see LP#1413702
sysctl::value { 'vm.swappiness':
  value => '10'
}

# vim: set ts=2 sw=2 et :
f2s/resources/controller_remaining_tasks/meta.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
id: controller_remaining_tasks
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  neutron_mellanox:
    value: null
  primary_controller:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
  use_neutron:
    value: null
f2s/resources/database/actions/run.pp (new file, 132 lines)
@@ -0,0 +1,132 @@
notice('MODULAR: database.pp')

prepare_network_config(hiera('network_scheme', {}))
$use_syslog         = hiera('use_syslog', true)
$primary_controller = hiera('primary_controller')
$mysql_hash         = hiera_hash('mysql', {})
$management_vip     = hiera('management_vip')
$database_vip       = hiera('database_vip', $management_vip)

$network_scheme  = hiera('network_scheme', {})
$mgmt_iface      = get_network_role_property('mgmt/database', 'interface')
$direct_networks = split(direct_networks($network_scheme['endpoints'], $mgmt_iface, 'netmask'), ' ')
$access_networks = flatten(['localhost', '127.0.0.1', '240.0.0.0/255.255.0.0', $direct_networks])

$haproxy_stats_port = '10000'
$haproxy_stats_url  = "http://${database_vip}:${haproxy_stats_port}/;csv"

$mysql_database_password = $mysql_hash['root_password']
$enabled                 = pick($mysql_hash['enabled'], true)

$galera_node_address       = get_network_role_property('mgmt/database', 'ipaddr')
$galera_nodes              = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('database_nodes'), 'mgmt/database'))
$galera_primary_controller = hiera('primary_database', $primary_controller)
$mysql_bind_address        = '0.0.0.0'
$galera_cluster_name       = 'openstack'

$mysql_skip_name_resolve = true
$custom_setup_class      = hiera('mysql_custom_setup_class', 'galera')

# Get the galera gcache factor based on the cluster node count
$galera_gcache_factor = count(unique(filter_hash(hiera('nodes', []), 'uid')))

$status_user     = 'clustercheck'
$status_password = $mysql_hash['wsrep_password']
$backend_port    = '3307'
$backend_timeout = '10'

#############################################################################
validate_string($status_password)
validate_string($mysql_database_password)

if $enabled {

  if $custom_setup_class {
    file { '/etc/mysql/my.cnf':
      ensure  => absent,
      require => Class['mysql::server']
    }
    $config_hash_real = {
      'config_file' => '/etc/my.cnf'
    }
  } else {
    $config_hash_real = { }
  }

  if '/var/lib/mysql' in split($::mounts, ',') {
    $ignore_db_dirs = ['lost+found']
  } else {
    $ignore_db_dirs = []
  }

  class { 'mysql::server':
    bind_address            => '0.0.0.0',
    etc_root_password       => true,
    root_password           => $mysql_database_password,
    old_root_password       => '',
    galera_cluster_name     => $galera_cluster_name,
    primary_controller      => $galera_primary_controller,
    galera_node_address     => $galera_node_address,
    galera_nodes            => $galera_nodes,
    galera_gcache_factor    => $galera_gcache_factor,
    enabled                 => $enabled,
    custom_setup_class      => $custom_setup_class,
    mysql_skip_name_resolve => $mysql_skip_name_resolve,
    use_syslog              => $use_syslog,
    config_hash             => $config_hash_real,
    ignore_db_dirs          => $ignore_db_dirs,
  }

  class { 'osnailyfacter::mysql_user':
    password        => $mysql_database_password,
    access_networks => $access_networks,
  }

  exec { 'initial_access_config':
    command => '/bin/ln -sf /etc/mysql/conf.d/password.cnf /root/.my.cnf',
  }

  if ($custom_setup_class == 'percona_packages' and $::osfamily == 'RedHat') {
    # This is a work around to prevent the conflict between the
    # MySQL-shared-wsrep package (included as a dependency for MySQL-python) and
    # the Percona shared package Percona-XtraDB-Cluster-shared-56. They both
    # provide the libmysql client libraries. Since we are requiring the
    # installation of the Percona package here before mysql::python, the python
    # client is happy and the server installation won't fail due to the
    # installation of our shared package
    package { 'Percona-XtraDB-Cluster-shared-56':
      ensure => 'present',
      before => Class['mysql::python'],
    }
  }

  $management_networks = get_routable_networks_for_network_role($network_scheme, 'mgmt/database', ' ')

  class { 'openstack::galera::status':
    status_user     => $status_user,
    status_password => $status_password,
    status_allow    => $galera_node_address,
    backend_host    => $galera_node_address,
    backend_port    => $backend_port,
    backend_timeout => $backend_timeout,
    only_from       => "127.0.0.1 240.0.0.2 ${management_networks}",
  }

  haproxy_backend_status { 'mysql':
    name => 'mysqld',
    url  => $haproxy_stats_url,
  }

  class { 'osnailyfacter::mysql_access':
    db_password => $mysql_database_password,
  }

  Class['mysql::server'] ->
  Class['osnailyfacter::mysql_user'] ->
  Exec['initial_access_config'] ->
  Class['openstack::galera::status'] ->
  Haproxy_backend_status['mysql'] ->
  Class['osnailyfacter::mysql_access']

}
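Note: the gcache factor is the number of distinct node uids in the 'nodes' structure; filter_hash() appears to be a Fuel helper that extracts one key from each node entry, while unique() and count() come from stdlib. Walk-through with made-up data:

    $uids   = ['1', '2', '2']       # as filter_hash(hiera('nodes', []), 'uid') might return
    $factor = count(unique($uids))  # => 2, used to scale the galera gcache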
f2s/resources/database/meta.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
id: database
handler: puppetv2
version: '8.0'
inputs:
  database_nodes:
    value: null
  database_vip:
    value: null
  fqdn:
    value: null
  management_vip:
    value: null
  mysql:
    value: null
  mysql_custom_setup_class:
    value: null
  network_scheme:
    value: null
  nodes:
    value: null
  primary_controller:
    value: null
  primary_database:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
  use_syslog:
    value: null
f2s/resources/disable_keystone_service_token/actions/run.pp (new file, 41 lines)
@@ -0,0 +1,41 @@
notice('MODULAR: service_token_off.pp')

####################################################################
# Used as singular by post-deployment action to disable admin_token
#

$keystone_params = hiera_hash('keystone_hash', {})

if $keystone_params['service_token_off'] {

  include ::keystone::params
  include ::tweaks::apache_wrappers

  keystone_config {
    'DEFAULT/admin_token': ensure => absent;
  }

  # Get paste.ini source
  $keystone_paste_ini = $::keystone::params::paste_config ? {
    undef   => '/etc/keystone/keystone-paste.ini',
    default => $::keystone::params::paste_config,
  }

  # Remove admin_token_auth middleware from public/admin/v3 pipelines
  exec { 'remove_admin_token_auth_middleware':
    path    => ['/bin', '/usr/bin'],
    command => "sed -i.dist 's/ admin_token_auth//' ${keystone_paste_ini}",
    onlyif  => "fgrep -q ' admin_token_auth' ${keystone_paste_ini}",
  }

  service { 'httpd':
    ensure => 'running',
    name   => $::tweaks::apache_wrappers::service_name,
    enable => true,
  }

  # Restart the service so the changes take effect
  Keystone_config<||> ~> Service['httpd']
  Exec['remove_admin_token_auth_middleware'] ~> Service['httpd']

}
f2s/resources/disable_keystone_service_token/meta.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
id: disable_keystone_service_token
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  keystone_hash:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
f2s/resources/dns-client/actions/run.pp (new file, 8 lines)
@@ -0,0 +1,8 @@
notice('MODULAR: dns-client.pp')

$management_vip = hiera('management_vrouter_vip')

class { 'osnailyfacter::resolvconf':
  management_vip => $management_vip,
}
12
f2s/resources/dns-client/meta.yaml
Normal file
@ -0,0 +1,12 @@
id: dns-client
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  management_vrouter_vip:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
16
f2s/resources/dns-server/actions/run.pp
Normal file
@ -0,0 +1,16 @@
notice('MODULAR: dns-server.pp')

$dns_servers            = hiera('external_dns')
$primary_controller     = hiera('primary_controller')
$master_ip              = hiera('master_ip')
$management_vrouter_vip = hiera('management_vrouter_vip')

class { 'osnailyfacter::dnsmasq':
  external_dns           => strip(split($dns_servers['dns_list'], ',')),
  master_ip              => $master_ip,
  management_vrouter_vip => $management_vrouter_vip,
} ->

class { 'cluster::dns_ocf':
  primary_controller => $primary_controller,
}
18
f2s/resources/dns-server/meta.yaml
Normal file
@ -0,0 +1,18 @@
id: dns-server
handler: puppetv2
version: '8.0'
inputs:
  external_dns:
    value: null
  fqdn:
    value: null
  management_vrouter_vip:
    value: null
  master_ip:
    value: null
  primary_controller:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
28
f2s/resources/dump_rabbitmq_definitions/actions/run.pp
Normal file
@ -0,0 +1,28 @@
notice('MODULAR: dump_rabbitmq_definitions.pp')

$definitions_dump_file = '/etc/rabbitmq/definitions'
$rabbit_hash = hiera_hash('rabbit_hash',
  {
    'user'     => false,
    'password' => false,
  }
)
$rabbit_enabled = pick($rabbit_hash['enabled'], true)

if ($rabbit_enabled) {
  $rabbit_api_endpoint = 'http://localhost:15672/api/definitions'
  $rabbit_credentials  = "${rabbit_hash['user']}:${rabbit_hash['password']}"

  exec { 'rabbitmq-dump-definitions':
    path    => ['/usr/bin', '/usr/sbin', '/sbin', '/bin'],
    command => "curl -u ${rabbit_credentials} ${rabbit_api_endpoint} -o ${definitions_dump_file}",
  }

  file { $definitions_dump_file:
    ensure => file,
    owner  => 'root',
    group  => 'root',
    mode   => '0600',
  }
}
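# Illustration (not part of the commit): the file written above is the
# RabbitMQ management plugin's JSON export (users, vhosts, permissions,
# queues, exchanges, bindings). Assuming the same credentials, it could be
# restored elsewhere by POSTing it back to the same API, e.g.:
#   curl -u user:pass -H 'content-type:application/json' -X POST \
#     -d @/etc/rabbitmq/definitions http://localhost:15672/api/definitions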
12
f2s/resources/dump_rabbitmq_definitions/meta.yaml
Normal file
@ -0,0 +1,12 @@
id: dump_rabbitmq_definitions
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  rabbit_hash:
    value: null
  role:
    value: null
10
f2s/resources/enable_cinder_volume_service/actions/run.pp
Normal file
@ -0,0 +1,10 @@
include cinder::params

$volume_service = $::cinder::params::volume_service

service { $volume_service:
  ensure     => running,
  enable     => true,
  hasstatus  => true,
  hasrestart => true,
}
10
f2s/resources/enable_cinder_volume_service/meta.yaml
Normal file
@ -0,0 +1,10 @@
id: enable_cinder_volume_service
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
10
f2s/resources/enable_nova_compute_service/actions/run.pp
Normal file
@ -0,0 +1,10 @@
include nova::params

$compute_service_name = $::nova::params::compute_service_name

service { $compute_service_name:
  ensure     => running,
  enable     => true,
  hasstatus  => true,
  hasrestart => true,
}
10
f2s/resources/enable_nova_compute_service/meta.yaml
Normal file
@ -0,0 +1,10 @@
id: enable_nova_compute_service
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
17
f2s/resources/enable_rados/actions/run.pp
Normal file
@ -0,0 +1,17 @@
include ::ceph::params

$radosgw_service = $::ceph::params::service_radosgw

# ensure the service is running and will start on boot
service { $radosgw_service:
  ensure => running,
  enable => true,
}

# The Ubuntu upstart script is incompatible with the upstart provider.
# This forces the service to fall back to the debian init script.
if ($::operatingsystem == 'Ubuntu') {
  Service['radosgw'] {
    provider => 'debian'
  }
}
10
f2s/resources/enable_rados/meta.yaml
Normal file
@ -0,0 +1,10 @@
id: enable_rados
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
132
f2s/resources/firewall/actions/run.pp
Normal file
@ -0,0 +1,132 @@
notice('MODULAR: firewall.pp')

$network_scheme = hiera_hash('network_scheme')
$ironic_hash    = hiera_hash('ironic', {})

# Workaround for fuel bug with firewall
firewall {'003 remote rabbitmq ':
  sport   => [ 4369, 5672, 41055, 55672, 61613 ],
  source  => hiera('master_ip'),
  proto   => 'tcp',
  action  => 'accept',
  require => Class['openstack::firewall'],
}

firewall {'004 remote puppet ':
  sport   => [ 8140 ],
  source  => hiera('master_ip'),
  proto   => 'tcp',
  action  => 'accept',
  require => Class['openstack::firewall'],
}

# allow local rabbitmq admin traffic for LP#1383258
firewall {'005 local rabbitmq admin':
  sport   => [ 15672 ],
  iniface => 'lo',
  proto   => 'tcp',
  action  => 'accept',
  require => Class['openstack::firewall'],
}

# reject all non-local rabbitmq admin traffic for LP#1450443
firewall {'006 reject non-local rabbitmq admin':
  sport   => [ 15672 ],
  proto   => 'tcp',
  action  => 'drop',
  require => Class['openstack::firewall'],
}

# allow connections from haproxy namespace
firewall {'030 allow connections from haproxy namespace':
  source  => '240.0.0.2',
  action  => 'accept',
  require => Class['openstack::firewall'],
}

prepare_network_config(hiera_hash('network_scheme'))
class { 'openstack::firewall' :
  nova_vnc_ip_range => get_routable_networks_for_network_role($network_scheme, 'nova/api'),
  nova_api_ip_range => get_network_role_property('nova/api', 'network'),
  libvirt_network   => get_network_role_property('management', 'network'),
  keystone_network  => get_network_role_property('keystone/api', 'network'),
}

if $ironic_hash['enabled'] {
  $nodes_hash        = hiera('nodes', {})
  $roles             = node_roles($nodes_hash, hiera('uid'))
  $network_metadata  = hiera_hash('network_metadata', {})
  $baremetal_int     = get_network_role_property('ironic/baremetal', 'interface')
  $baremetal_vip     = $network_metadata['vips']['baremetal']['ipaddr']
  $baremetal_ipaddr  = get_network_role_property('ironic/baremetal', 'ipaddr')
  $baremetal_network = get_network_role_property('ironic/baremetal', 'network')

  firewallchain { 'baremetal:filter:IPv4':
    ensure => present,
  } ->
  firewall { '999 drop all baremetal':
    chain  => 'baremetal',
    action => 'drop',
    proto  => 'all',
  } ->
  firewall {'00 baremetal-filter':
    proto   => 'all',
    iniface => $baremetal_int,
    jump    => 'baremetal',
    require => Class['openstack::firewall'],
  }

  if member($roles, 'controller') or member($roles, 'primary-controller') {
    firewall { '100 allow baremetal ping from VIP':
      chain       => 'baremetal',
      source      => $baremetal_vip,
      destination => $baremetal_ipaddr,
      proto       => 'icmp',
      icmp        => 'echo-request',
      action      => 'accept',
    }
    firewall { '207 ironic-api' :
      dport  => '6385',
      proto  => 'tcp',
      action => 'accept',
    }
  }

  if member($roles, 'ironic') {
    firewall { '101 allow baremetal-related':
      chain       => 'baremetal',
      source      => $baremetal_network,
      destination => $baremetal_ipaddr,
      proto       => 'all',
      state       => ['RELATED', 'ESTABLISHED'],
      action      => 'accept',
    }

    firewall { '102 allow baremetal-rsyslog':
      chain       => 'baremetal',
      source      => $baremetal_network,
      destination => $baremetal_ipaddr,
      proto       => 'udp',
      dport       => '514',
      action      => 'accept',
    }

    firewall { '103 allow baremetal-TFTP':
      chain       => 'baremetal',
      source      => $baremetal_network,
      destination => $baremetal_ipaddr,
      proto       => 'udp',
      dport       => '69',
      action      => 'accept',
    }

    k_mod {'nf_conntrack_tftp':
      ensure => 'present'
    }

    file_line {'nf_conntrack_tftp_on_boot':
      path => '/etc/modules',
      line => 'nf_conntrack_tftp',
    }
  }
}
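# Illustration (not part of the commit): the firewallchain/firewall resources
# above translate roughly into the following iptables layout (the interface
# name is whatever get_network_role_property() returns for ironic/baremetal):
#   iptables -N baremetal
#   iptables -A baremetal ... -j ACCEPT   # per-role accept rules (100-103)
#   iptables -A baremetal -j DROP         # '999 drop all baremetal'
#   iptables -A INPUT -i <baremetal_int> -j baremetal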
16
f2s/resources/firewall/meta.yaml
Normal file
@ -0,0 +1,16 @@
id: firewall
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  ironic:
    value: null
  master_ip:
    value: null
  network_scheme:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
10
f2s/resources/fuel_pkgs/actions/run.pp
Normal file
@ -0,0 +1,10 @@
notice('MODULAR: fuel_pkgs.pp')

$fuel_packages = [
  'fuel-ha-utils',
  'fuel-misc',
]

package { $fuel_packages :
  ensure => 'latest',
}
10
f2s/resources/fuel_pkgs/meta.yaml
Normal file
@ -0,0 +1,10 @@
id: fuel_pkgs
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
49
f2s/resources/generate_vms/actions/run.pp
Normal file
@ -0,0 +1,49 @@
notice('MODULAR: generate_vms.pp')

$libvirt_dir          = '/etc/libvirt/qemu'
$template_dir         = '/var/lib/nova'
$packages             = ['qemu-utils', 'qemu-kvm', 'libvirt-bin', 'xmlstarlet']
$libvirt_service_name = 'libvirtd'

$vms = hiera_array('vms_conf')

define vm_config {
  $details = $name
  $id      = $details['id']

  file { "${template_dir}/template_${id}_vm.xml":
    owner   => 'root',
    group   => 'root',
    content => template('osnailyfacter/vm_libvirt.erb'),
  }
}

package { $packages:
  ensure => 'installed',
}

service { $libvirt_service_name:
  ensure  => 'running',
  require => Package[$packages],
  before  => Exec['generate_vms'],
}

file { "${libvirt_dir}/autostart":
  ensure  => 'directory',
  require => Package[$packages],
}

file { $template_dir:
  ensure => 'directory',
}

vm_config { $vms:
  before  => Exec['generate_vms'],
  require => File[$template_dir],
}

exec { 'generate_vms':
  command => "/usr/bin/generate_vms.sh ${libvirt_dir} ${template_dir}",
  path    => ['/usr/sbin', '/usr/bin', '/sbin', '/bin'],
  require => [File[$template_dir], File["${libvirt_dir}/autostart"]],
}
10
f2s/resources/generate_vms/meta.yaml
Normal file
@ -0,0 +1,10 @@
id: generate_vms
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
53
f2s/resources/glance-db/actions/run.pp
Normal file
@ -0,0 +1,53 @@
notice('MODULAR: glance/db.pp')

$glance_hash    = hiera_hash('glance', {})
$mysql_hash     = hiera_hash('mysql', {})
$management_vip = hiera('management_vip')
$database_vip   = hiera('database_vip')

$mysql_root_user     = pick($mysql_hash['root_user'], 'root')
$mysql_db_create     = pick($mysql_hash['db_create'], true)
$mysql_root_password = $mysql_hash['root_password']

$db_user     = pick($glance_hash['db_user'], 'glance')
$db_name     = pick($glance_hash['db_name'], 'glance')
$db_password = pick($glance_hash['db_password'], $mysql_root_password)

$db_host          = pick($glance_hash['db_host'], $database_vip)
$db_create        = pick($glance_hash['db_create'], $mysql_db_create)
$db_root_user     = pick($glance_hash['root_user'], $mysql_root_user)
$db_root_password = pick($glance_hash['root_password'], $mysql_root_password)

$allowed_hosts = [ hiera('node_name'), 'localhost', '127.0.0.1', '%' ]

validate_string($mysql_root_user)
validate_string($database_vip)

if $db_create {
  class { 'galera::client':
    custom_setup_class => hiera('mysql_custom_setup_class', 'galera'),
  }

  class { 'glance::db::mysql':
    user          => $db_user,
    password      => $db_password,
    dbname        => $db_name,
    allowed_hosts => $allowed_hosts,
  }

  class { 'osnailyfacter::mysql_access':
    db_host     => $db_host,
    db_user     => $db_root_user,
    db_password => $db_root_password,
  }

  Class['galera::client'] ->
  Class['osnailyfacter::mysql_access'] ->
  Class['glance::db::mysql']
}

# Empty stub classes so this task does not pull in the real mysql
# server/config manifests.
class mysql::config {}
include mysql::config
class mysql::server {}
include mysql::server
22
f2s/resources/glance-db/meta.yaml
Normal file
@ -0,0 +1,22 @@
id: glance-db
handler: puppetv2
version: '8.0'
inputs:
  database_vip:
    value: null
  fqdn:
    value: null
  glance:
    value: null
  management_vip:
    value: null
  mysql:
    value: null
  mysql_custom_setup_class:
    value: null
  node_name:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
42
f2s/resources/glance-keystone/actions/run.pp
Normal file
@ -0,0 +1,42 @@
notice('MODULAR: glance/keystone.pp')

$glance_hash         = hiera_hash('glance', {})
$public_vip          = hiera('public_vip')
$public_ssl_hash     = hiera('public_ssl')
$admin_address       = hiera('management_vip')
$region              = pick($glance_hash['region'], hiera('region', 'RegionOne'))
$password            = $glance_hash['user_password']
$auth_name           = pick($glance_hash['auth_name'], 'glance')
$configure_endpoint  = pick($glance_hash['configure_endpoint'], true)
$configure_user      = pick($glance_hash['configure_user'], true)
$configure_user_role = pick($glance_hash['configure_user_role'], true)
$service_name        = pick($glance_hash['service_name'], 'glance')
$tenant              = pick($glance_hash['tenant'], 'services')

$public_address = $public_ssl_hash['services'] ? {
  true    => $public_ssl_hash['hostname'],
  default => $public_vip,
}
$public_protocol = $public_ssl_hash['services'] ? {
  true    => 'https',
  default => 'http',
}

$public_url = "${public_protocol}://${public_address}:9292"
$admin_url  = "http://${admin_address}:9292"

validate_string($public_address)
validate_string($password)

class { '::glance::keystone::auth':
  password            => $password,
  auth_name           => $auth_name,
  configure_endpoint  => $configure_endpoint,
  configure_user      => $configure_user,
  configure_user_role => $configure_user_role,
  service_name        => $service_name,
  public_url          => $public_url,
  admin_url           => $admin_url,
  internal_url        => $admin_url,
  region              => $region,
}
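# Illustration (not part of the commit): with public TLS enabled and a
# made-up hostname 'public.fuel.local', the image-service endpoint would be
# registered as:
#   public_url   = https://public.fuel.local:9292
#   admin_url    = http://<management_vip>:9292  (also used as internal_url)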
20
f2s/resources/glance-keystone/meta.yaml
Normal file
@ -0,0 +1,20 @@
id: glance-keystone
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  glance:
    value: null
  management_vip:
    value: null
  public_ssl:
    value: null
  public_vip:
    value: null
  puppet_modules:
    value: null
  region:
    value: null
  role:
    value: null
128
f2s/resources/glance/actions/run.pp
Normal file
@ -0,0 +1,128 @@
notice('MODULAR: glance.pp')

$network_scheme   = hiera_hash('network_scheme', {})
$network_metadata = hiera_hash('network_metadata', {})
prepare_network_config($network_scheme)

$glance_hash         = hiera_hash('glance', {})
$verbose             = pick($glance_hash['verbose'], hiera('verbose', true))
$debug               = pick($glance_hash['debug'], hiera('debug', false))
$management_vip      = hiera('management_vip')
$database_vip        = hiera('database_vip')
$service_endpoint    = hiera('service_endpoint')
$storage_hash        = hiera('storage')
$use_syslog          = hiera('use_syslog', true)
$use_stderr          = hiera('use_stderr', false)
$syslog_log_facility = hiera('syslog_log_facility_glance')
$rabbit_hash         = hiera_hash('rabbit_hash', {})
$max_pool_size       = hiera('max_pool_size')
$max_overflow        = hiera('max_overflow')
$ceilometer_hash     = hiera_hash('ceilometer', {})
$region              = hiera('region', 'RegionOne')
$glance_endpoint     = $management_vip
$service_workers     = pick($glance_hash['glance_workers'], min(max($::processorcount, 2), 16))

$db_type          = 'mysql'
$db_host          = pick($glance_hash['db_host'], $database_vip)
$api_bind_address = get_network_role_property('glance/api', 'ipaddr')
$enabled          = true
$max_retries      = '-1'
$idle_timeout     = '3600'
$auth_uri         = "http://${service_endpoint}:5000/"

$rabbit_password     = $rabbit_hash['password']
$rabbit_user         = $rabbit_hash['user']
$rabbit_hosts        = split(hiera('amqp_hosts',''), ',')
$rabbit_virtual_host = '/'

$glance_db_user                 = pick($glance_hash['db_user'], 'glance')
$glance_db_dbname               = pick($glance_hash['db_name'], 'glance')
$glance_db_password             = $glance_hash['db_password']
$glance_user                    = pick($glance_hash['user'], 'glance')
$glance_user_password           = $glance_hash['user_password']
$glance_tenant                  = pick($glance_hash['tenant'], 'services')
$glance_vcenter_host            = $glance_hash['vc_host']
$glance_vcenter_user            = $glance_hash['vc_user']
$glance_vcenter_password        = $glance_hash['vc_password']
$glance_vcenter_datacenter      = $glance_hash['vc_datacenter']
$glance_vcenter_datastore       = $glance_hash['vc_datastore']
$glance_vcenter_image_dir       = $glance_hash['vc_image_dir']
$glance_vcenter_api_retry_count = '20'
$glance_image_cache_max_size    = $glance_hash['image_cache_max_size']
$glance_pipeline                = pick($glance_hash['pipeline'], 'keystone')
$glance_large_object_size       = pick($glance_hash['large_object_size'], '5120')

$rados_connect_timeout = '30'

if ($storage_hash['images_ceph']) {
  $glance_backend = 'ceph'
  $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ]
  $glance_show_image_direct_url = pick($glance_hash['show_image_direct_url'], true)
} elsif ($storage_hash['images_vcenter']) {
  $glance_backend = 'vmware'
  $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ]
  $glance_show_image_direct_url = pick($glance_hash['show_image_direct_url'], true)
} else {
  $glance_backend = 'swift'
  $glance_known_stores = [ 'glance.store.swift.Store', 'glance.store.http.Store' ]
  $swift_store_large_object_size = $glance_large_object_size
  $glance_show_image_direct_url = pick($glance_hash['show_image_direct_url'], false)
}

###############################################################################

class { 'openstack::glance':
  verbose                        => $verbose,
  debug                          => $debug,
  db_type                        => $db_type,
  db_host                        => $db_host,
  glance_db_user                 => $glance_db_user,
  glance_db_dbname               => $glance_db_dbname,
  glance_db_password             => $glance_db_password,
  glance_user                    => $glance_user,
  glance_user_password           => $glance_user_password,
  glance_tenant                  => $glance_tenant,
  glance_vcenter_host            => $glance_vcenter_host,
  glance_vcenter_user            => $glance_vcenter_user,
  glance_vcenter_password        => $glance_vcenter_password,
  glance_vcenter_datacenter      => $glance_vcenter_datacenter,
  glance_vcenter_datastore       => $glance_vcenter_datastore,
  glance_vcenter_image_dir       => $glance_vcenter_image_dir,
  glance_vcenter_api_retry_count => $glance_vcenter_api_retry_count,
  auth_uri                       => $auth_uri,
  keystone_host                  => $service_endpoint,
  region                         => $region,
  bind_host                      => $api_bind_address,
  enabled                        => $enabled,
  glance_backend                 => $glance_backend,
  registry_host                  => $glance_endpoint,
  use_syslog                     => $use_syslog,
  use_stderr                     => $use_stderr,
  show_image_direct_url          => $glance_show_image_direct_url,
  swift_store_large_object_size  => $swift_store_large_object_size,
  pipeline                       => $glance_pipeline,
  syslog_log_facility            => $syslog_log_facility,
  glance_image_cache_max_size    => $glance_image_cache_max_size,
  max_retries                    => $max_retries,
  max_pool_size                  => $max_pool_size,
  max_overflow                   => $max_overflow,
  idle_timeout                   => $idle_timeout,
  rabbit_password                => $rabbit_password,
  rabbit_userid                  => $rabbit_user,
  rabbit_hosts                   => $rabbit_hosts,
  rabbit_virtual_host            => $rabbit_virtual_host,
  known_stores                   => $glance_known_stores,
  ceilometer                     => $ceilometer_hash['enabled'],
  service_workers                => $service_workers,
  rados_connect_timeout          => $rados_connect_timeout,
}

####### Disable upstart startup on install #######
if($::operatingsystem == 'Ubuntu') {
  tweaks::ubuntu_service_override { 'glance-api':
    package_name => 'glance-api',
  }
  tweaks::ubuntu_service_override { 'glance-registry':
    package_name => 'glance-registry',
  }
}
46
f2s/resources/glance/meta.yaml
Normal file
@ -0,0 +1,46 @@
id: glance
handler: puppetv2
version: '8.0'
inputs:
  amqp_hosts:
    value: null
  ceilometer:
    value: null
  database_vip:
    value: null
  debug:
    value: null
  fqdn:
    value: null
  glance:
    value: null
  management_vip:
    value: null
  max_overflow:
    value: null
  max_pool_size:
    value: null
  network_metadata:
    value: null
  network_scheme:
    value: null
  puppet_modules:
    value: null
  rabbit_hash:
    value: null
  region:
    value: null
  role:
    value: null
  service_endpoint:
    value: null
  storage:
    value: null
  syslog_log_facility_glance:
    value: null
  use_stderr:
    value: null
  use_syslog:
    value: null
  verbose:
    value: null
293
f2s/resources/globals/actions/run.pp
Normal file
@ -0,0 +1,293 @@
notice('MODULAR: globals.pp')

$service_token_off = false
$globals_yaml_file = '/etc/hiera/globals.yaml'

# remove cached globals values before anything else
remove_file($globals_yaml_file)

$network_scheme = hiera_hash('network_scheme', {})
if empty($network_scheme) {
  fail("Network_scheme not given in the astute.yaml")
}
$network_metadata = hiera_hash('network_metadata', {})
if empty($network_metadata) {
  fail("Network_metadata not given in the astute.yaml")
}

$node_name = regsubst(hiera('fqdn', $::hostname), '\..*$', '')
$node = $network_metadata['nodes'][$node_name]
if empty($node) {
  fail("Node hostname is not defined in the astute.yaml")
}

prepare_network_config($network_scheme)

# DEPRECATED
$nodes_hash = hiera('nodes', {})

$deployment_mode  = hiera('deployment_mode', 'ha_compact')
$roles            = $node['node_roles']
$storage_hash     = hiera('storage', {})
$syslog_hash      = hiera('syslog', {})
$base_syslog_hash = hiera('base_syslog', {})
$sahara_hash      = hiera('sahara', {})
$murano_hash      = hiera('murano', {})
$heat_hash        = hiera_hash('heat', {})
$vcenter_hash     = hiera('vcenter', {})
$nova_hash        = hiera_hash('nova', {})
$mysql_hash       = hiera('mysql', {})
$rabbit_hash      = hiera_hash('rabbit', {})
$glance_hash      = hiera_hash('glance', {})
$swift_hash       = hiera('swift', {})
$cinder_hash      = hiera_hash('cinder', {})
$ceilometer_hash  = hiera('ceilometer', {})
$access_hash      = hiera_hash('access', {})
$mp_hash          = hiera('mp', {})
$keystone_hash    = merge({'service_token_off' => $service_token_off},
                          hiera_hash('keystone', {}))

$node_role                      = hiera('role')
$dns_nameservers                = hiera('dns_nameservers', [])
$use_ceilometer                 = $ceilometer_hash['enabled']
$use_neutron                    = hiera('quantum', false)
$use_ovs                        = hiera('use_ovs', $use_neutron)
$verbose                        = true
$debug                          = hiera('debug', false)
$use_monit                      = false
$master_ip                      = hiera('master_ip')
$use_syslog                     = hiera('use_syslog', true)
$syslog_log_facility_glance     = hiera('syslog_log_facility_glance', 'LOG_LOCAL2')
$syslog_log_facility_cinder     = hiera('syslog_log_facility_cinder', 'LOG_LOCAL3')
$syslog_log_facility_neutron    = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4')
$syslog_log_facility_nova       = hiera('syslog_log_facility_nova', 'LOG_LOCAL6')
$syslog_log_facility_keystone   = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7')
$syslog_log_facility_murano     = hiera('syslog_log_facility_murano', 'LOG_LOCAL0')
$syslog_log_facility_heat       = hiera('syslog_log_facility_heat', 'LOG_LOCAL0')
$syslog_log_facility_sahara     = hiera('syslog_log_facility_sahara', 'LOG_LOCAL0')
$syslog_log_facility_ceilometer = hiera('syslog_log_facility_ceilometer', 'LOG_LOCAL0')
$syslog_log_facility_ceph       = hiera('syslog_log_facility_ceph', 'LOG_LOCAL0')

$nova_report_interval   = hiera('nova_report_interval', 60)
$nova_service_down_time = hiera('nova_service_down_time', 180)
$apache_ports           = hiera_array('apache_ports', ['80', '8888', '5000', '35357'])

$openstack_version = hiera('openstack_version',
  {
    'keystone'   => 'installed',
    'glance'     => 'installed',
    'horizon'    => 'installed',
    'nova'       => 'installed',
    'novncproxy' => 'installed',
    'cinder'     => 'installed',
  }
)

$nova_rate_limits = hiera('nova_rate_limits',
  {
    'POST'         => 100000,
    'POST_SERVERS' => 100000,
    'PUT'          => 1000,
    'GET'          => 100000,
    'DELETE'       => 100000
  }
)

$cinder_rate_limits = hiera('cinder_rate_limits',
  {
    'POST'         => 100000,
    'POST_SERVERS' => 100000,
    'PUT'          => 100000,
    'GET'          => 100000,
    'DELETE'       => 100000
  }
)

$default_gateway        = get_default_gateways()
$public_vip             = $network_metadata['vips']['public']['ipaddr']
$management_vip         = $network_metadata['vips']['management']['ipaddr']
$public_vrouter_vip     = $network_metadata['vips']['vrouter_pub']['ipaddr']
$management_vrouter_vip = $network_metadata['vips']['vrouter']['ipaddr']

$database_vip = is_hash($network_metadata['vips']['database']) ? {
  true    => pick($network_metadata['vips']['database']['ipaddr'], $management_vip),
  default => $management_vip
}
$service_endpoint = is_hash($network_metadata['vips']['service_endpoint']) ? {
  true    => pick($network_metadata['vips']['service_endpoint']['ipaddr'], $management_vip),
  default => $management_vip
}

if $use_neutron {
  $novanetwork_params            = {}
  $neutron_config                = hiera_hash('quantum_settings')
  $network_provider              = 'neutron'
  $neutron_db_password           = $neutron_config['database']['passwd']
  $neutron_user_password         = $neutron_config['keystone']['admin_password']
  $neutron_metadata_proxy_secret = $neutron_config['metadata']['metadata_proxy_shared_secret']
  $base_mac                      = $neutron_config['L2']['base_mac']
  $management_network_range      = get_network_role_property('mgmt/vip', 'network')
} else {
  $neutron_config     = {}
  $novanetwork_params = hiera('novanetwork_parameters')
  $network_size       = $novanetwork_params['network_size']
  $num_networks       = $novanetwork_params['num_networks']
  $network_provider   = 'nova'
  if ( $novanetwork_params['network_manager'] == 'FlatDHCPManager') {
    $private_int = get_network_role_property('novanetwork/fixed', 'interface')
  } else {
    $private_int = get_network_role_property('novanetwork/vlan', 'interface')
    $vlan_start  = $novanetwork_params['vlan_start']
    $network_config = {
      'vlan_start' => $vlan_start,
    }
  }
  $network_manager = "nova.network.manager.${novanetwork_params['network_manager']}"
  $management_network_range = hiera('management_network_range')
}

if $node_role == 'primary-controller' {
  $primary_controller = true
} else {
  $primary_controller = false
}

$controllers_hash = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
$mountpoints      = filter_hash($mp_hash, 'point')

# AMQP configuration
$queue_provider   = hiera('queue_provider', 'rabbitmq')
$rabbit_ha_queues = true

if !$rabbit_hash['user'] {
  $rabbit_hash['user'] = 'nova'
}

$amqp_port = hiera('amqp_ports', '5673')
if hiera('amqp_hosts', false) {
  # using the RabbitMQ servers pre-defined in astute.yaml
  $amqp_hosts = hiera('amqp_hosts')
} else {
  # using RabbitMQ servers on controllers
  # todo(sv): switch from 'controller' nodes to 'rmq' nodes as soon as that is implemented as an additional node-role
  $controllers_with_amqp_server = get_node_to_ipaddr_map_by_network_role($controllers_hash, 'mgmt/messaging')
  $amqp_nodes = ipsort(values($controllers_with_amqp_server))
  # amqp_hosts() randomizes the order of RMQ endpoints and puts the local one first
  $amqp_hosts = amqp_hosts($amqp_nodes, $amqp_port, get_network_role_property('mgmt/messaging', 'ipaddr'))
}

# MySQL and SQLAlchemy backend configuration
$custom_mysql_setup_class = hiera('custom_mysql_setup_class', 'galera')
$max_pool_size            = hiera('max_pool_size', min($::processorcount * 5 + 0, 30 + 0))
$max_overflow             = hiera('max_overflow', min($::processorcount * 5 + 0, 60 + 0))
$max_retries              = hiera('max_retries', '-1')
$idle_timeout             = hiera('idle_timeout', '3600')
$nova_db_password         = $nova_hash['db_password']
$sql_connection           = "mysql://nova:${nova_db_password}@${database_vip}/nova?read_timeout=60"
$mirror_type              = hiera('mirror_type', 'external')
$multi_host               = hiera('multi_host', true)

# Determine who should get the volume service
if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) {
  $manage_volumes = 'iscsi'
} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) {
  $manage_volumes = 'vmdk'
} elsif ($storage_hash['volumes_ceph']) {
  $manage_volumes = 'ceph'
} else {
  $manage_volumes = false
}

# Define ceph-related variables
$ceph_primary_monitor_node = get_nodes_hash_by_roles($network_metadata, ['primary-controller'])
$ceph_monitor_nodes        = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
$ceph_rgw_nodes            = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])

# Determine which Glance backend should be the default
if ($storage_hash['images_ceph']) {
  $glance_backend = 'ceph'
  $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ]
} elsif ($storage_hash['images_vcenter']) {
  $glance_backend = 'vmware'
  $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ]
} else {
  $glance_backend = 'file'
  $glance_known_stores = false
}

# Define ceilometer-related variables:
# todo: use special node-roles instead of controllers in the future
$ceilometer_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])

# Define memcached-related variables:
$memcache_roles = hiera('memcache_roles', ['primary-controller', 'controller'])

# Define the node roles that will carry corosync/pacemaker
$corosync_roles = hiera('corosync_roles', ['primary-controller', 'controller'])

# Define cinder-related variables
# todo: use special node-roles instead of controllers in the future
$cinder_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])

# Define horizon-related variables:
# todo: use special node-roles instead of controllers in the future
$horizon_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])

# Define swift-related variables
# todo(sv): use special node-roles instead of controllers in the future
$swift_master_role      = 'primary-controller'
$swift_nodes            = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
$swift_proxies          = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])
$swift_proxy_caches     = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) # memcache for swift
$is_primary_swift_proxy = $primary_controller

# Define murano-related variables
$murano_roles = ['primary-controller', 'controller']

# Define heat-related variables:
$heat_roles = ['primary-controller', 'controller']

# Define sahara-related variables
$sahara_roles = ['primary-controller', 'controller']

# Define ceilometer-related parameters
if !$ceilometer_hash['event_time_to_live'] { $ceilometer_hash['event_time_to_live'] = '604800'}
if !$ceilometer_hash['metering_time_to_live'] { $ceilometer_hash['metering_time_to_live'] = '604800' }
if !$ceilometer_hash['http_timeout'] { $ceilometer_hash['http_timeout'] = '600' }

# Define database-related variables:
# todo: use special node-roles instead of controllers in the future
$database_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])

# Define Nova-API variables:
# todo: use special node-roles instead of controllers in the future
$nova_api_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])

# Define mongo-related variables
$mongo_roles = ['primary-mongo', 'mongo']

# Define neutron-related variables:
# todo: use special node-roles instead of controllers in the future
$neutron_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller'])

# Define Ironic-related variables:
$ironic_api_nodes = $controllers_hash

# Change nova_hash to add the vnc port to it
# TODO(sbog): change this when we get rid of global hashes
$public_ssl_hash = hiera('public_ssl')
if $public_ssl_hash['services'] {
  $nova_hash['vncproxy_protocol'] = 'https'
} else {
  $nova_hash['vncproxy_protocol'] = 'http'
}

# save all these global variables into a hiera yaml file for later use
# by other manifests with the hiera function
file { $globals_yaml_file :
  ensure  => 'present',
  mode    => '0644',
  owner   => 'root',
  group   => 'root',
  content => template('osnailyfacter/globals_yaml.erb')
}
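# Illustration (not part of the commit): because the hiera task below puts a
# 'globals' level into the lookup hierarchy, any later task can read the
# values computed here with plain hiera lookups, e.g.:
#   $management_vip = hiera('management_vip')
#   $amqp_hosts     = hiera('amqp_hosts')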
124
f2s/resources/globals/meta.yaml
Normal file
@ -0,0 +1,124 @@
id: globals
handler: puppetv2
version: '8.0'
inputs:
  access:
    value: null
  amqp_hosts:
    value: null
  amqp_ports:
    value: null
  apache_ports:
    value: null
  base_syslog:
    value: null
  ceilometer:
    value: null
  cinder:
    value: null
  cinder_rate_limits:
    value: null
  corosync_roles:
    value: null
  custom_mysql_setup_class:
    value: null
  debug:
    value: null
  deployment_mode:
    value: null
  dns_nameservers:
    value: null
  fqdn:
    value: null
  glance:
    value: null
  heat:
    value: null
  idle_timeout:
    value: null
  keystone:
    value: null
  master_ip:
    value: null
  max_overflow:
    value: null
  max_pool_size:
    value: null
  max_retries:
    value: null
  memcache_roles:
    value: null
  mirror_type:
    value: null
  mp:
    value: null
  multi_host:
    value: null
  murano:
    value: null
  mysql:
    value: null
  network_metadata:
    value: null
  network_scheme:
    value: null
  nodes:
    value: null
  nova:
    value: null
  nova_rate_limits:
    value: null
  nova_report_interval:
    value: null
  nova_service_down_time:
    value: null
  openstack_version:
    value: null
  public_ssl:
    value: null
  puppet_modules:
    value: null
  quantum:
    value: null
  quantum_settings:
    value: null
  queue_provider:
    value: null
  rabbit:
    value: null
  role:
    value: null
  sahara:
    value: null
  storage:
    value: null
  swift:
    value: null
  syslog:
    value: null
  syslog_log_facility_ceilometer:
    value: null
  syslog_log_facility_ceph:
    value: null
  syslog_log_facility_cinder:
    value: null
  syslog_log_facility_glance:
    value: null
  syslog_log_facility_heat:
    value: null
  syslog_log_facility_keystone:
    value: null
  syslog_log_facility_murano:
    value: null
  syslog_log_facility_neutron:
    value: null
  syslog_log_facility_nova:
    value: null
  syslog_log_facility_sahara:
    value: null
  use_ovs:
    value: null
  use_syslog:
    value: null
  vcenter:
    value: null
53
f2s/resources/heat-db/actions/run.pp
Normal file
@ -0,0 +1,53 @@
notice('MODULAR: heat/db.pp')

$heat_hash      = hiera_hash('heat', {})
$mysql_hash     = hiera_hash('mysql', {})
$management_vip = hiera('management_vip', undef)
$database_vip   = hiera('database_vip', undef)

$mysql_root_user     = pick($mysql_hash['root_user'], 'root')
$mysql_db_create     = pick($mysql_hash['db_create'], true)
$mysql_root_password = $mysql_hash['root_password']

$db_user     = pick($heat_hash['db_user'], 'heat')
$db_name     = pick($heat_hash['db_name'], 'heat')
$db_password = pick($heat_hash['db_password'], $mysql_root_password)

$db_host          = pick($heat_hash['db_host'], $database_vip)
$db_create        = pick($heat_hash['db_create'], $mysql_db_create)
$db_root_user     = pick($heat_hash['root_user'], $mysql_root_user)
$db_root_password = pick($heat_hash['root_password'], $mysql_root_password)

$allowed_hosts = [ $::hostname, 'localhost', '127.0.0.1', '%' ]

validate_string($mysql_root_user)

if $db_create {

  class { 'galera::client':
    custom_setup_class => hiera('mysql_custom_setup_class', 'galera'),
  }

  class { 'heat::db::mysql':
    user          => $db_user,
    password      => $db_password,
    dbname        => $db_name,
    allowed_hosts => $allowed_hosts,
  }

  class { 'osnailyfacter::mysql_access':
    db_host     => $db_host,
    db_user     => $db_root_user,
    db_password => $db_root_password,
  }

  Class['galera::client'] ->
  Class['osnailyfacter::mysql_access'] ->
  Class['heat::db::mysql']

}

# Empty stub classes so this task does not pull in the real mysql
# server/config manifests.
class mysql::config {}
include mysql::config
class mysql::server {}
include mysql::server
f2s/resources/heat-db/meta.yaml
Normal file
20
f2s/resources/heat-db/meta.yaml
Normal file
@ -0,0 +1,20 @@
|
id: heat-db
handler: puppetv2
version: '8.0'
inputs:
  database_vip:
    value: null
  fqdn:
    value: null
  heat:
    value: null
  management_vip:
    value: null
  mysql:
    value: null
  mysql_custom_setup_class:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
59
f2s/resources/heat-keystone/actions/run.pp
Normal file
@ -0,0 +1,59 @@
notice('MODULAR: heat/keystone.pp')

$heat_hash       = hiera_hash('heat', {})
$public_vip      = hiera('public_vip')
$admin_address   = hiera('management_vip')
$region          = pick($heat_hash['region'], hiera('region', 'RegionOne'))
$public_ssl_hash = hiera('public_ssl')
$public_address  = $public_ssl_hash['services'] ? {
  true    => $public_ssl_hash['hostname'],
  default => $public_vip,
}
$public_protocol = $public_ssl_hash['services'] ? {
  true    => 'https',
  default => 'http',
}

$password            = $heat_hash['user_password']
$auth_name           = pick($heat_hash['auth_name'], 'heat')
$configure_endpoint  = pick($heat_hash['configure_endpoint'], true)
$configure_user      = pick($heat_hash['configure_user'], true)
$configure_user_role = pick($heat_hash['configure_user_role'], true)
$service_name        = pick($heat_hash['service_name'], 'heat')
$tenant              = pick($heat_hash['tenant'], 'services')

validate_string($public_address)
validate_string($password)

$public_url     = "${public_protocol}://${public_address}:8004/v1/%(tenant_id)s"
$admin_url      = "http://${admin_address}:8004/v1/%(tenant_id)s"
$public_url_cfn = "${public_protocol}://${public_address}:8000/v1"
$admin_url_cfn  = "http://${admin_address}:8000/v1"

# Note: $trusts_delegated_roles is not set anywhere in this task, so the
# class default for trusts_delegated_roles is what actually applies below.
class { '::heat::keystone::auth' :
  password               => $password,
  auth_name              => $auth_name,
  region                 => $region,
  tenant                 => $tenant,
  email                  => "${auth_name}@localhost",
  configure_endpoint     => true,
  trusts_delegated_roles => $trusts_delegated_roles,
  public_url             => $public_url,
  internal_url           => $admin_url,
  admin_url              => $admin_url,
}

class { '::heat::keystone::auth_cfn' :
  password           => $password,
  auth_name          => "${auth_name}-cfn",
  service_type       => 'cloudformation',
  region             => $region,
  tenant             => $tenant,
  email              => "${auth_name}-cfn@localhost",
  configure_endpoint => true,
  public_url         => $public_url_cfn,
  internal_url       => $admin_url_cfn,
  admin_url          => $admin_url_cfn,
}
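# Illustration (not part of the commit): '%(tenant_id)s' is a keystone
# endpoint template that clients substitute per request, so with a made-up
# public address of 10.0.0.2 a project with id 'abc123' would reach heat
# at e.g. http://10.0.0.2:8004/v1/abc123.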
20
f2s/resources/heat-keystone/meta.yaml
Normal file
@ -0,0 +1,20 @@
id: heat-keystone
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  heat:
    value: null
  management_vip:
    value: null
  public_ssl:
    value: null
  public_vip:
    value: null
  puppet_modules:
    value: null
  region:
    value: null
  role:
    value: null
156
f2s/resources/heat/actions/run.pp
Normal file
@ -0,0 +1,156 @@
notice('MODULAR: heat.pp')

prepare_network_config(hiera('network_scheme', {}))
$management_vip           = hiera('management_vip')
$heat_hash                = hiera_hash('heat', {})
$rabbit_hash              = hiera_hash('rabbit_hash', {})
$max_retries              = hiera('max_retries')
$max_pool_size            = hiera('max_pool_size')
$max_overflow             = hiera('max_overflow')
$idle_timeout             = hiera('idle_timeout')
$service_endpoint         = hiera('service_endpoint')
$debug                    = pick($heat_hash['debug'], hiera('debug', false))
$verbose                  = pick($heat_hash['verbose'], hiera('verbose', true))
$use_stderr               = hiera('use_stderr', false)
$use_syslog               = hiera('use_syslog', true)
$syslog_log_facility_heat = hiera('syslog_log_facility_heat')
$deployment_mode          = hiera('deployment_mode')
$bind_address             = get_network_role_property('heat/api', 'ipaddr')
$database_password        = $heat_hash['db_password']
$keystone_user            = pick($heat_hash['user'], 'heat')
$keystone_tenant          = pick($heat_hash['tenant'], 'services')
$db_host                  = pick($heat_hash['db_host'], hiera('database_vip'))
$database_user            = pick($heat_hash['db_user'], 'heat')
$database_name            = hiera('heat_db_name', 'heat')
$read_timeout             = '60'
$sql_connection           = "mysql://${database_user}:${database_password}@${db_host}/${database_name}?read_timeout=${read_timeout}"
$region                   = hiera('region', 'RegionOne')
$public_ssl_hash          = hiera('public_ssl')

####### Disable upstart startup on install #######
if $::operatingsystem == 'Ubuntu' {
  tweaks::ubuntu_service_override { 'heat-api-cloudwatch':
    package_name => 'heat-api-cloudwatch',
  }
  tweaks::ubuntu_service_override { 'heat-api-cfn':
    package_name => 'heat-api-cfn',
  }
  tweaks::ubuntu_service_override { 'heat-api':
    package_name => 'heat-api',
  }
  tweaks::ubuntu_service_override { 'heat-engine':
    package_name => 'heat-engine',
  }

  Tweaks::Ubuntu_service_override['heat-api'] -> Service['heat-api']
  Tweaks::Ubuntu_service_override['heat-api-cfn'] -> Service['heat-api-cfn']
  Tweaks::Ubuntu_service_override['heat-api-cloudwatch'] -> Service['heat-api-cloudwatch']
  Tweaks::Ubuntu_service_override['heat-engine'] -> Service['heat-engine']
}

class { 'openstack::heat' :
  external_ip              => $management_vip,
  keystone_auth            => pick($heat_hash['keystone_auth'], true),
  api_bind_host            => $bind_address,
  api_cfn_bind_host        => $bind_address,
  api_cloudwatch_bind_host => $bind_address,
  keystone_host            => $service_endpoint,
  keystone_user            => $keystone_user,
  keystone_password        => $heat_hash['user_password'],
  keystone_tenant          => $keystone_tenant,
  keystone_ec2_uri         => "http://${service_endpoint}:5000/v2.0",
  region                   => $region,
  public_ssl               => $public_ssl_hash['services'],
  rpc_backend              => 'rabbit',
  amqp_hosts               => split(hiera('amqp_hosts',''), ','),
  amqp_user                => $rabbit_hash['user'],
  amqp_password            => $rabbit_hash['password'],
  sql_connection           => $sql_connection,
  db_host                  => $db_host,
  db_password              => $database_password,
  max_retries              => $max_retries,
  max_pool_size            => $max_pool_size,
  max_overflow             => $max_overflow,
  idle_timeout             => $idle_timeout,
  debug                    => $debug,
  verbose                  => $verbose,
  use_syslog               => $use_syslog,
  use_stderr               => $use_stderr,
  syslog_log_facility      => $syslog_log_facility_heat,
  auth_encryption_key      => $heat_hash['auth_encryption_key'],
}

if hiera('heat_ha_engine', true){
  if ($deployment_mode == 'ha') or ($deployment_mode == 'ha_compact') {
    include ::heat_ha::engine
  }
}

#------------------------------

class heat::docker_resource (
  $enabled      = true,
  $package_name = 'heat-docker',
) {
  if $enabled {
    package { 'heat-docker':
      ensure => installed,
      name   => $package_name,
    }

    Package['heat-docker'] ~> Service<| title == 'heat-engine' |>
  }
}

if $::osfamily == 'RedHat' {
  $docker_resource_package_name = 'openstack-heat-docker'
} elsif $::osfamily == 'Debian' {
  $docker_resource_package_name = 'heat-docker'
}

class { 'heat::docker_resource' :
  package_name => $docker_resource_package_name,
}

$haproxy_stats_url = "http://${service_endpoint}:10000/;csv"

haproxy_backend_status { 'keystone-admin' :
  name  => 'keystone-2',
  count => '200',
  step  => '6',
  url   => $haproxy_stats_url,
}

class { 'heat::keystone::domain' :
  auth_url          => "http://${service_endpoint}:35357/v2.0",
  keystone_admin    => $keystone_user,
  keystone_password => $heat_hash['user_password'],
  keystone_tenant   => $keystone_tenant,
  domain_name       => 'heat',
  domain_admin      => 'heat_admin',
  domain_password   => $heat_hash['user_password'],
}

Class['heat'] ->
Haproxy_backend_status['keystone-admin'] ->
Class['heat::keystone::domain'] ~>
Service<| title == 'heat-engine' |>

######################

exec { 'wait_for_heat_config' :
  command  => 'sync && sleep 3',
  provider => 'shell',
}

Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api']
Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api-cfn']
Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api-cloudwatch']
Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-engine']

######################

# Empty stub classes so this task does not pull in the real mysql
# server/config manifests.
class mysql::server {}
class mysql::config {}
include mysql::server
include mysql::config
52
f2s/resources/heat/meta.yaml
Normal file
@ -0,0 +1,52 @@
id: heat
handler: puppetv2
version: '8.0'
inputs:
  amqp_hosts:
    value: null
  database_vip:
    value: null
  debug:
    value: null
  deployment_mode:
    value: null
  fqdn:
    value: null
  heat:
    value: null
  heat_db_name:
    value: null
  heat_ha_engine:
    value: null
  idle_timeout:
    value: null
  management_vip:
    value: null
  max_overflow:
    value: null
  max_pool_size:
    value: null
  max_retries:
    value: null
  network_scheme:
    value: null
  public_ssl:
    value: null
  puppet_modules:
    value: null
  rabbit_hash:
    value: null
  region:
    value: null
  role:
    value: null
  service_endpoint:
    value: null
  syslog_log_facility_heat:
    value: null
  use_stderr:
    value: null
  use_syslog:
    value: null
  verbose:
    value: null
75
f2s/resources/hiera/actions/run.pp
Normal file
@ -0,0 +1,75 @@
notice('MODULAR: hiera.pp')

$deep_merge_package_name = $::osfamily ? {
  /RedHat/ => 'rubygem-deep_merge',
  /Debian/ => 'ruby-deep-merge',
}

$data_dir = '/etc/hiera'
$data = [
  'override/node/%{::fqdn}',
  'override/class/%{calling_class}',
  'override/module/%{calling_module}',
  'override/plugins',
  'override/common',
  'class/%{calling_class}',
  'module/%{calling_module}',
  'nodes',
  'globals',
  'astute'
]
$astute_data_file    = '/etc/astute.yaml'
$hiera_main_config   = '/etc/hiera.yaml'
$hiera_puppet_config = '/etc/puppet/hiera.yaml'
$hiera_data_file     = "${data_dir}/astute.yaml"

File {
  owner => 'root',
  group => 'root',
  mode  => '0644',
}

$hiera_config_content = inline_template('
---
:backends:
  - yaml

:hierarchy:
<% @data.each do |name| -%>
  - <%= name %>
<% end -%>

:yaml:
  :datadir: <%= @data_dir %>
:merge_behavior: deeper
:logger: noop
')

file { 'hiera_data_dir' :
  ensure => 'directory',
  path   => $data_dir,
}

file { 'hiera_config' :
  ensure  => 'present',
  path    => $hiera_main_config,
  content => $hiera_config_content,
}

file { 'hiera_data_astute' :
  ensure => 'symlink',
  path   => $hiera_data_file,
  target => $astute_data_file,
}

file { 'hiera_puppet_config' :
  ensure => 'symlink',
  path   => $hiera_puppet_config,
  target => $hiera_main_config,
}

# needed to support the 'deeper' merge_behavior setting for hiera
package { 'rubygem-deep_merge':
  ensure => present,
  name   => $deep_merge_package_name,
}
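# Illustration (not part of the commit): rendered with the defaults above,
# /etc/hiera.yaml comes out roughly as:
#   ---
#   :backends:
#     - yaml
#   :hierarchy:
#     - override/node/%{::fqdn}
#     - override/class/%{calling_class}
#     - override/module/%{calling_module}
#     - override/plugins
#     - override/common
#     - class/%{calling_class}
#     - module/%{calling_module}
#     - nodes
#     - globals
#     - astute
#   :yaml:
#     :datadir: /etc/hiera
#   :merge_behavior: deeper
#   :logger: noop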
8
f2s/resources/hiera/meta.yaml
Normal file
@ -0,0 +1,8 @@
id: hiera
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
68
f2s/resources/horizon/actions/run.pp
Normal file
@ -0,0 +1,68 @@
notice('MODULAR: horizon.pp')

prepare_network_config(hiera('network_scheme', {}))
$horizon_hash            = hiera_hash('horizon', {})
$service_endpoint        = hiera('service_endpoint')
$memcache_nodes          = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles'))
$memcache_address_map    = get_node_to_ipaddr_map_by_network_role($memcache_nodes, 'mgmt/memcache')
$bind_address            = get_network_role_property('horizon', 'ipaddr')
$neutron_advanced_config = hiera_hash('neutron_advanced_configuration', {})
$public_ssl              = hiera('public_ssl')
$ssl_no_verify           = $public_ssl['horizon']

if $horizon_hash['secret_key'] {
  $secret_key = $horizon_hash['secret_key']
} else {
  $secret_key = 'dummy_secret_key'
}

$neutron_dvr = pick($neutron_advanced_config['neutron_dvr'], false)

$keystone_scheme = 'http'
$keystone_host   = $service_endpoint
$keystone_port   = '5000'
$keystone_api    = 'v2.0'
$keystone_url    = "${keystone_scheme}://${keystone_host}:${keystone_port}/${keystone_api}"

$neutron_options = {'enable_distributed_router' => $neutron_dvr}

class { 'openstack::horizon':
  secret_key        => $secret_key,
  cache_server_ip   => ipsort(values($memcache_address_map)),
  package_ensure    => hiera('horizon_package_ensure', 'installed'),
  bind_address      => $bind_address,
  cache_server_port => hiera('memcache_server_port', '11211'),
  cache_backend     => 'django.core.cache.backends.memcached.MemcachedCache',
  cache_options     => {'SOCKET_TIMEOUT' => 1, 'SERVER_RETRIES' => 1, 'DEAD_RETRY' => 1},
  neutron           => hiera('use_neutron'),
  keystone_url      => $keystone_url,
  use_ssl           => hiera('horizon_use_ssl', false),
  ssl_no_verify     => $ssl_no_verify,
  verbose           => pick($horizon_hash['verbose'], hiera('verbose', true)),
  debug             => pick($horizon_hash['debug'], hiera('debug')),
  use_syslog        => hiera('use_syslog', true),
  nova_quota        => hiera('nova_quota'),
  servername        => hiera('public_vip'),
  neutron_options   => $neutron_options,
}
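
# Do not proceed until HAProxy reports the keystone backends as up: the
# haproxy_backend_status type polls the stats CSV at 'url', retrying up to
# 'count' times with 'step' seconds between attempts.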
$haproxy_stats_url = "http://${service_endpoint}:10000/;csv"

haproxy_backend_status { 'keystone-admin' :
  name  => 'keystone-2',
  count => '30',
  step  => '3',
  url   => $haproxy_stats_url,
}

haproxy_backend_status { 'keystone-public' :
  name  => 'keystone-1',
  count => '30',
  step  => '3',
  url   => $haproxy_stats_url,
}

Class['openstack::horizon'] -> Haproxy_backend_status['keystone-admin']
Class['openstack::horizon'] -> Haproxy_backend_status['keystone-public']

include ::tweaks::apache_wrappers
44
f2s/resources/horizon/meta.yaml
Normal file
@ -0,0 +1,44 @@
id: horizon
handler: puppetv2
version: '8.0'
inputs:
  apache_ports:
    value: null
  debug:
    value: null
  fqdn:
    value: null
  horizon:
    value: null
  horizon_package_ensure:
    value: null
  horizon_use_ssl:
    value: null
  memcache_roles:
    value: null
  memcache_server_port:
    value: null
  network_metadata:
    value: null
  network_scheme:
    value: null
  neutron_advanced_configuration:
    value: null
  nova_quota:
    value: null
  public_ssl:
    value: null
  public_vip:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
  service_endpoint:
    value: null
  use_neutron:
    value: null
  use_syslog:
    value: null
  verbose:
    value: null
5
f2s/resources/hosts/actions/run.pp
Normal file
@ -0,0 +1,5 @@
notice('MODULAR: hosts.pp')

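# Write an /etc/hosts entry for every node in the environment so nodes can
# resolve each other by name without relying on DNS.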
class { "l23network::hosts_file":
|
||||
nodes => hiera('nodes'),
|
||||
}
|
10
f2s/resources/hosts/meta.yaml
Normal file
@ -0,0 +1,10 @@
id: hosts
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  nodes:
    value: null
  puppet_modules:
    value: null
61
f2s/resources/ironic-api/actions/run.pp
Normal file
@ -0,0 +1,61 @@
notice('MODULAR: ironic/ironic.pp')

$ironic_hash                = hiera_hash('ironic', {})
$public_vip                 = hiera('public_vip')
$management_vip             = hiera('management_vip')

$network_metadata           = hiera_hash('network_metadata', {})

$database_vip               = hiera('database_vip')
$keystone_endpoint          = hiera('service_endpoint')
$neutron_endpoint           = hiera('neutron_endpoint', $management_vip)
$glance_api_servers         = hiera('glance_api_servers', "${management_vip}:9292")
$debug                      = hiera('debug', false)
$verbose                    = hiera('verbose', true)
$use_syslog                 = hiera('use_syslog', true)
$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_USER')
$rabbit_hash                = hiera_hash('rabbit_hash', {})
$rabbit_ha_queues           = hiera('rabbit_ha_queues')
$amqp_hosts                 = hiera('amqp_hosts')
$amqp_port                  = hiera('amqp_port', '5673')
$rabbit_hosts               = split($amqp_hosts, ',')
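# amqp_hosts is a comma-separated string such as '10.0.0.2:5673,10.0.0.3:5673'
# (illustrative values); split() turns it into the array form expected below.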
$neutron_config = hiera_hash('quantum_settings')

$db_host             = pick($ironic_hash['db_host'], $database_vip)
$db_user             = pick($ironic_hash['db_user'], 'ironic')
$db_name             = pick($ironic_hash['db_name'], 'ironic')
$db_password         = pick($ironic_hash['db_password'], 'ironic')
$database_connection = "mysql://${db_user}:${db_password}@${db_host}/${db_name}?charset=utf8&read_timeout=60"

$ironic_tenant        = pick($ironic_hash['tenant'], 'services')
$ironic_user          = pick($ironic_hash['auth_name'], 'ironic')
$ironic_user_password = pick($ironic_hash['user_password'], 'ironic')

prepare_network_config(hiera('network_scheme', {}))

$baremetal_vip = $network_metadata['vips']['baremetal']['ipaddr']

class { 'ironic':
  verbose             => $verbose,
  debug               => $debug,
  rabbit_hosts        => $rabbit_hosts,
  rabbit_port         => $amqp_port,
  rabbit_userid       => $rabbit_hash['user'],
  rabbit_password     => $rabbit_hash['password'],
  amqp_durable_queues => $rabbit_ha_queues,
  use_syslog          => $use_syslog,
  log_facility        => $syslog_log_facility_ironic,
  database_connection => $database_connection,
  glance_api_servers  => $glance_api_servers,
}

class { 'ironic::client': }

class { 'ironic::api':
  host_ip           => get_network_role_property('ironic/api', 'ipaddr'),
  auth_host         => $keystone_endpoint,
  admin_tenant_name => $ironic_tenant,
  admin_user        => $ironic_user,
  admin_password    => $ironic_user_password,
  neutron_url       => "http://${neutron_endpoint}:9696",
}
8
f2s/resources/ironic-api/meta.yaml
Normal file
@ -0,0 +1,8 @@
id: ironic-api
handler: puppetv2
version: '8.0'
inputs:
  ironic:
    value: null
  puppet_modules:
    value: null
98
f2s/resources/ironic-compute/actions/run.pp
Normal file
@ -0,0 +1,98 @@
#####################################################################################
### ironic-compute is an additional compute role with compute_driver=ironic.     ###
### It cannot be assigned to the same node as nova-compute. It does not include  ###
### the openstack::compute class; it is configured separately.                   ###
#####################################################################################

notice('MODULAR: ironic/ironic-compute.pp')

$ironic_hash                = hiera_hash('ironic', {})
$nova_hash                  = hiera_hash('nova', {})
$management_vip             = hiera('management_vip')
$database_vip               = hiera('database_vip')
$service_endpoint           = hiera('service_endpoint')
$neutron_endpoint           = hiera('neutron_endpoint', $management_vip)
$ironic_endpoint            = hiera('ironic_endpoint', $management_vip)
$glance_api_servers         = hiera('glance_api_servers', "${management_vip}:9292")
$debug                      = hiera('debug', false)
$verbose                    = hiera('verbose', true)
$use_syslog                 = hiera('use_syslog', true)
$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_LOCAL0')
$syslog_log_facility_nova   = hiera('syslog_log_facility_nova', 'LOG_LOCAL6')
$amqp_hosts                 = hiera('amqp_hosts')
$rabbit_hash                = hiera_hash('rabbit_hash')
$nova_report_interval       = hiera('nova_report_interval')
$nova_service_down_time     = hiera('nova_service_down_time')
$neutron_config             = hiera_hash('quantum_settings')

$ironic_tenant        = pick($ironic_hash['tenant'], 'services')
$ironic_user          = pick($ironic_hash['auth_name'], 'ironic')
$ironic_user_password = pick($ironic_hash['user_password'], 'ironic')

$db_host             = pick($nova_hash['db_host'], $database_vip)
$db_user             = pick($nova_hash['db_user'], 'nova')
$db_name             = pick($nova_hash['db_name'], 'nova')
$db_password         = pick($nova_hash['db_password'], 'nova')
$database_connection = "mysql://${db_user}:${db_password}@${db_host}/${db_name}?read_timeout=60"

# $cache_server_port was referenced by the inline_template below but never set
# in this manifest; assuming the same hiera key and default used by the other
# roles in this commit.
$cache_server_port = hiera('memcache_server_port', '11211')

$memcache_nodes      = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles'))
$cache_server_ip     = ipsort(values(get_node_to_ipaddr_map_by_network_role($memcache_nodes, 'mgmt/memcache')))
$memcached_addresses = suffix($cache_server_ip, inline_template(":<%= @cache_server_port %>"))

$notify_on_state_change = 'vm_and_task_state'

class { '::nova':
  install_utilities      => false,
  ensure_package         => installed,
  database_connection    => $database_connection,
  rpc_backend            => 'nova.openstack.common.rpc.impl_kombu',
  #FIXME(bogdando) we have to split amqp_hosts until all modules synced
  rabbit_hosts           => split($amqp_hosts, ','),
  rabbit_userid          => $rabbit_hash['user'],
  rabbit_password        => $rabbit_hash['password'],
  image_service          => 'nova.image.glance.GlanceImageService',
  glance_api_servers     => $glance_api_servers,
  verbose                => $verbose,
  debug                  => $debug,
  use_syslog             => $use_syslog,
  log_facility           => $syslog_log_facility_nova,
  state_path             => $nova_hash['state_path'],
  report_interval        => $nova_report_interval,
  service_down_time      => $nova_service_down_time,
  notify_on_state_change => $notify_on_state_change,
  memcached_servers      => $memcached_addresses,
}

class { '::nova::compute':
  ensure_package            => installed,
  enabled                   => true,
  vnc_enabled               => false,
  force_config_drive        => $nova_hash['force_config_drive'],
  #NOTE(bogdando) the default became true in puppet-nova 4.0.0 (was false)
  neutron_enabled           => true,
  default_availability_zone => $nova_hash['default_availability_zone'],
  default_schedule_zone     => $nova_hash['default_schedule_zone'],
  reserved_host_memory      => '0',
}

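# Point the nova-compute ironic virt driver at the Ironic API and the Keystone
# admin endpoint, authenticating as the ironic service user defined above.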
class { 'nova::compute::ironic':
  admin_url         => "http://${service_endpoint}:35357/v2.0",
  admin_user        => $ironic_user,
  admin_tenant_name => $ironic_tenant,
  admin_passwd      => $ironic_user_password,
  api_endpoint      => "http://${ironic_endpoint}:6385/v1",
}

class { 'nova::network::neutron':
  neutron_admin_password => $neutron_config['keystone']['admin_password'],
  neutron_url            => "http://${neutron_endpoint}:9696",
  neutron_admin_auth_url => "http://${service_endpoint}:35357/v2.0",
}

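# The distro-packaged /etc/nova/nova-compute.conf (which typically pins the
# libvirt driver) would override the ironic driver settings, so remove it and
# restart nova-compute via the notify relationship.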
file { '/etc/nova/nova-compute.conf':
  ensure  => absent,
  require => Package['nova-compute'],
} ~> Service['nova-compute']
10
f2s/resources/ironic-compute/meta.yaml
Normal file
@ -0,0 +1,10 @@
id: ironic-compute
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
121
f2s/resources/ironic-conductor/actions/run.pp
Normal file
@ -0,0 +1,121 @@
notice('MODULAR: ironic/ironic-conductor.pp')

$network_scheme = hiera('network_scheme', {})
prepare_network_config($network_scheme)
$baremetal_address = get_network_role_property('ironic/baremetal', 'ipaddr')
$ironic_hash       = hiera_hash('ironic', {})
$management_vip    = hiera('management_vip')

$network_metadata = hiera_hash('network_metadata', {})
$baremetal_vip    = $network_metadata['vips']['baremetal']['ipaddr']

$database_vip               = hiera('database_vip')
$service_endpoint           = hiera('service_endpoint')
$neutron_endpoint           = hiera('neutron_endpoint', $management_vip)
$glance_api_servers         = hiera('glance_api_servers', "${management_vip}:9292")
$amqp_hosts                 = hiera('amqp_hosts')
$rabbit_hosts               = split($amqp_hosts, ',')
$debug                      = hiera('debug', false)
$verbose                    = hiera('verbose', true)
$use_syslog                 = hiera('use_syslog', true)
$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_USER')
$rabbit_hash                = hiera_hash('rabbit_hash')
$rabbit_ha_queues           = hiera('rabbit_ha_queues')

$ironic_tenant            = pick($ironic_hash['tenant'], 'services')
$ironic_user              = pick($ironic_hash['auth_name'], 'ironic')
$ironic_user_password     = pick($ironic_hash['user_password'], 'ironic')
$ironic_swift_tempurl_key = pick($ironic_hash['swift_tempurl_key'], 'ironic')

$db_host             = pick($ironic_hash['db_host'], $database_vip)
$db_user             = pick($ironic_hash['db_user'], 'ironic')
$db_name             = pick($ironic_hash['db_name'], 'ironic')
$db_password         = pick($ironic_hash['db_password'], 'ironic')
$database_connection = "mysql://${db_user}:${db_password}@${db_host}/${db_name}?charset=utf8&read_timeout=60"

$tftp_root = '/var/lib/ironic/tftpboot'

package { 'ironic-fa-deploy':
  ensure => 'present',
}

class { '::ironic':
  verbose             => $verbose,
  debug               => $debug,
  enabled_drivers     => ['fuel_ssh', 'fuel_ipmitool', 'fake'],
  rabbit_hosts        => $rabbit_hosts,
  rabbit_userid       => $rabbit_hash['user'],
  rabbit_password     => $rabbit_hash['password'],
  amqp_durable_queues => $rabbit_ha_queues,
  use_syslog          => $use_syslog,
  log_facility        => $syslog_log_facility_ironic,
  database_connection => $database_connection,
  glance_api_servers  => $glance_api_servers,
}

class { '::ironic::client': }

class { '::ironic::conductor': }

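# PXE boot plumbing for bare-metal nodes: boot files are served over TFTP from
# $tftp_root on the baremetal network, pxelinux.0 is copied out of the
# syslinux package, and the map-file rule prefixes any non-absolute TFTP
# request with $tftp_root.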
class { '::ironic::drivers::pxe':
  tftp_server      => $baremetal_address,
  tftp_root        => $tftp_root,
  tftp_master_path => "${tftp_root}/master_images",
}

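# Settings applied directly to ironic.conf on top of what the classes above
# manage.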
ironic_config {
  'neutron/url':                          value => "http://${neutron_endpoint}:9696";
  'keystone_authtoken/auth_uri':          value => "http://${service_endpoint}:5000/";
  'keystone_authtoken/auth_host':         value => $service_endpoint;
  'keystone_authtoken/admin_tenant_name': value => $ironic_tenant;
  'keystone_authtoken/admin_user':        value => $ironic_user;
  'keystone_authtoken/admin_password':    value => $ironic_user_password, secret => true;
  'glance/swift_temp_url_key':            value => $ironic_swift_tempurl_key;
  'glance/swift_endpoint_url':            value => "http://${baremetal_vip}:8080";
  'conductor/api_url':                    value => "http://${baremetal_vip}:6385";
}

file { $tftp_root:
  ensure  => directory,
  owner   => 'ironic',
  group   => 'ironic',
  mode    => '0755',
  require => Class['ironic'],
}

file { "${tftp_root}/pxelinux.0":
  ensure  => present,
  source  => '/usr/lib/syslinux/pxelinux.0',
  require => Package['syslinux'],
}

file { "${tftp_root}/map-file":
  content => "r ^([^/]) ${tftp_root}/\\1",
}

class { '::tftp':
  username  => 'ironic',
  directory => $tftp_root,
  options   => "--map-file ${tftp_root}/map-file",
  inetd     => false,
  require   => File["${tftp_root}/map-file"],
}

package { 'syslinux':
  ensure => 'present',
}

package { 'ipmitool':
  ensure => 'present',
  before => Class['::ironic::conductor'],
}

file { '/etc/ironic/fuel_key':
  ensure  => present,
  source  => '/var/lib/astute/ironic/ironic',
  owner   => 'ironic',
  group   => 'ironic',
  mode    => '0600',
  require => Class['ironic'],
}
10
f2s/resources/ironic-conductor/meta.yaml
Normal file
@ -0,0 +1,10 @@
id: ironic-conductor
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
51
f2s/resources/ironic-db/actions/run.pp
Normal file
@ -0,0 +1,51 @@
notice('MODULAR: ironic/db.pp')

$ironic_hash  = hiera_hash('ironic', {})
$mysql_hash   = hiera_hash('mysql', {})
$database_vip = hiera('database_vip')

$mysql_root_user     = pick($mysql_hash['root_user'], 'root')
$mysql_db_create     = pick($mysql_hash['db_create'], true)
$mysql_root_password = $mysql_hash['root_password']

$db_user     = pick($ironic_hash['db_user'], 'ironic')
$db_name     = pick($ironic_hash['db_name'], 'ironic')
$db_password = pick($ironic_hash['db_password'], $mysql_root_password)

$db_host          = pick($ironic_hash['db_host'], $database_vip)
$db_create        = pick($ironic_hash['db_create'], $mysql_db_create)
$db_root_user     = pick($ironic_hash['root_user'], $mysql_root_user)
$db_root_password = pick($ironic_hash['root_password'], $mysql_root_password)

$allowed_hosts = [ hiera('node_name'), 'localhost', '127.0.0.1', '%' ]

validate_string($mysql_root_user)
validate_string($database_vip)

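# Create the ironic database and its grants only when DB creation is enabled;
# the ordering chain below makes sure the galera client and root access are in
# place before the database resources run.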
if $db_create {
  class { 'galera::client':
    custom_setup_class => hiera('mysql_custom_setup_class', 'galera'),
  }

  class { 'ironic::db::mysql':
    user          => $db_user,
    password      => $db_password,
    dbname        => $db_name,
    allowed_hosts => $allowed_hosts,
  }

  class { 'osnailyfacter::mysql_access':
    db_host     => $db_host,
    db_user     => $db_root_user,
    db_password => $db_root_password,
  }

  Class['galera::client'] ->
  Class['osnailyfacter::mysql_access'] ->
  Class['ironic::db::mysql']
}

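# Empty stub classes: these appear to exist so that references to
# mysql::config and mysql::server are satisfied without pulling in the full
# mysql module.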
class mysql::config {}
include mysql::config
class mysql::server {}
include mysql::server
20
f2s/resources/ironic-db/meta.yaml
Normal file
@ -0,0 +1,20 @@
id: ironic-db
handler: puppetv2
version: '8.0'
inputs:
  database_vip:
    value: null
  fqdn:
    value: null
  ironic:
    value: null
  mysql:
    value: null
  mysql_custom_setup_class:
    value: null
  node_name:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
39
f2s/resources/ironic-keystone/actions/run.pp
Normal file
@ -0,0 +1,39 @@
notice('MODULAR: ironic/keystone.pp')

$ironic_hash          = hiera_hash('ironic', {})
$public_vip           = hiera('public_vip')
$management_vip       = hiera('management_vip')
$public_ssl_hash      = hiera('public_ssl')
$ironic_tenant        = pick($ironic_hash['tenant'], 'services')
$ironic_user          = pick($ironic_hash['auth_name'], 'ironic')
$ironic_user_password = pick($ironic_hash['user_password'], 'ironic')
$configure_endpoint   = pick($ironic_hash['configure_endpoint'], true)
$configure_user       = pick($ironic_hash['configure_user'], true)
$configure_user_role  = pick($ironic_hash['configure_user_role'], true)
$service_name         = pick($ironic_hash['service_name'], 'ironic')

$public_address = $public_ssl_hash['services'] ? {
  true    => $public_ssl_hash['hostname'],
  default => $public_vip,
}
$public_protocol = $public_ssl_hash['services'] ? {
  true    => 'https',
  default => 'http',
}

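# The public endpoint follows the TLS termination settings (https on the SSL
# hostname when enabled), while the admin and internal endpoints always use
# plain http on the management VIP.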
$region       = hiera('region', 'RegionOne')
$public_url   = "${public_protocol}://${public_address}:6385"
$admin_url    = "http://${management_vip}:6385"
$internal_url = "http://${management_vip}:6385"

class { 'ironic::keystone::auth':
  password            => $ironic_user_password,
  region              => $region,
  public_url          => $public_url,
  internal_url        => $internal_url,
  admin_url           => $admin_url,
  configure_endpoint  => $configure_endpoint,
  configure_user      => $configure_user,
  configure_user_role => $configure_user_role,
  service_name        => $service_name,
}
20
f2s/resources/ironic-keystone/meta.yaml
Normal file
@ -0,0 +1,20 @@
id: ironic-keystone
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  ironic:
    value: null
  management_vip:
    value: null
  public_ssl:
    value: null
  public_vip:
    value: null
  puppet_modules:
    value: null
  region:
    value: null
  role:
    value: null
54
f2s/resources/keystone-db/actions/run.pp
Normal file
@ -0,0 +1,54 @@
notice('MODULAR: keystone/db.pp')

$node_name        = hiera('node_name')
$network_metadata = hiera_hash('network_metadata', {})

$keystone_hash = hiera_hash('keystone', {})
$mysql_hash    = hiera_hash('mysql', {})
$database_vip  = hiera('database_vip')

$mysql_root_user     = pick($mysql_hash['root_user'], 'root')
$mysql_db_create     = pick($mysql_hash['db_create'], true)
$mysql_root_password = $mysql_hash['root_password']

$db_user     = pick($keystone_hash['db_user'], 'keystone')
$db_name     = pick($keystone_hash['db_name'], 'keystone')
$db_password = pick($keystone_hash['db_password'], $mysql_root_password)

$db_host          = pick($keystone_hash['db_host'], $database_vip)
$db_create        = pick($keystone_hash['db_create'], $mysql_db_create)
$db_root_user     = pick($keystone_hash['root_user'], $mysql_root_user)
$db_root_password = pick($keystone_hash['root_password'], $mysql_root_password)

$allowed_hosts = [ $node_name, 'localhost', '127.0.0.1', '%' ]
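
# The '%' wildcard lets the keystone DB user connect from any host; the node
# name and loopback entries cover local connections explicitly.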
if $db_create {

  class { 'galera::client':
    custom_setup_class => hiera('mysql_custom_setup_class', 'galera'),
  }

  class { 'keystone::db::mysql':
    user          => $db_user,
    password      => $db_password,
    dbname        => $db_name,
    allowed_hosts => $allowed_hosts,
  }

  class { 'osnailyfacter::mysql_access':
    db_host     => $db_host,
    db_user     => $db_root_user,
    db_password => $db_root_password,
  }

  Class['galera::client'] ->
  Class['osnailyfacter::mysql_access'] ->
  Class['keystone::db::mysql']

}

class mysql::config {}
include mysql::config
class mysql::server {}
include mysql::server
22
f2s/resources/keystone-db/meta.yaml
Normal file
@ -0,0 +1,22 @@
id: keystone-db
handler: puppetv2
version: '8.0'
inputs:
  database_vip:
    value: null
  fqdn:
    value: null
  keystone:
    value: null
  mysql:
    value: null
  mysql_custom_setup_class:
    value: null
  network_metadata:
    value: null
  node_name:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
236
f2s/resources/keystone/actions/run.pp
Normal file
@ -0,0 +1,236 @@
notice('MODULAR: keystone.pp')

$network_scheme   = hiera_hash('network_scheme', {})
$network_metadata = hiera_hash('network_metadata', {})
prepare_network_config($network_scheme)

$node_name = hiera('node_name')

$keystone_hash         = hiera_hash('keystone', {})
$verbose               = pick($keystone_hash['verbose'], hiera('verbose', true))
$debug                 = pick($keystone_hash['debug'], hiera('debug', false))
$use_neutron           = hiera('use_neutron', false)
$use_syslog            = hiera('use_syslog', true)
$use_stderr            = hiera('use_stderr', false)
$access_hash           = hiera_hash('access', {})
$management_vip        = hiera('management_vip')
$database_vip          = hiera('database_vip')
$public_vip            = hiera('public_vip')
$service_endpoint      = hiera('service_endpoint')
$glance_hash           = hiera_hash('glance', {})
$nova_hash             = hiera_hash('nova', {})
$cinder_hash           = hiera_hash('cinder', {})
$ceilometer_hash       = hiera_hash('ceilometer', {})
$syslog_log_facility   = hiera('syslog_log_facility_keystone')
$rabbit_hash           = hiera_hash('rabbit_hash', {})
$neutron_user_password = hiera('neutron_user_password', false)
$service_workers       = pick($keystone_hash['workers'],
  min(max($::processorcount, 2), 16))
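# Default worker count is the CPU count clamped to the [2, 16] range:
# 1 core -> 2 workers, 8 cores -> 8, 32 cores -> 16.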

$db_type     = 'mysql'
$db_host     = pick($keystone_hash['db_host'], $database_vip)
$db_password = $keystone_hash['db_password']
$db_name     = pick($keystone_hash['db_name'], 'keystone')
$db_user     = pick($keystone_hash['db_user'], 'keystone')

$admin_token    = $keystone_hash['admin_token']
$admin_tenant   = $access_hash['tenant']
$admin_email    = $access_hash['email']
$admin_user     = $access_hash['user']
$admin_password = $access_hash['password']
$region         = hiera('region', 'RegionOne')

$public_ssl_hash         = hiera('public_ssl')
$public_service_endpoint = hiera('public_service_endpoint', $public_vip)
$public_address          = $public_ssl_hash['services'] ? {
  true    => $public_ssl_hash['hostname'],
  default => $public_service_endpoint,
}

$admin_address          = $service_endpoint
$local_address_for_bind = get_network_role_property('keystone/api', 'ipaddr')

$memcache_server_port  = hiera('memcache_server_port', '11211')
$memcache_pool_maxsize = '100'
$memcache_nodes        = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles'))
$memcache_address_map  = get_node_to_ipaddr_map_by_network_role($memcache_nodes, 'mgmt/memcache')

$public_port     = '5000'
$admin_port      = '35357'
$internal_port   = '5000'
$public_protocol = $public_ssl_hash['services'] ? {
  true    => 'https',
  default => 'http',
}

$public_url   = "${public_protocol}://${public_address}:${public_port}"
$admin_url    = "http://${admin_address}:${admin_port}"
$internal_url = "http://${service_endpoint}:${internal_port}"

$revoke_driver = 'keystone.contrib.revoke.backends.sql.Revoke'

$enabled = true
$ssl     = false

$vhost_limit_request_field_size = 'LimitRequestFieldSize 81900'

$rabbit_password     = $rabbit_hash['password']
$rabbit_user         = $rabbit_hash['user']
$rabbit_hosts        = split(hiera('amqp_hosts', ''), ',')
$rabbit_virtual_host = '/'

$max_pool_size         = hiera('max_pool_size')
$max_overflow          = hiera('max_overflow')
$max_retries           = '-1'
$database_idle_timeout = '3600'

$murano_settings_hash = hiera('murano_settings', {})
if has_key($murano_settings_hash, 'murano_repo_url') {
  $murano_repo_url = $murano_settings_hash['murano_repo_url']
} else {
  $murano_repo_url = 'http://storage.apps.openstack.org'
}

###############################################################################

####### KEYSTONE ###########
class { 'openstack::keystone':
  verbose               => $verbose,
  debug                 => $debug,
  db_type               => $db_type,
  db_host               => $db_host,
  db_password           => $db_password,
  db_name               => $db_name,
  db_user               => $db_user,
  admin_token           => $admin_token,
  public_address        => $public_address,
  public_ssl            => $public_ssl_hash['services'],
  public_hostname       => $public_ssl_hash['hostname'],
  internal_address      => $service_endpoint,
  admin_address         => $admin_address,
  public_bind_host      => $local_address_for_bind,
  admin_bind_host       => $local_address_for_bind,
  enabled               => $enabled,
  use_syslog            => $use_syslog,
  use_stderr            => $use_stderr,
  syslog_log_facility   => $syslog_log_facility,
  region                => $region,
  memcache_servers      => values($memcache_address_map),
  memcache_server_port  => $memcache_server_port,
  memcache_pool_maxsize => $memcache_pool_maxsize,
  max_retries           => $max_retries,
  max_pool_size         => $max_pool_size,
  max_overflow          => $max_overflow,
  rabbit_password       => $rabbit_password,
  rabbit_userid         => $rabbit_user,
  rabbit_hosts          => $rabbit_hosts,
  rabbit_virtual_host   => $rabbit_virtual_host,
  database_idle_timeout => $database_idle_timeout,
  revoke_driver         => $revoke_driver,
  public_url            => $public_url,
  admin_url             => $admin_url,
  internal_url          => $internal_url,
  ceilometer            => $ceilometer_hash['enabled'],
  service_workers       => $service_workers,
}

####### WSGI ###########

class { 'osnailyfacter::apache':
  listen_ports => hiera_array('apache_ports', ['80', '8888', '5000', '35357']),
}

class { 'keystone::wsgi::apache':
  priority              => '05',
  threads               => 3,
  workers               => min($::processorcount, 6),
  ssl                   => $ssl,
  vhost_custom_fragment => $vhost_limit_request_field_size,
  access_log_format     => '%h %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"',

  wsgi_script_ensure => $::osfamily ? {
    'RedHat' => 'link',
    default  => 'file',
  },
  wsgi_script_source => $::osfamily ? {
    # TODO: (adidenko) use file from package for Debian, when
    # https://bugs.launchpad.net/fuel/+bug/1476688 is fixed.
    # 'Debian' => '/usr/share/keystone/wsgi.py',
    'RedHat' => '/usr/share/keystone/keystone.wsgi',
    default  => undef,
  },
}

include ::tweaks::apache_wrappers

###############################################################################

class { 'keystone::roles::admin':
  admin        => $admin_user,
  password     => $admin_password,
  email        => $admin_email,
  admin_tenant => $admin_tenant,
}

class { 'openstack::auth_file':
  admin_user      => $admin_user,
  admin_password  => $admin_password,
  admin_tenant    => $admin_tenant,
  region_name     => $region,
  controller_node => $service_endpoint,
  murano_repo_url => $murano_repo_url,
}

# Get paste.ini source
include keystone::params
$keystone_paste_ini = $::keystone::params::paste_config ? {
  undef   => '/etc/keystone/keystone-paste.ini',
  default => $::keystone::params::paste_config,
}

# Make sure admin token auth middleware is in place
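# The sed expression inserts 'admin_token_auth' right after the 'token_auth'
# filter in the paste pipeline; the unless guard keeps the edit idempotent.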
exec { 'add_admin_token_auth_middleware':
  path    => ['/bin', '/usr/bin'],
  command => "sed -i 's/\\( token_auth \\)/\\1admin_token_auth /' ${keystone_paste_ini}",
  unless  => "fgrep -q ' admin_token_auth' ${keystone_paste_ini}",
  require => Package['keystone'],
}

# Can't use openrc to create the admin user
exec { 'purge_openrc':
  path    => '/bin:/usr/bin:/sbin:/usr/sbin',
  command => 'rm -f /root/openrc',
  onlyif  => 'test -f /root/openrc',
}

Exec <| title == 'keystone-manage db_sync' |> ~>
Exec <| title == 'purge_openrc' |>

Exec <| title == 'add_admin_token_auth_middleware' |> ->
Exec <| title == 'keystone-manage db_sync' |> ->
Exec <| title == 'purge_openrc' |> ->
Class['keystone::roles::admin'] ->
Class['openstack::auth_file']

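# Wait until HAProxy reports both keystone backends as up before creating the
# admin role and the credentials file, since those steps talk to keystone
# through the VIP.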
$haproxy_stats_url = "http://${service_endpoint}:10000/;csv"
|
||||
|
||||
haproxy_backend_status { 'keystone-public' :
|
||||
name => 'keystone-1',
|
||||
url => $haproxy_stats_url,
|
||||
}
|
||||
|
||||
haproxy_backend_status { 'keystone-admin' :
|
||||
name => 'keystone-2',
|
||||
url => $haproxy_stats_url,
|
||||
}
|
||||
|
||||
Service['keystone'] -> Haproxy_backend_status<||>
|
||||
Service<| title == 'httpd' |> -> Haproxy_backend_status<||>
|
||||
Haproxy_backend_status<||> -> Class['keystone::roles::admin']
|
||||
|
||||
####### Disable upstart startup on install #######
|
||||
if ($::operatingsystem == 'Ubuntu') {
|
||||
tweaks::ubuntu_service_override { 'keystone':
|
||||
package_name => 'keystone',
|
||||
}
|
||||
}
|
74
f2s/resources/keystone/meta.yaml
Normal file
@ -0,0 +1,74 @@
id: keystone
handler: puppetv2
version: '8.0'
inputs:
  access:
    value: null
  amqp_hosts:
    value: null
  apache_ports:
    value: null
  ceilometer:
    value: null
  cinder:
    value: null
  database_vip:
    value: null
  debug:
    value: null
  fqdn:
    value: null
  glance:
    value: null
  keystone:
    value: null
  management_vip:
    value: null
  max_overflow:
    value: null
  max_pool_size:
    value: null
  memcache_roles:
    value: null
  memcache_server_port:
    value: null
  murano_settings:
    value: null
  network_metadata:
    value: null
  network_scheme:
    value: null
  neutron_user_password:
    value: null
  node_name:
    value: null
  nodes:
    value: null
  nova:
    value: null
  public_service_endpoint:
    value: null
  public_ssl:
    value: null
  public_vip:
    value: null
  puppet_modules:
    value: null
  rabbit_hash:
    value: null
  region:
    value: null
  role:
    value: null
  service_endpoint:
    value: null
  syslog_log_facility_keystone:
    value: null
  use_neutron:
    value: null
  use_stderr:
    value: null
  use_syslog:
    value: null
  verbose:
    value: null
67
f2s/resources/logging/actions/run.pp
Normal file
@ -0,0 +1,67 @@
notice('MODULAR: logging.pp')

$base_syslog_hash = hiera('base_syslog_hash')
$syslog_hash      = hiera('syslog_hash')
$use_syslog       = hiera('use_syslog', true)
$debug            = pick($syslog_hash['debug'], hiera('debug', false))
$nodes_hash       = hiera('nodes', {})
$roles            = node_roles($nodes_hash, hiera('uid'))

##################################################

$base_syslog_rserver = {
  'remote_type' => 'tcp',
  'server'      => $base_syslog_hash['syslog_server'],
  'port'        => $base_syslog_hash['syslog_port']
}

$syslog_rserver = {
  'remote_type' => $syslog_hash['syslog_transport'],
  'server'      => $syslog_hash['syslog_server'],
  'port'        => $syslog_hash['syslog_port'],
}

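# Logs always go to the base syslog server; the user-configured remote is
# added only when it has been enabled in the settings.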
if $syslog_hash['metadata']['enabled'] {
  $rservers = [$base_syslog_rserver, $syslog_rserver]
} else {
  $rservers = [$base_syslog_rserver]
}

if $use_syslog {
  if ($::operatingsystem == 'Ubuntu') {
    # ensure the /var/log folder permissions are correct even if it's a mount
    # LP#1489347
    file { '/var/log':
      owner => 'root',
      group => 'syslog',
      mode  => '0775',
    }
  }

  if member($roles, 'ironic') {
    $ironic_collector = true
  }

  class { '::openstack::logging':
    role             => 'client',
    show_timezone    => true,
    # log both locally (including auth) and remotely
    log_remote       => true,
    log_local        => true,
    log_auth_local   => true,
    # keep four weekly log rotations,
    # force rotation if the 100M size limit is exceeded
    rotation         => 'weekly',
    keep             => '4',
    minsize          => '10M',
    maxsize          => '100M',
    # remote servers to send logs to
    rservers         => $rservers,
    # should be true if the client is running on a virtual node
    virtual          => str2bool($::is_virtual),
    # RabbitMQ doesn't support syslog directly
    rabbit_log_level => 'NOTICE',
    debug            => $debug,
    ironic_collector => $ironic_collector,
  }
}
24
f2s/resources/logging/meta.yaml
Normal file
@ -0,0 +1,24 @@
id: logging
handler: puppetv2
version: '8.0'
inputs:
  base_syslog_hash:
    value: null
  debug:
    value: null
  fqdn:
    value: null
  node_role:
    value: null
  nodes:
    value: null
  puppet_modules:
    value: null
  role:
    value: null
  syslog_hash:
    value: null
  uid:
    value: null
  use_syslog:
    value: null
8
f2s/resources/memcached/actions/run.pp
Normal file
@ -0,0 +1,8 @@
notice('MODULAR: memcached.pp')

prepare_network_config(hiera('network_scheme', {}))

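# Bind memcached to this node's address on the mgmt/memcache network role and
# cap its cache at half of the node's RAM.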
class { 'memcached':
  listen_ip  => get_network_role_property('mgmt/memcache', 'ipaddr'),
  max_memory => '50%',
}
12
f2s/resources/memcached/meta.yaml
Normal file
@ -0,0 +1,12 @@
id: memcached
handler: puppetv2
version: '8.0'
inputs:
  fqdn:
    value: null
  network_scheme:
    value: null
  puppet_modules:
    value: null
  role:
    value: null