Remove unused puppet modules
* Remove the nova api proxy puppet module.
* Remove the openstack::swift puppet manifest.
* Refactor openstack::nova::storage as platform::worker::storage. This
  requires the nova puppet code in sysinv to write to a different hiera
  target, and the creation of /var/lib/nova.
* Remove puppet modules from the spec file for modules that are no longer
  being used.

Story: 2004764
Task: 29840
Change-Id: Ifa0171b06e23fd77d373983d644df3f56ae4e2de
Signed-off-by: Al Bailey <Al.Bailey@windriver.com>
Parent: c7334b1846
Commit: dadb8267d2
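Since sysinv generates the local-storage hiera data, the rename also moves those keys from the openstack::nova::storage namespace to platform::worker::storage. A minimal sketch of the per-host hiera data under the new target, assuming standard Puppet class-parameter lookup; the key names follow the parameters of platform::worker::storage added below, and the values are illustrative only:

  # Illustrative values only; key names come from the new class parameters,
  # the device path is made up for the example.
  platform::worker::storage::adding_pvs: []
  platform::worker::storage::removing_pvs: []
  platform::worker::storage::final_pvs: ['/dev/sdb']
  platform::worker::storage::lvm_global_filter: '[ "a|/dev/sdb|", "r|.*|" ]'
  platform::worker::storage::lvm_update_filter: '[ "a|/dev/sdb|", "r|.*|" ]'
  platform::worker::storage::instance_backing: 'image'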
@@ -1,2 +1,2 @@
 SRC_DIR="src"
-TIS_PATCH_VER=81
+TIS_PATCH_VER=82
@@ -16,7 +16,6 @@ Requires: puppet-dcorch
 Requires: puppet-dcmanager
 Requires: puppet-mtce
 Requires: puppet-nfv
-Requires: puppet-nova_api_proxy
 Requires: puppet-patching
 Requires: puppet-sysinv
 Requires: puppet-sshd
@@ -24,27 +23,13 @@ Requires: puppet-smapi
 Requires: puppet-fm
 
 # Openstack puppet modules
-Requires: puppet-aodh
 Requires: puppet-barbican
-Requires: puppet-ceilometer
 Requires: puppet-ceph
-Requires: puppet-cinder
-Requires: puppet-glance
-Requires: puppet-heat
 Requires: puppet-horizon
 Requires: puppet-keystone
-Requires: puppet-neutron
-Requires: puppet-nova
 Requires: puppet-openstacklib
-Requires: puppet-swift
-Requires: puppet-tempest
 Requires: puppet-vswitch
-Requires: puppet-murano
-Requires: puppet-magnum
-Requires: puppet-ironic
-Requires: puppet-panko
 Requires: puppet-memcached
-Requires: puppet-gnocchi
 
 # Puppetlabs puppet modules
 Requires: puppet-concat
@@ -56,11 +41,8 @@ Requires: puppet-inifile
 Requires: puppet-lvm
 Requires: puppet-postgresql
 Requires: puppet-rabbitmq
-Requires: puppet-rsync
 Requires: puppet-stdlib
 Requires: puppet-sysctl
-Requires: puppet-vcsrepo
-Requires: puppet-xinetd
 Requires: puppet-etcd
 
 # 3rdparty puppet modules
@@ -92,9 +92,6 @@ include ::platform::dcorch::snmp
 
 include ::platform::smapi
 
-include ::openstack::swift
-include ::openstack::swift::api
-
 include ::openstack::barbican
 include ::openstack::barbican::api
 
@@ -36,7 +36,7 @@ include ::platform::kubernetes::worker
 include ::platform::multipath
 include ::platform::client
 include ::platform::ceph::worker
-include ::openstack::nova::storage
+include ::platform::worker::storage
 
 class { '::platform::config::worker::post':
   stage => post,
@@ -1,678 +0,0 @@
class openstack::nova::params (
  $nova_api_port = 8774,
  $nova_ec2_port = 8773,
  $placement_port = 8778,
  $nova_novnc_port = 6080,
  $nova_serial_port = 6083,
  $region_name = undef,
  $service_name = 'openstack-nova',
  $service_create = false,
  $configure_endpoint = true,
  $timeout = '55m',
) {
  include ::platform::network::mgmt::params
  include ::platform::network::infra::params

  # migration is performed over the managemet network if configured, otherwise
  # the management network is used
  if $::platform::network::infra::params::interface_name {
    $migration_version = $::platform::network::infra::params::subnet_version
    $migration_ip = $::platform::network::infra::params::interface_address
    $migration_network = $::platform::network::infra::params::subnet_network
    $migration_prefixlen = $::platform::network::infra::params::subnet_prefixlen
  } else {
    $migration_version = $::platform::network::mgmt::params::subnet_version
    $migration_ip = $::platform::network::mgmt::params::interface_address
    $migration_network = $::platform::network::mgmt::params::subnet_network
    $migration_prefixlen = $::platform::network::mgmt::params::subnet_prefixlen
  }

  # NOTE: this variable is used in the sshd_config, and therefore needs to
  # match the Ruby ERB template.
  $nova_migration_subnet = "${migration_network}/${migration_prefixlen}"
}


class openstack::nova {

  include ::platform::params
  include ::platform::amqp::params

  include ::platform::network::mgmt::params
  $metadata_host = $::platform::network::mgmt::params::controller_address

  class { '::nova':
    rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
    default_transport_url => $::platform::amqp::params::transport_url,
  }

  # User nova is created during python-nova rpm install.
  # Just update it's permissions.
  user { 'nova':
    ensure => 'present',
    groups => ['nova', $::platform::params::protected_group_name],
  }

  # TODO(mpeters): move to nova puppet module as formal parameters
  nova_config {
    'DEFAULT/notification_format': value => 'unversioned';
    'DEFAULT/metadata_host': value => $metadata_host;
  }
}

class openstack::nova::sshd
  inherits ::openstack::nova::params {

  service { 'sshd':
    ensure => 'running',
    enable => true,
  }

  file { '/etc/ssh/sshd_config':
    ensure => 'present' ,
    notify => Service['sshd'],
    mode => '0600',
    owner => 'root',
    group => 'root',
    content => template('sshd/sshd_config.erb'),
  }

}

class openstack::nova::controller
  inherits ::openstack::nova::params {

  include ::platform::params

  if $::platform::params::init_database {
    include ::nova::db::postgresql
    include ::nova::db::postgresql_api
  }

  include ::nova::pci
  include ::nova::scheduler
  include ::nova::scheduler::filter
  include ::nova::compute::ironic
  include ::nova::compute::serial

  include ::openstack::nova::sshd

  # TODO(mpeters): move to nova puppet module as formal parameters
  nova_config{
    'metrics/required': value => false;
  }

  class { '::nova::conductor':
    workers => $::platform::params::eng_workers_by_2,
  }

  # Run nova-manage to purge deleted rows daily at 15 minute mark
  cron { 'nova-purge-deleted':
    ensure => 'present',
    command => '/usr/bin/nova-purge-deleted-active',
    environment => 'PATH=/bin:/usr/bin:/usr/sbin',
    minute => '15',
    hour => '*/24',
    user => 'root',
  }
}


class openstack::nova::compute (
  $ssh_keys,
  $host_private_key,
  $host_public_key,
  $host_public_header,
  $host_key_type,
  $migration_private_key,
  $migration_public_key,
  $migration_key_type,
  $compute_monitors,
  $iscsi_initiator_name = undef,
) inherits ::openstack::nova::params {
  include ::nova::pci
  include ::platform::params

  include ::platform::network::mgmt::params
  include ::platform::network::infra::params
  include ::platform::multipath::params
  include ::nova::keystone::authtoken
  include ::nova::compute::neutron

  include ::openstack::nova::sshd

  $host_private_key_file = $host_key_type ? {
    'ssh-rsa' => '/etc/ssh/ssh_host_rsa_key',
    'ssh-dsa' => '/etc/ssh/ssh_host_dsa_key',
    'ssh-ecdsa' => '/etc/ssh/ssh_host_ecdsa_key',
    default => undef
  }

  if ! $host_private_key_file {
    fail("Unable to determine name of private key file. Type specified was '${host_key_type}' but should be one of: ssh-rsa, ssh-dsa, ssh-ecdsa.") # lint:ignore:140chars
  }

  $host_public_key_file = $host_key_type ? {
    'ssh-rsa' => '/etc/ssh/ssh_host_rsa_key.pub',
    'ssh-dsa' => '/etc/ssh/ssh_host_dsa_key.pub',
    'ssh-ecdsa' => '/etc/ssh/ssh_host_ecdsa_key.pub',
    default => undef
  }

  if ! $host_public_key_file {
    fail("Unable to determine name of public key file. Type specified was '${host_key_type}' but should be one of: ssh-rsa, ssh-dsa, ssh-ecdsa.") # lint:ignore:140chars
  }

  file { '/etc/ssh':
    ensure => directory,
    mode => '0700',
    owner => 'root',
    group => 'root',
  }

  -> file { $host_private_key_file:
    content => $host_private_key,
    mode => '0600',
    owner => 'root',
    group => 'root',
  }

  -> file { $host_public_key_file:
    content => "${host_public_header} ${host_public_key}",
    mode => '0644',
    owner => 'root',
    group => 'root',
  }

  $migration_private_key_file = $migration_key_type ? {
    'ssh-rsa' => '/root/.ssh/id_rsa',
    'ssh-dsa' => '/root/.ssh/id_dsa',
    'ssh-ecdsa' => '/root/.ssh/id_ecdsa',
    default => undef
  }

  if ! $migration_private_key_file {
    fail("Unable to determine name of private key file. Type specified was '${migration_key_type}' but should be one of: ssh-rsa, ssh-dsa, ssh-ecdsa.") # lint:ignore:140chars
  }

  $migration_auth_options = [
    "from=\"${nova_migration_subnet}\"",
    "command=\"/usr/bin/nova_authorized_cmds\"" ]

  file { '/root/.ssh':
    ensure => directory,
    mode => '0700',
    owner => 'root',
    group => 'root',
  }

  -> file { $migration_private_key_file:
    content => $migration_private_key,
    mode => '0600',
    owner => 'root',
    group => 'root',
  }

  -> ssh_authorized_key { 'nova-migration-key-authorization':
    ensure => present,
    key => $migration_public_key,
    type => $migration_key_type,
    user => 'root',
    require => File['/root/.ssh'],
    options => $migration_auth_options,
  }

  # remove root user's known_hosts as a preventive measure
  # to ensure it doesn't interfere client side authentication
  # during VM migration.
  file { '/root/.ssh/known_hosts':
    ensure => absent,
  }

  create_resources(sshkey, $ssh_keys, {})

  class { '::nova::compute':
    vncserver_proxyclient_address => $::platform::params::hostname,
  }

  if str2bool($::is_virtual) {
    # check that we actually support KVM virtualization
    $kvm_exists = inline_template("<% if File.exists?('/dev/kvm') -%>true<% else %>false<% end -%>")
    if $::virtual == 'kvm' and str2bool($kvm_exists) {
      $libvirt_virt_type = 'kvm'
    } else {
      $libvirt_virt_type = 'qemu'
    }
  } else {
    $libvirt_virt_type = 'kvm'
  }

  $libvirt_vnc_bind_host = $migration_version ? {
    4 => '0.0.0.0',
    6 => '::0',
  }

  include ::openstack::glance::params
  if 'rbd' in $::openstack::glance::params::enabled_backends {
    $libvirt_inject_partition = '-2'
    $libvirt_images_type = 'rbd'
  } else {
    $libvirt_inject_partition = '-1'
    $libvirt_images_type = 'default'
  }

  class { '::nova::compute::libvirt':
    libvirt_virt_type => $libvirt_virt_type,
    vncserver_listen => $libvirt_vnc_bind_host,
    libvirt_inject_partition => $libvirt_inject_partition,
  }

  # TODO(mpeters): convert hard coded config values to hiera class parameters
  nova_config {
    'DEFAULT/my_ip': value => $migration_ip;

    'libvirt/libvirt_images_type': value => $libvirt_images_type;
    'libvirt/live_migration_inbound_addr': value => "${::platform::params::hostname}-infra";
    'libvirt/live_migration_uri': ensure => absent;
    'libvirt/volume_use_multipath': value => $::platform::multipath::params::enabled;

    # enable auto-converge by default
    'libvirt/live_migration_permit_auto_converge': value => 'True';

    # Change the nfs mount options to provide faster detection of unclean
    # shutdown (e.g. if controller is powered down).
    'DEFAULT/nfs_mount_options': value => $::platform::params::nfs_mount_options;

    # WRS extension: compute_resource_debug
    'DEFAULT/compute_resource_debug': value => 'False';

    # WRS extension: reap running deleted VMs
    'DEFAULT/running_deleted_instance_action': value => 'reap';
    'DEFAULT/running_deleted_instance_poll_interval': value => '60';

    # Delete rbd_user, for now
    'DEFAULT/rbd_user': ensure => 'absent';

    # write metadata to a special configuration drive
    'DEFAULT/mkisofs_cmd': value => '/usr/bin/genisoimage';

    # configure metrics
    'DEFAULT/compute_available_monitors':
      value => 'nova.compute.monitors.all_monitors';
    'DEFAULT/compute_monitors': value => $compute_monitors;

    # need retries under heavy I/O loads
    'DEFAULT/network_allocate_retries': value => 2;

    # TODO(mpeters): confirm if this is still required - deprecated
    'DEFAULT/volume_api_class': value => 'nova.volume.cinder.API';

    'DEFAULT/default_ephemeral_format': value => 'ext4';

    # turn on service tokens
    'service_user/send_service_user_token': value => true;
    'service_user/project_name': value => $::nova::keystone::authtoken::project_name;
    'service_user/password': value => $::nova::keystone::authtoken::password;
    'service_user/username': value => $::nova::keystone::authtoken::username;
    'service_user/region_name': value => $::nova::keystone::authtoken::region_name;
    'service_user/auth_url': value => $::nova::keystone::authtoken::auth_url;
    'service_user/user_domain_name': value => $::nova::keystone::authtoken::user_domain_name;
    'service_user/project_domain_name': value => $::nova::keystone::authtoken::project_domain_name;
    'service_user/auth_type': value => 'password';
  }

  file_line {'cgroup_controllers':
    ensure => present,
    path => '/etc/libvirt/qemu.conf',
    line => 'cgroup_controllers = [ "cpu", "cpuacct" ]',
    match => '^cgroup_controllers = .*',
  }

  if $iscsi_initiator_name {
    $initiator_content = "InitiatorName=${iscsi_initiator_name}\n"
    file { '/etc/iscsi/initiatorname.iscsi':
      ensure => 'present',
      owner => 'root',
      group => 'root',
      mode => '0644',
      content => $initiator_content,
    }
    -> exec { 'Restart iscsid.service':
      command => "bash -c 'systemctl restart iscsid.service'",
      onlyif => 'systemctl status iscsid.service',
    }
  }
}

define openstack::nova::storage::wipe_new_pv {
  $cmd = join(['/sbin/pvs --nosuffix --noheadings ',$name,' 2>/dev/null | grep nova-local || true'])
  $result = generate('/bin/sh', '-c', $cmd)
  if $result !~ /nova-local/ {
    exec { "Wipe New PV not in VG - ${name}":
      provider => shell,
      command => "wipefs -a ${name}",
      before => Lvm::Volume[instances_lv],
      require => Exec['remove device mapper mapping']
    }
  }
}

define openstack::nova::storage::wipe_pv_and_format {
  if $name !~ /part/ {
    exec { "Wipe removing PV ${name}":
      provider => shell,
      command => "wipefs -a ${name}",
      require => File_line[disable_old_lvg_disks]
    }
    -> exec { "GPT format disk PV - ${name}":
      provider => shell,
      command => "parted -a optimal --script ${name} -- mktable gpt",
    }
  }
  else {
    exec { "Wipe removing PV ${name}":
      provider => shell,
      command => "wipefs -a ${name}",
      require => File_line[disable_old_lvg_disks]
    }
  }
}

class openstack::nova::storage (
  $adding_pvs,
  $removing_pvs,
  $final_pvs,
  $lvm_global_filter = '[]',
  $lvm_update_filter = '[]',
  $instance_backing = 'image',
  $concurrent_disk_operations = 2,
  $images_rbd_pool = 'ephemeral',
  $images_rbd_ceph_conf = '/etc/ceph/ceph.conf'
) {
  $adding_pvs_str = join($adding_pvs,' ')
  $removing_pvs_str = join($removing_pvs,' ')

  # Ensure partitions update prior to local storage configuration
  Class['::platform::partitions'] -> Class[$name]

  case $instance_backing {
    'image': {
      $images_type = 'default'
      $images_volume_group = absent
      $round_to_extent = false
      $local_monitor_state = 'disabled'
      $images_rbd_pool_real = absent
      $images_rbd_ceph_conf_real = absent
    }
    'remote': {
      $images_type = 'rbd'
      $images_volume_group = absent
      $round_to_extent = false
      $local_monitor_state = 'disabled'
      $images_rbd_pool_real = $images_rbd_pool
      $images_rbd_ceph_conf_real = $images_rbd_ceph_conf
    }
    default: {
      fail("Unsupported instance backing: ${instance_backing}")
    }
  }

  nova_config {
    'DEFAULT/concurrent_disk_operations': value => $concurrent_disk_operations;
  }

  ::openstack::nova::storage::wipe_new_pv { $adding_pvs: }
  ::openstack::nova::storage::wipe_pv_and_format { $removing_pvs: }

  file_line { 'enable_new_lvg_disks':
    path => '/etc/lvm/lvm.conf',
    line => " global_filter = ${lvm_update_filter}",
    match => '^[ ]*global_filter =',
  }
  -> nova_config {
    'libvirt/images_type': value => $images_type;
    'libvirt/images_volume_group': value => $images_volume_group;
    'libvirt/images_rbd_pool': value => $images_rbd_pool_real;
    'libvirt/images_rbd_ceph_conf': value => $images_rbd_ceph_conf_real;
  }
  -> exec { 'umount /var/lib/nova/instances':
    command => 'umount /var/lib/nova/instances; true',
  }
  -> exec { 'umount /dev/nova-local/instances_lv':
    command => 'umount /dev/nova-local/instances_lv; true',
  }
  -> exec { 'remove udev leftovers':
    unless => 'vgs nova-local',
    command => 'rm -rf /dev/nova-local || true',
  }
  -> exec { 'remove device mapper mapping':
    command => 'dmsetup remove /dev/mapper/nova--local-instances_lv || true',
  }
  -> file_line { 'disable_old_lvg_disks':
    path => '/etc/lvm/lvm.conf',
    line => " global_filter = ${lvm_global_filter}",
    match => '^[ ]*global_filter =',
  }
  -> exec { 'add device mapper mapping':
    command => 'lvchange -ay /dev/nova-local/instances_lv || true',
  }
  -> lvm::volume { 'instances_lv':
    ensure => 'present',
    vg => 'nova-local',
    pv => $final_pvs,
    size => 'max',
    round_to_extent => $round_to_extent,
    allow_reduce => true,
    nuke_fs_on_resize_failure => true,
  }
  -> filesystem { '/dev/nova-local/instances_lv':
    ensure => present,
    fs_type => 'ext4',
    options => '-F -F',
    require => Logical_volume['instances_lv']
  }
  -> file { '/var/lib/nova/instances':
    ensure => 'directory',
    owner => 'root',
    group => 'root',
    mode => '0755',
  }
  -> exec { 'mount /dev/nova-local/instances_lv':
    unless => 'mount | grep -q /var/lib/nova/instances',
    command => 'mount -t ext4 /dev/nova-local/instances_lv /var/lib/nova/instances',
  }
}


class openstack::nova::network {
  include ::nova::network::neutron
}


class openstack::nova::placement {
  include ::nova::placement
}


class openstack::nova::firewall
  inherits ::openstack::nova::params {

  platform::firewall::rule { 'nova-api-rules':
    service_name => 'nova',
    ports => $nova_api_port,
  }

  platform::firewall::rule { 'nova-placement-api':
    service_name => 'placement',
    ports => $placement_port,
  }

  platform::firewall::rule { 'nova-novnc':
    service_name => 'nova-novnc',
    ports => $nova_novnc_port,
  }

  platform::firewall::rule { 'nova-serial':
    service_name => 'nova-serial',
    ports => $nova_serial_port,
  }
}


class openstack::nova::haproxy
  inherits ::openstack::nova::params {

  platform::haproxy::proxy { 'nova-restapi':
    server_name => 's-nova',
    public_port => $nova_api_port,
    private_port => $nova_api_port,
  }

  platform::haproxy::proxy { 'placement-restapi':
    server_name => 's-placement',
    public_port => $placement_port,
    private_port => $placement_port,
  }

  platform::haproxy::proxy { 'nova-novnc':
    server_name => 's-nova-novnc',
    public_port => $nova_novnc_port,
    private_port => $nova_novnc_port,
    x_forwarded_proto => false,
  }

  platform::haproxy::proxy { 'nova-serial':
    server_name => 's-nova-serial',
    public_port => $nova_serial_port,
    private_port => $nova_serial_port,
    server_timeout => $timeout,
    client_timeout => $timeout,
    x_forwarded_proto => false,
  }
}


class openstack::nova::api::services
  inherits ::openstack::nova::params {

  include ::nova::pci
  include ::platform::params

  include ::nova::vncproxy
  include ::nova::serialproxy
  include ::nova::consoleauth
  include ::nova_api_proxy::config

  class {'::nova::api':
    sync_db => $::platform::params::init_database,
    sync_db_api => $::platform::params::init_database,
    osapi_compute_workers => $::platform::params::eng_workers,
    metadata_workers => $::platform::params::eng_workers_by_2,
  }
}


class openstack::nova::api
  inherits ::openstack::nova::params {

  include ::platform::kubernetes::params
  include ::platform::params

  if ($::openstack::nova::params::service_create and
      $::platform::params::init_keystone) {
    include ::nova::keystone::auth
    include ::nova::keystone::auth_placement
  }

  include ::openstack::nova::api::services

  if $::openstack::nova::params::configure_endpoint {
    include ::openstack::nova::firewall
    if $::platform::kubernetes::params::enabled != true {
      include ::openstack::nova::haproxy
    }
  }
}


class openstack::nova::conductor::reload {
  exec { 'signal-nova-conductor':
    command => 'pkill -HUP nova-conductor',
  }
}


class openstack::nova::api::reload {
  platform::sm::restart {'nova-api': }
}


class openstack::nova::controller::runtime {
  include ::openstack::nova
  include ::openstack::nova::controller
  include ::openstack::nova::api::services

  class {'::openstack::nova::api::reload':
    stage => post
  }

  class {'::openstack::nova::conductor::reload':
    stage => post
  }
}


class openstack::nova::api::runtime {

  # both the service configuration and firewall/haproxy needs to be updated
  include ::openstack::nova
  include ::openstack::nova::api
  include ::nova::compute::serial

  class {'::openstack::nova::api::reload':
    stage => post
  }
}


class openstack::nova::compute::pci
(
  $pci_pt_whitelist = [],
  $pci_sriov_whitelist = undef,
) {

  # The pci_passthrough option in the nova::compute class is not sufficient.
  # In particular, it sets the pci_passthrough_whitelist in nova.conf to an
  # empty string if the list is empty, causing the nova-compute process to fail.
  if $pci_sriov_whitelist {
    class { '::nova::compute::pci':
      passthrough => generate('/usr/bin/nova-sriov',
        $pci_pt_whitelist, $pci_sriov_whitelist),
    }
  } else {
    class { '::nova::compute::pci':
      passthrough => $pci_pt_whitelist,
    }
  }
}


class openstack::nova::compute::reload {
  include ::platform::kubernetes::params

  if $::platform::kubernetes::params::enabled != true {
    exec { 'pmon-restart-nova-compute':
      command => 'pmon-restart nova-compute',
    }
  }
}


class openstack::nova::compute::runtime {
  include ::openstack::nova
  include ::openstack::nova::compute

  class {'::openstack::nova::compute::reload':
    stage => post
  }
}
@@ -1,135 +0,0 @@
class openstack::swift::params (
  $swift_hash_path_suffix = 'swift_secret',
  $service_name = 'openstack-swift',
  $service_enabled = false,
  $api_port = 8080,
  $api_host = '127.0.0.1',
  $fs_size_mb = '25',
) { }


class openstack::swift::firewall
  inherits ::openstack::swift::params {

  if $service_enabled {
    platform::firewall::rule { 'swift-api':
      service_name => 'swift',
      ports => $api_port,
    }
  }
}


class openstack::swift::haproxy
  inherits ::openstack::swift::params {

  if $service_enabled {
    platform::haproxy::proxy { 'swift-restapi':
      server_name => 's-swift',
      public_port => $api_port,
      private_port => $api_port,
    }
  }
}


class openstack::swift::api {
  include ::openstack::swift::firewall
  include ::openstack::swift::haproxy
}


class openstack::swift
  inherits ::openstack::swift::params {

  include ::platform::params
  include ::openstack::keystone::params

  if $service_enabled {
    if str2bool($::is_controller_active) or
        str2bool($::is_standalone_controller) {
      class { '::swift::keystone::auth':
        configure_s3_endpoint => false,
      }
    }

    class { '::swift':
      swift_hash_path_suffix => $swift_hash_path_suffix
    }

    include swift::proxy::healthcheck
    include swift::proxy::proxy_logging
    include swift::proxy::authtoken
    include swift::proxy::keystone
    include swift::proxy::container_quotas
    class { 'swift::proxy':
      account_autocreate => true,
      proxy_local_net_ip => $api_host,
      port => $api_port,
      pipeline => ['healthcheck', 'authtoken', 'keystone', 'container-quotas' , 'proxy-logging', 'proxy-server'],
    }

    swift::storage::loopback { '1':
      require => Class['swift'],
      base_dir => '/srv/loopback-device',
      mnt_base_dir => '/srv/node',
      byte_size => '1024',
      seek => $fs_size_mb*1024,
    }

    # remove dependency on xinetd
    class { '::rsync::server':
      use_xinetd => false,
      address => $api_host,
      use_chroot => 'no',
    }

    class { 'swift::storage::all':
      storage_local_net_ip => $api_host,
      object_port => '6200',
      container_port => '6201',
      account_port => '6202',
      account_pipeline => ['healthcheck', 'recon', 'account-server'],
      container_pipeline => ['healthcheck', 'recon', 'container-server'],
      object_pipeline => ['healthcheck', 'recon', 'object-server'],
      # Turn on support for object versioning
      allow_versions => true,
    }

    $rings = [
      'account',
      'object',
      'container']
    swift::storage::filter::recon { $rings: }
    swift::storage::filter::healthcheck { $rings: }

    ring_object_device { "${api_host}:6200/1":
      region => 1, # optional, defaults to 1
      zone => 1,
      weight => 1,
    }

    ring_container_device { "${api_host}:6201/1":
      zone => 1,
      weight => 1,
    }

    ring_account_device { "${api_host}:6202/1":
      zone => 1,
      weight => 1,
    }

    class { 'swift::ringbuilder':
      part_power => '10',
      # number of replicas can not be more than the number of nodes
      replicas => '1',
      min_part_hours => '1',
      require => Class['swift'],
    }
  }
}


class openstack::swift::runtime {
  include ::openstack::swift
}
@@ -260,18 +260,6 @@ class platform::compute::allocate
 }
 # lint:endignore:variable_is_lowercase
 
-class platform::compute::extend
-  inherits ::platform::compute::hugepage::params {
-
-  # nova-compute reads on init, extended nova compute options
-  # used with nova accounting
-  file { '/etc/nova/compute_extend.conf':
-    ensure => 'present',
-    replace => true,
-    content => template('platform/compute_extend.conf.erb')
-  }
-}
-
 # Mount resctrl to allow Cache Allocation Technology per VM
 class platform::compute::resctrl {
 
@@ -328,6 +316,5 @@ class platform::compute {
   require ::platform::compute::allocate
   require ::platform::compute::pmqos
   require ::platform::compute::resctrl
-  require ::platform::compute::extend
   require ::platform::compute::config
 }
@@ -142,7 +142,6 @@ class platform::haproxy::runtime
     include ::platform::dcorch::haproxy
   }
   include ::openstack::keystone::haproxy
-  include ::openstack::swift::haproxy
   include ::openstack::barbican::haproxy
 
   class {'::platform::haproxy::reload':
@@ -74,7 +74,7 @@ class platform::lvm::vg::nova_local(
   $vg_name = 'nova-local',
   $physical_volumes = [],
 ) inherits platform::lvm::params {
-  # TODO(rchurch): refactor portions of openstack::nova::storage an move here
+  # TODO(rchurch): refactor portions of platform::worker::storage and move here
 }
 
 ##################
puppet-manifests/src/modules/platform/manifests/worker.pp (new file, 134 lines)
@@ -0,0 +1,134 @@
define platform::worker::storage::wipe_new_pv {
  $cmd = join(['/sbin/pvs --nosuffix --noheadings ',$name,' 2>/dev/null | grep nova-local || true'])
  $result = generate('/bin/sh', '-c', $cmd)
  if $result !~ /nova-local/ {
    exec { "Wipe New PV not in VG - ${name}":
      provider => shell,
      command => "wipefs -a ${name}",
      before => Lvm::Volume[instances_lv],
      require => Exec['remove device mapper mapping']
    }
  }
}

define platform::worker::storage::wipe_pv_and_format {
  if $name !~ /part/ {
    exec { "Wipe removing PV ${name}":
      provider => shell,
      command => "wipefs -a ${name}",
      require => File_line[disable_old_lvg_disks]
    }
    -> exec { "GPT format disk PV - ${name}":
      provider => shell,
      command => "parted -a optimal --script ${name} -- mktable gpt",
    }
  }
  else {
    exec { "Wipe removing PV ${name}":
      provider => shell,
      command => "wipefs -a ${name}",
      require => File_line[disable_old_lvg_disks]
    }
  }
}

class platform::worker::storage (
  $adding_pvs,
  $removing_pvs,
  $final_pvs,
  $lvm_global_filter = '[]',
  $lvm_update_filter = '[]',
  $instance_backing = 'image',
  $images_rbd_pool = 'ephemeral',
  $images_rbd_ceph_conf = '/etc/ceph/ceph.conf'
) {
  $adding_pvs_str = join($adding_pvs,' ')
  $removing_pvs_str = join($removing_pvs,' ')

  # Ensure partitions update prior to local storage configuration
  Class['::platform::partitions'] -> Class[$name]

  case $instance_backing {
    'image': {
      $images_type = 'default'
      $images_volume_group = absent
      $round_to_extent = false
      $local_monitor_state = 'disabled'
      $images_rbd_pool_real = absent
      $images_rbd_ceph_conf_real = absent
    }
    'remote': {
      $images_type = 'rbd'
      $images_volume_group = absent
      $round_to_extent = false
      $local_monitor_state = 'disabled'
      $images_rbd_pool_real = $images_rbd_pool
      $images_rbd_ceph_conf_real = $images_rbd_ceph_conf
    }
    default: {
      fail("Unsupported instance backing: ${instance_backing}")
    }
  }

  ::platform::worker::storage::wipe_new_pv { $adding_pvs: }
  ::platform::worker::storage::wipe_pv_and_format { $removing_pvs: }

  file_line { 'enable_new_lvg_disks':
    path => '/etc/lvm/lvm.conf',
    line => " global_filter = ${lvm_update_filter}",
    match => '^[ ]*global_filter =',
  }
  -> file { '/var/lib/nova':
    ensure => 'directory',
    owner => 'root',
    group => 'root',
    mode => '0755',
  }
  -> exec { 'umount /var/lib/nova/instances':
    command => 'umount /var/lib/nova/instances; true',
  }
  -> exec { 'umount /dev/nova-local/instances_lv':
    command => 'umount /dev/nova-local/instances_lv; true',
  }
  -> exec { 'remove udev leftovers':
    unless => 'vgs nova-local',
    command => 'rm -rf /dev/nova-local || true',
  }
  -> exec { 'remove device mapper mapping':
    command => 'dmsetup remove /dev/mapper/nova--local-instances_lv || true',
  }
  -> file_line { 'disable_old_lvg_disks':
    path => '/etc/lvm/lvm.conf',
    line => " global_filter = ${lvm_global_filter}",
    match => '^[ ]*global_filter =',
  }
  -> exec { 'add device mapper mapping':
    command => 'lvchange -ay /dev/nova-local/instances_lv || true',
  }
  -> lvm::volume { 'instances_lv':
    ensure => 'present',
    vg => 'nova-local',
    pv => $final_pvs,
    size => 'max',
    round_to_extent => $round_to_extent,
    allow_reduce => true,
    nuke_fs_on_resize_failure => true,
  }
  -> filesystem { '/dev/nova-local/instances_lv':
    ensure => present,
    fs_type => 'ext4',
    options => '-F -F',
    require => Logical_volume['instances_lv']
  }
  -> file { '/var/lib/nova/instances':
    ensure => 'directory',
    owner => 'root',
    group => 'root',
    mode => '0755',
  }
  -> exec { 'mount /dev/nova-local/instances_lv':
    unless => 'mount | grep -q /var/lib/nova/instances',
    command => 'mount -t ext4 /dev/nova-local/instances_lv /var/lib/nova/instances',
  }
}