Merge pull request #24 from enovance/bug/23/sbadia

Re-enable galera cluster httpchk
This commit is contained in:
Yanis Guenane 2014-01-22 09:13:06 -08:00
commit 3e398c76be
6 changed files with 217 additions and 93 deletions
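For context, a minimal sketch of how a controller node could declare this class once the new clustercheck parameters land. The literal values are illustrative only (in a real deployment they come from os_params) and this declaration is not part of the commit:

class { 'cloud::database::sql':
  api_eth                        => '10.0.0.1',
  galera_master                  => '10.0.0.1',
  galera_nextserver              => ['10.0.0.1', '10.0.0.2', '10.0.0.3'],
  mysql_root_password            => 'secrete',
  mysql_sys_maint_user           => 'sys-maint',
  mysql_sys_maint_password       => 'sys',
  galera_clustercheck_dbuser     => 'clustercheckuser',
  galera_clustercheck_dbpassword => 'clustercheckpassword!',
  galera_clustercheck_ipaddress  => '10.0.0.1',
}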

View File

@@ -17,37 +17,40 @@
#
class cloud::database::sql (
$api_eth = $os_params::api_eth,
$service_provider = sysv,
$galera_nextserver = $os_params::galera_nextserver,
$galera_master = $os_params::galera_master,
$mysql_password = $os_params::mysql_password,
$keystone_db_host = $os_params::keystone_db_host,
$keystone_db_user = $os_params::keystone_db_user,
$keystone_db_password = $os_params::keystone_db_password,
$keystone_db_allowed_hosts = $os_params::keystone_db_allowed_hosts,
$cinder_db_host = $os_params::cinder_db_host,
$cinder_db_user = $os_params::cinder_db_user,
$cinder_db_password = $os_params::cinder_db_password,
$cinder_db_allowed_hosts = $os_params::cinder_db_allowed_hosts,
$glance_db_host = $os_params::glance_db_host,
$glance_db_user = $os_params::glance_db_user,
$glance_db_password = $os_params::glance_db_password,
$glance_db_allowed_hosts = $os_params::glance_db_allowed_hosts,
$heat_db_host = $os_params::heat_db_host,
$heat_db_user = $os_params::heat_db_user,
$heat_db_password = $os_params::heat_db_password,
$heat_db_allowed_hosts = $os_params::heat_db_allowed_hosts,
$nova_db_host = $os_params::nova_db_host,
$nova_db_user = $os_params::nova_db_user,
$nova_db_password = $os_params::nova_db_password,
$nova_db_allowed_hosts = $os_params::nova_db_allowed_hosts,
$neutron_db_host = $os_params::neutron_db_host,
$neutron_db_user = $os_params::neutron_db_user,
$neutron_db_password = $os_params::neutron_db_password,
$neutron_db_allowed_hosts = $os_params::neutron_db_allowed_hosts,
$mysql_password = $os_params::mysql_password,
$mysql_sys_maint = $os_params::mysql_sys_maint
$api_eth = $os_params::api_eth,
$service_provider = 'sysv',
$galera_nextserver = $os_params::galera_nextserver,
$galera_master = $os_params::galera_master,
$keystone_db_host = $os_params::keystone_db_host,
$keystone_db_user = $os_params::keystone_db_user,
$keystone_db_password = $os_params::keystone_db_password,
$keystone_db_allowed_hosts = $os_params::keystone_db_allowed_hosts,
$cinder_db_host = $os_params::cinder_db_host,
$cinder_db_user = $os_params::cinder_db_user,
$cinder_db_password = $os_params::cinder_db_password,
$cinder_db_allowed_hosts = $os_params::cinder_db_allowed_hosts,
$glance_db_host = $os_params::glance_db_host,
$glance_db_user = $os_params::glance_db_user,
$glance_db_password = $os_params::glance_db_password,
$glance_db_allowed_hosts = $os_params::glance_db_allowed_hosts,
$heat_db_host = $os_params::heat_db_host,
$heat_db_user = $os_params::heat_db_user,
$heat_db_password = $os_params::heat_db_password,
$heat_db_allowed_hosts = $os_params::heat_db_allowed_hosts,
$nova_db_host = $os_params::nova_db_host,
$nova_db_user = $os_params::nova_db_user,
$nova_db_password = $os_params::nova_db_password,
$nova_db_allowed_hosts = $os_params::nova_db_allowed_hosts,
$neutron_db_host = $os_params::neutron_db_host,
$neutron_db_user = $os_params::neutron_db_user,
$neutron_db_password = $os_params::neutron_db_password,
$neutron_db_allowed_hosts = $os_params::neutron_db_allowed_hosts,
$mysql_root_password = $os_params::mysql_root_password,
$mysql_sys_maint_user = $os_params::mysql_sys_maint_user,
$mysql_sys_maint_password = $os_params::mysql_sys_maint_password,
$galera_clustercheck_dbuser = $os_params::galera_clustercheck_dbuser,
$galera_clustercheck_dbpassword = $os_params::galera_clustercheck_dbpassword,
$galera_clustercheck_ipaddress = $::ipaddress
) {
include 'xinetd'
@@ -90,7 +93,7 @@ class cloud::database::sql (
class { 'mysql::server':
config_hash => {
bind_address => $api_eth,
root_password => $mysql_password,
root_password => $mysql_root_password,
},
notify => Service['xinetd'],
}
@@ -144,6 +147,7 @@ class cloud::database::sql (
allowed_hosts => $heat_db_allowed_hosts,
}
# Monitoring DB
warning('Database mapping must be updated to puppetlabs/puppetlabs-mysql >= 2.x (see: https://dev.ring.enovance.com/redmine/issues/4510)')
@@ -152,25 +156,50 @@ class cloud::database::sql (
charset => 'utf8',
require => File['/root/.my.cnf']
}
database_user { 'clustercheckuser@localhost':
database_user { "${galera_clustercheck_dbuser}@localhost":
ensure => 'present',
# can not change password in clustercheck script
password_hash => mysql_password('clustercheckpassword!'),
password_hash => mysql_password($galera_clustercheck_dbpassword),
provider => 'mysql',
require => File['/root/.my.cnf']
}
database_grant { 'clustercheckuser@localhost/monitoring':
database_grant { "${galera_clustercheck_dbuser}@localhost/monitoring":
privileges => ['all']
}
database_user { 'sys-maint@localhost':
database_user { "${mysql_sys_maint_user}@localhost":
ensure => 'present',
password_hash => mysql_password($mysql_sys_maint),
password_hash => mysql_password($mysql_sys_maint_password),
provider => 'mysql',
require => File['/root/.my.cnf']
}
Database_user<<| |>>
} # if $::hostname == $galera_master
# Haproxy http monitoring
file_line { 'mysqlchk-in-etc-services':
path => '/etc/services',
line => 'mysqlchk 9200/tcp',
match => '^mysqlchk 9200/tcp$',
notify => Service['xinetd'];
}
file {
'/etc/xinetd.d/mysqlchk':
content => template('cloud/database/mysqlchk.erb'),
owner => 'root',
group => 'root',
mode => '0755',
require => File['/usr/bin/clustercheck'],
notify => Service['xinetd'];
'/usr/bin/clustercheck':
ensure => present,
content => template('cloud/database/clustercheck.erb'),
mode => '0755',
owner => 'root',
group => 'root';
}
exec{'clean-mysql-binlog':
@@ -185,28 +214,15 @@ class cloud::database::sql (
onlyif => "stat ${::mysql::params::datadir}/ib_logfile0 && test `du -sh ${::mysql::params::datadir}/ib_logfile0 | cut -f1` != '256M'",
}
file{'/etc/mysql/sys.cnf':
content => "# Automatically generated. DO NOT TOUCH!
[client]
host = localhost
user = sys-maint
password = ${mysql_sys_maint}
socket = /var/run/mysqld/mysqld.sock
[mysql_upgrade]
host = localhost
user = sys-maint
password = ${mysql_sys_maint}
socket = /var/run/mysqld/mysqld.sock
basedir = /usr
",
ensure => file,
content => template('cloud/database/sys.cnf.erb'),
owner => 'root',
group => 'root',
mode => '0600',
require => Exec['clean-mysql-binlog'],
}
# Disabled because monitor depends on checkmulti which is broken
# class { 'monitor::galera::httpsrv': }
# TODO/WARNING(Gonéri): template changes do not trigger configuration changes
mysql::server::config{'basic_config':
notify_service => true,
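Side note on the Database_user<<| |>> collector kept in the galera_master branch above: it realizes database_user resources exported by other nodes in the site. A minimal, hypothetical export that the collector would pick up (the title and password below are illustrative and not part of this module) could look like:

@@database_user { 'exampleapp@10.0.0.10':
  ensure        => present,
  password_hash => mysql_password('exampleapp_secret'),
  provider      => 'mysql',
}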

View File

@@ -205,7 +205,7 @@ class cloud::loadbalancer(
options => {
'mode' => 'tcp',
'balance' => 'roundrobin',
'option' => ['tcpka', 'tcplog'],
'option' => ['tcpka', 'tcplog', 'httpchk'], # httpchk is mandatory: the check expects HTTP 200 on port 9200
'timeout client' => '400s',
'timeout server' => '400s',
}
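For the httpchk option to have any effect, the Galera pool members also need their health check pointed at the mysqlchk port. A minimal sketch of such a member, assuming the puppetlabs/haproxy balancermember defined type and hypothetical listener, server and address values (this resource is not part of the diff):

haproxy::balancermember { 'galera-node01':
  listening_service => 'galera_cluster',
  ports             => '3306',
  server_names      => 'node01',
  ipaddresses       => '10.0.0.1',
  options           => 'check port 9200 inter 2000 rise 2 fall 5',
}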

View File

@@ -27,36 +27,42 @@ describe 'cloud::database::sql' do
end
let :params do
{ :service_provider => 'sysv',
:api_eth => '10.0.0.1',
:galera_master => '10.0.0.1',
:galera_nextserver => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:mysql_password => 'secrete',
:keystone_db_host => '10.0.0.1',
:keystone_db_user => 'keystone',
:keystone_db_password => 'secrete',
:keystone_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:cinder_db_host => '10.0.0.1',
:cinder_db_user => 'cinder',
:cinder_db_password => 'secrete',
:cinder_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:glance_db_host => '10.0.0.1',
:glance_db_user => 'glance',
:glance_db_password => 'secrete',
:glance_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:heat_db_host => '10.0.0.1',
:heat_db_user => 'heat',
:heat_db_password => 'secrete',
:heat_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:nova_db_host => '10.0.0.1',
:nova_db_user => 'nova',
:nova_db_password => 'secrete',
:nova_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:neutron_db_host => '10.0.0.1',
:neutron_db_user => 'neutron',
:neutron_db_password => 'secrete',
:neutron_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:mysql_sys_maint => 'sys' }
{
:service_provider => 'sysv',
:api_eth => '10.0.0.1',
:galera_master => '10.0.0.1',
:galera_nextserver => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:keystone_db_host => '10.0.0.1',
:keystone_db_user => 'keystone',
:keystone_db_password => 'secrete',
:keystone_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:cinder_db_host => '10.0.0.1',
:cinder_db_user => 'cinder',
:cinder_db_password => 'secrete',
:cinder_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:glance_db_host => '10.0.0.1',
:glance_db_user => 'glance',
:glance_db_password => 'secrete',
:glance_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:heat_db_host => '10.0.0.1',
:heat_db_user => 'heat',
:heat_db_password => 'secrete',
:heat_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:nova_db_host => '10.0.0.1',
:nova_db_user => 'nova',
:nova_db_password => 'secrete',
:nova_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:neutron_db_host => '10.0.0.1',
:neutron_db_user => 'neutron',
:neutron_db_password => 'secrete',
:neutron_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
:mysql_root_password => 'secrete',
:mysql_sys_maint_user => 'sys-maint',
:mysql_sys_maint_password => 'sys',
:galera_clustercheck_dbuser => 'clustercheckuser',
:galera_clustercheck_dbpassword => 'clustercheckpassword!',
:galera_clustercheck_ipaddress => '10.0.0.1'
}
end
it 'configure mysql galera server' do
@@ -67,10 +73,25 @@ describe 'cloud::database::sql' do
)
should contain_class('mysql::server').with(
:config_hash => { 'bind_address' => '10.0.0.1', 'root_password' => 'secrete' },
:config_hash => { 'bind_address' => '10.0.0.1', 'root_password' => params[:mysql_root_password] },
:notify => 'Service[xinetd]'
)
end
end # configure mysql galera server
context 'configure mysqlchk http replication' do
it { should contain_file_line('mysqlchk-in-etc-services').with(
:line => 'mysqlchk 9200/tcp',
:path => '/etc/services',
:notify => 'Service[xinetd]'
)}
it { should contain_file('/etc/xinetd.d/mysqlchk').with_mode('0755') }
it { should contain_file('/usr/bin/clustercheck').with_mode('0755') }
it { should contain_file('/usr/bin/clustercheck').with_content(/MYSQL_USERNAME="#{params[:galera_clustercheck_dbuser]}"/)}
it { should contain_file('/usr/bin/clustercheck').with_content(/MYSQL_PASSWORD="#{params[:galera_clustercheck_dbpassword]}"/)}
it { should contain_file('/etc/xinetd.d/mysqlchk').with_content(/bind = #{params[:galera_clustercheck_ipaddress]}/)}
end # configure mysqlchk http replication
context 'configure databases on the galera master server' do
@@ -137,23 +158,35 @@ describe 'cloud::database::sql' do
:ensure => 'present',
:charset => 'utf8'
)
should contain_database_user('clustercheckuser@localhost').with(
should contain_database_user("#{params[:galera_clustercheck_dbuser]}@localhost").with(
:ensure => 'present',
:password_hash => '*FDC68394456829A7344C2E9D4CDFD43DCE2EFD8F',
:provider => 'mysql'
)
should contain_database_grant('clustercheckuser@localhost/monitoring').with(
should contain_database_grant("#{params[:galera_clustercheck_dbuser]}@localhost/monitoring").with(
:privileges => 'all'
)
should contain_database_user('sys-maint@localhost').with(
should contain_database_user("#{params[:mysql_sys_maint_user]}@localhost").with(
:ensure => 'present',
:password_hash => '*BE353D0D7826681F8B7C136ED9824915F5B99E7D',
:provider => 'mysql'
)
end
end
end # configure monitoring database
end # configure databases on the galera master server
end
context 'configure MySQL sys config' do
it { should contain_file('/etc/mysql/sys.cnf').with(
:mode => '0600',
:owner => 'root',
:group => 'root',
:require => 'Exec[clean-mysql-binlog]'
)}
it { should contain_file('/etc/mysql/sys.cnf').with_content(/password = #{params[:mysql_sys_maint_password]}/)}
end # configure MySQL sys config
end # openstack database sql
context 'on Debian platforms' do
let :facts do

View File

@@ -0,0 +1,38 @@
#!/bin/bash
# Managed by puppet
# Module cloud
#
# Script to make a proxy (i.e. HAProxy) capable of monitoring Percona XtraDB Cluster nodes properly
#
# Author: Olaf van Zandwijk olaf.vanzandwijk@nedap.com
# Documentation and download: https://github.com/olafz/percona-clustercheck
#
# Based on the original script from Unai Rodriguez
#
MYSQL_USERNAME="<%= @galera_clustercheck_dbuser %>"
MYSQL_PASSWORD="<%= @galera_clustercheck_dbpassword %>"
ERR_FILE="/dev/null"
AVAILABLE_WHEN_DONOR=0
#
# Perform the query to check the wsrep_local_state
#
WSREP_STATUS=`mysql --user=${MYSQL_USERNAME} --password=${MYSQL_PASSWORD} -e "SHOW STATUS LIKE 'wsrep_local_state';" 2>${ERR_FILE} | awk '{if (NR!=1){print $2}}' 2>${ERR_FILE}`
if [[ "${WSREP_STATUS}" == "4" ]] || [[ "${WSREP_STATUS}" == "2" && ${AVAILABLE_WHEN_DONOR} == 1 ]]
then
# Percona XtraDB Cluster node local state is 'Synced' == return HTTP 200
/bin/echo -en "HTTP/1.1 200 OK\r\n"
/bin/echo -en "Content-Type: text/plain\r\n"
/bin/echo -en "\r\n"
/bin/echo -en "Mariadb Cluster Node is synced.\r\n"
/bin/echo -en "\r\n"
else
# Percona XtraDB Cluster node local state is not 'Synced' == return HTTP 503
/bin/echo -en "HTTP/1.1 503 Service Unavailable\r\n"
/bin/echo -en "Content-Type: text/plain\r\n"
/bin/echo -en "\r\n"
/bin/echo -en "Mariadb Cluster Node is not synced.\r\n"
/bin/echo -en "\r\n"
fi

View File

@@ -0,0 +1,23 @@
# Managed by puppet
# Module cloud
#
# default: on
# description: mysqlchk
service mysqlchk
{
# this is a config for xinetd, place it in /etc/xinetd.d/
disable = no
flags = REUSE
socket_type = stream
port = 9200
wait = no
user = nobody
server = /usr/bin/clustercheck
log_on_failure += USERID
#FIXME(sbadia) Security: Restrict this parameter to HAProxy pool.
only_from = 0.0.0.0/0
bind = <%= @galera_clustercheck_ipaddress %>
# recommended to put the IPs that need
# to connect exclusively (security purposes)
per_source = UNLIMITED
}

View File

@@ -0,0 +1,14 @@
# Managed by Puppet
# Module cloud::database::sql
#
[client]
host = localhost
user = sys-maint
password = <%= @mysql_sys_maint_password %>
socket = /var/run/mysqld/mysqld.sock
[mysql_upgrade]
host = localhost
user = sys-maint
password = <%= @mysql_sys_maint_password %>
socket = /var/run/mysqld/mysqld.sock
basedir = /usr