lint: Unpin puppet-lint

puppet-lint 1.0.x has some nice features, most notably auto-fix: the new
--fix flag rewrites fixable style violations, such as arrow alignment, in place.
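
For reference, the same behaviour can be wired into the existing Rake
setup. A minimal Rakefile sketch, assuming puppet-lint's standard Rake
task (the fix option is illustrative and needs a release that supports it):

    require 'puppet-lint/tasks/puppet-lint'

    # Fail the build on warnings, and let 'rake lint' rewrite fixable
    # violations (such as arrow alignment) in place, the equivalent of
    # running 'puppet-lint --fix' by hand.
    PuppetLint.configuration.fail_on_warnings = true
    PuppetLint.configuration.fix = true
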
Sebastien Badia 2014-09-27 11:47:49 +02:00
parent 6ba52933b1
commit 7a81eca442
9 changed files with 70 additions and 70 deletions

@@ -2,7 +2,7 @@ source 'https://rubygems.org'
 group :development, :test do
   gem 'puppetlabs_spec_helper', :require => false
-  gem 'puppet-lint', '~> 0.3.2'
+  gem 'puppet-lint'
   gem 'rake', '10.1.1'
   gem 'puppet-syntax'
   gem 'rspec-puppet', :git => 'https://github.com/rodjek/rspec-puppet.git'

@@ -124,7 +124,7 @@ class cloud::dashboard(
     keystone_url    => $keystone_url,
     cache_server_ip => false,
     django_debug    => $debug,
-    neutron_options  => { 'enable_lb' => true },
+    neutron_options => { 'enable_lb' => true },
     listen_ssl      => $listen_ssl,
     horizon_cert    => $horizon_cert,
     horizon_key     => $horizon_key,

@@ -508,10 +508,10 @@ class cloud::identity (
   keystone_role { $identity_roles_addons: ensure => present }

   class {'keystone::endpoint':
-    public_url    => "${ks_keystone_public_proto}://${ks_keystone_public_host}:${ks_keystone_public_port}",
-    internal_url  => "${ks_keystone_internal_proto}://${ks_keystone_internal_host}:${ks_keystone_internal_port}",
-    admin_url     => "${ks_keystone_admin_proto}://${ks_keystone_admin_host}:${ks_keystone_admin_port}",
-    region        => $region,
+    public_url   => "${ks_keystone_public_proto}://${ks_keystone_public_host}:${ks_keystone_public_port}",
+    internal_url => "${ks_keystone_internal_proto}://${ks_keystone_internal_host}:${ks_keystone_internal_port}",
+    admin_url    => "${ks_keystone_admin_proto}://${ks_keystone_admin_host}:${ks_keystone_admin_port}",
+    region       => $region,
   }

   # TODO(EmilienM) Disable WSGI - bug #98
@@ -556,29 +556,29 @@ class cloud::identity (
   }

   class { 'nova::keystone::auth':
-    cinder             => true,
-    admin_address      => $ks_nova_admin_host,
-    internal_address   => $ks_nova_internal_host,
-    public_address     => $ks_nova_public_host,
-    compute_port       => $ks_nova_public_port,
-    public_protocol    => $ks_nova_public_proto,
-    admin_protocol     => $ks_nova_admin_proto,
-    internal_protocol  => $ks_nova_internal_proto,
-    ec2_port           => $ks_ec2_public_port,
-    region             => $region,
-    password           => $ks_nova_password
+    cinder            => true,
+    admin_address     => $ks_nova_admin_host,
+    internal_address  => $ks_nova_internal_host,
+    public_address    => $ks_nova_public_host,
+    compute_port      => $ks_nova_public_port,
+    public_protocol   => $ks_nova_public_proto,
+    admin_protocol    => $ks_nova_admin_proto,
+    internal_protocol => $ks_nova_internal_proto,
+    ec2_port          => $ks_ec2_public_port,
+    region            => $region,
+    password          => $ks_nova_password
   }

   class { 'neutron::keystone::auth':
-    admin_address      => $ks_neutron_admin_host,
-    internal_address   => $ks_neutron_internal_host,
-    public_address     => $ks_neutron_public_host,
-    public_protocol    => $ks_neutron_public_proto,
-    internal_protocol  => $ks_neutron_internal_proto,
-    admin_protocol     => $ks_neutron_admin_proto,
-    port               => $ks_neutron_public_port,
-    region             => $region,
-    password           => $ks_neutron_password
+    admin_address     => $ks_neutron_admin_host,
+    internal_address  => $ks_neutron_internal_host,
+    public_address    => $ks_neutron_public_host,
+    public_protocol   => $ks_neutron_public_proto,
+    internal_protocol => $ks_neutron_internal_proto,
+    admin_protocol    => $ks_neutron_admin_proto,
+    port              => $ks_neutron_public_port,
+    region            => $region,
+    password          => $ks_neutron_password
   }

   class { 'cinder::keystone::auth':

@@ -62,10 +62,10 @@ This node is under the control of Puppet ${::puppetversion}.

   # Strong root password for all servers
   user { 'root':
-    ensure    => 'present',
-    gid       => '0',
-    password  => $root_password,
-    uid       => '0',
+    ensure   => 'present',
+    gid      => '0',
+    password => $root_password,
+    uid      => '0',
   }

   $cron_service_name = $::osfamily ? {

@@ -359,16 +359,16 @@ class cloud::loadbalancer(
     bind_options => $metadata_bind_options,
   }
   cloud::loadbalancer::binding { 'spice_cluster':
-    ip            => $spice,
-    port          => $spice_port,
-    options       => {
+    ip           => $spice,
+    port         => $spice_port,
+    options      => {
       'mode'           => 'tcp',
       'option'         => ['tcpka', 'tcplog', 'forwardfor'],
       'balance'        => 'source',
       'timeout server' => '120m',
       'timeout client' => '120m',
     },
-    bind_options  => $spice_bind_options,
+    bind_options => $spice_bind_options,
   }
   cloud::loadbalancer::binding { 'trove_api_cluster':
     ip           => $trove_api,
@@ -376,16 +376,16 @@ class cloud::loadbalancer(
     bind_options => $trove_bind_options,
   }
   cloud::loadbalancer::binding { 'glance_api_cluster':
-    ip            => $glance_api,
-    options       => {
+    ip           => $glance_api,
+    options      => {
       'mode'           => 'tcp',
       'balance'        => 'source',
       'option'         => ['tcpka', 'tcplog', 'forwardfor'],
       'timeout server' => '120m',
       'timeout client' => '120m',
     },
-    port          => $ks_glance_api_public_port,
-    bind_options  => $glance_api_bind_options,
+    port         => $ks_glance_api_public_port,
+    bind_options => $glance_api_bind_options,
   }
   cloud::loadbalancer::binding { 'glance_registry_cluster':
     ip           => $glance_registry,

@@ -84,17 +84,17 @@ allow_versions = on
   ensure_resource('cloud::object::set_io_scheduler', keys($device_config_hash))

   @@ring_container_device { "${storage_eth}:${container_port}/${ring_container_device}":
-    zone    => $swift_zone,
-    weight  => '100.0',
+    zone   => $swift_zone,
+    weight => '100.0',
   }
   @@ring_account_device { "${storage_eth}:${account_port}/${ring_account_device}":
-    zone    => $swift_zone,
-    weight  => '100.0',
+    zone   => $swift_zone,
+    weight => '100.0',
   }

   $object_urls = prefix(keys($device_config_hash), "${storage_eth}:${object_port}/")
   @@ring_object_device {$object_urls:
-    zone    => $swift_zone,
-    weight  => '100.0',
+    zone   => $swift_zone,
+    weight => '100.0',
   }
   Swift::Ringsync<<| |>> ->

@@ -58,10 +58,10 @@ class cloud::spof(
     disable => true
   }
   file { '/usr/lib/ocf/resource.d/heartbeat/ceilometer-agent-central':
-    source  => 'puppet:///modules/cloud/heartbeat/ceilometer-agent-central',
-    mode    => '0755',
-    owner   => 'root',
-    group   => 'root',
+    source => 'puppet:///modules/cloud/heartbeat/ceilometer-agent-central',
+    mode   => '0755',
+    owner  => 'root',
+    group  => 'root',
   } ->
   exec {'pcmk_ceilometer_agent_central':
     command => 'pcs resource create ceilometer-agent-central ocf:heartbeat:ceilometer-agent-central',
@@ -91,10 +91,10 @@ class cloud::spof(
     'cluster-recheck-interval': value => '5min';
   } ->
   file { '/usr/lib/ocf/resource.d/heartbeat/ceilometer-agent-central':
-    source  => 'puppet:///modules/cloud/heartbeat/ceilometer-agent-central',
-    mode    => '0755',
-    owner   => 'root',
-    group   => 'root',
+    source => 'puppet:///modules/cloud/heartbeat/ceilometer-agent-central',
+    mode   => '0755',
+    owner  => 'root',
+    group  => 'root',
   } ->
   cs_primitive { 'ceilometer-agent-central':
     primitive_class => 'ocf',

@@ -102,9 +102,9 @@ class cloud::storage::rbd::pools(
     }

     @@exec { 'set_secret_value_virsh':
-      command      => "virsh secret-set-value --secret ${ceph_fsid} --base64 ${::ceph_keyring_cinder}",
-      tag          => 'ceph_compute_set_secret',
-      refreshonly  => true,
+      command     => "virsh secret-set-value --secret ${ceph_fsid} --base64 ${::ceph_keyring_cinder}",
+      tag         => 'ceph_compute_set_secret',
+      refreshonly => true,
     }

   } # !empty($::ceph_admin_key)

@@ -133,21 +133,21 @@ define cloud::volume::backend::netapp (
   cinder::backend::netapp { $name:
-    netapp_server_hostname     => $netapp_server_hostname,
-    netapp_login               => $netapp_login,
-    netapp_password            => $netapp_password,
-    netapp_server_port         => $netapp_server_port,
-    netapp_size_multiplier     => $netapp_size_multiplier,
-    netapp_storage_family      => $netapp_storage_family,
-    netapp_storage_protocol    => $netapp_storage_protocol,
-    netapp_transport_type      => $netapp_transport_type,
-    netapp_vfiler              => $netapp_vfiler,
-    netapp_volume_list         => $netapp_volume_list,
-    netapp_vserver             => $netapp_vserver,
-    expiry_thres_minutes       => $expiry_thres_minutes,
-    thres_avl_size_perc_start  => $thres_avl_size_perc_start,
-    thres_avl_size_perc_stop   => $thres_avl_size_perc_stop,
-    nfs_shares_config          => $nfs_shares_config,
+    netapp_server_hostname    => $netapp_server_hostname,
+    netapp_login              => $netapp_login,
+    netapp_password           => $netapp_password,
+    netapp_server_port        => $netapp_server_port,
+    netapp_size_multiplier    => $netapp_size_multiplier,
+    netapp_storage_family     => $netapp_storage_family,
+    netapp_storage_protocol   => $netapp_storage_protocol,
+    netapp_transport_type     => $netapp_transport_type,
+    netapp_vfiler             => $netapp_vfiler,
+    netapp_volume_list        => $netapp_volume_list,
+    netapp_vserver            => $netapp_vserver,
+    expiry_thres_minutes      => $expiry_thres_minutes,
+    thres_avl_size_perc_start => $thres_avl_size_perc_start,
+    thres_avl_size_perc_stop  => $thres_avl_size_perc_stop,
+    nfs_shares_config         => $nfs_shares_config,
   }

   @cinder::type { $volume_backend_name: