Add golden fuelweb configurations.
This commit is contained in:
parent
4054011405
commit
76066c0128
@ -0,0 +1,18 @@
|
||||
#
|
||||
# This can be used to setup URI aliases for frequently
|
||||
# used connection URIs. Aliases may contain only the
|
||||
# characters a-Z, 0-9, _, -.
|
||||
#
|
||||
# Following the '=' may be any valid libvirt connection
|
||||
# URI, including arbitrary parameters
|
||||
|
||||
#uri_aliases = [
|
||||
# "hail=qemu+ssh://root@hail.cloud.example.com/system",
|
||||
# "sleet=qemu+ssh://root@sleet.cloud.example.com/system",
|
||||
#]
|
||||
|
||||
#
|
||||
# This can be used to prevent probing of the hypervisor
|
||||
# driver when no URI is supplied by the application.
|
||||
|
||||
#uri_default = "qemu:///system"
|
398
config_samples/fuel_web/golden_fuelweb/cmp/libvirt/libvirtd.conf
Normal file
398
config_samples/fuel_web/golden_fuelweb/cmp/libvirt/libvirtd.conf
Normal file
@ -0,0 +1,398 @@
|
||||
# Master libvirt daemon configuration file
|
||||
#
|
||||
# For further information consult http://libvirt.org/format.html
|
||||
#
|
||||
# NOTE: the tests/daemon-conf regression test script requires
|
||||
# that each "PARAMETER = VALUE" line in this file have the parameter
|
||||
# name just after a leading "#".
|
||||
|
||||
#################################################################
|
||||
#
|
||||
# Network connectivity controls
|
||||
#
|
||||
|
||||
# Flag listening for secure TLS connections on the public TCP/IP port.
|
||||
# NB, must pass the --listen flag to the libvirtd process for this to
|
||||
# have any effect.
|
||||
#
|
||||
# It is necessary to setup a CA and issue server certificates before
|
||||
# using this capability.
|
||||
#
|
||||
# This is enabled by default, uncomment this to disable it
|
||||
#listen_tls = 0
|
||||
|
||||
# Listen for unencrypted TCP connections on the public TCP/IP port.
|
||||
# NB, must pass the --listen flag to the libvirtd process for this to
|
||||
# have any effect.
|
||||
#
|
||||
# Using the TCP socket requires SASL authentication by default. Only
|
||||
# SASL mechanisms which support data encryption are allowed. This is
|
||||
# DIGEST_MD5 and GSSAPI (Kerberos5)
|
||||
#
|
||||
# This is disabled by default, uncomment this to enable it.
|
||||
#listen_tcp = 1
|
||||
|
||||
|
||||
|
||||
# Override the port for accepting secure TLS connections
|
||||
# This can be a port number, or service name
|
||||
#
|
||||
#tls_port = "16514"
|
||||
|
||||
# Override the port for accepting insecure TCP connections
|
||||
# This can be a port number, or service name
|
||||
#
|
||||
#tcp_port = "16509"
|
||||
|
||||
|
||||
# Override the default configuration which binds to all network
|
||||
# interfaces. This can be a numeric IPv4/6 address, or hostname
|
||||
#
|
||||
#listen_addr = "192.168.0.1"
|
||||
|
||||
|
||||
# Flag toggling mDNS advertizement of the libvirt service.
|
||||
#
|
||||
# Alternatively can disable for all services on a host by
|
||||
# stopping the Avahi daemon
|
||||
#
|
||||
# This is disabled by default, uncomment this to enable it
|
||||
#mdns_adv = 1
|
||||
|
||||
# Override the default mDNS advertizement name. This must be
|
||||
# unique on the immediate broadcast network.
|
||||
#
|
||||
# The default is "Virtualization Host HOSTNAME", where HOSTNAME
|
||||
# is subsituted for the short hostname of the machine (without domain)
|
||||
#
|
||||
#mdns_name = "Virtualization Host Joe Demo"
|
||||
|
||||
|
||||
#################################################################
|
||||
#
|
||||
# UNIX socket access controls
|
||||
#
|
||||
|
||||
# Set the UNIX domain socket group ownership. This can be used to
|
||||
# allow a 'trusted' set of users access to management capabilities
|
||||
# without becoming root.
|
||||
#
|
||||
# This is restricted to 'root' by default.
|
||||
#unix_sock_group = "libvirt"
|
||||
|
||||
# Set the UNIX socket permissions for the R/O socket. This is used
|
||||
# for monitoring VM status only
|
||||
#
|
||||
# Default allows any user. If setting group ownership may want to
|
||||
# restrict this to:
|
||||
#unix_sock_ro_perms = "0777"
|
||||
|
||||
# Set the UNIX socket permissions for the R/W socket. This is used
|
||||
# for full management of VMs
|
||||
#
|
||||
# Default allows only root. If PolicyKit is enabled on the socket,
|
||||
# the default will change to allow everyone (eg, 0777)
|
||||
#
|
||||
# If not using PolicyKit and setting group ownership for access
|
||||
# control then you may want to relax this to:
|
||||
#unix_sock_rw_perms = "0770"
|
||||
|
||||
# Set the name of the directory in which sockets will be found/created.
|
||||
#unix_sock_dir = "/var/run/libvirt"
|
||||
|
||||
#################################################################
|
||||
#
|
||||
# Authentication.
|
||||
#
|
||||
# - none: do not perform auth checks. If you can connect to the
|
||||
# socket you are allowed. This is suitable if there are
|
||||
# restrictions on connecting to the socket (eg, UNIX
|
||||
# socket permissions), or if there is a lower layer in
|
||||
# the network providing auth (eg, TLS/x509 certificates)
|
||||
#
|
||||
# - sasl: use SASL infrastructure. The actual auth scheme is then
|
||||
# controlled from /etc/sasl2/libvirt.conf. For the TCP
|
||||
# socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
|
||||
# For non-TCP or TLS sockets, any scheme is allowed.
|
||||
#
|
||||
# - polkit: use PolicyKit to authenticate. This is only suitable
|
||||
# for use on the UNIX sockets. The default policy will
|
||||
# require a user to supply their own password to gain
|
||||
# full read/write access (aka sudo like), while anyone
|
||||
# is allowed read/only access.
|
||||
#
|
||||
# Set an authentication scheme for UNIX read-only sockets
|
||||
# By default socket permissions allow anyone to connect
|
||||
#
|
||||
# To restrict monitoring of domains you may wish to enable
|
||||
# an authentication mechanism here
|
||||
#auth_unix_ro = "none"
|
||||
|
||||
# Set an authentication scheme for UNIX read-write sockets
|
||||
# By default socket permissions only allow root. If PolicyKit
|
||||
# support was compiled into libvirt, the default will be to
|
||||
# use 'polkit' auth.
|
||||
#
|
||||
# If the unix_sock_rw_perms are changed you may wish to enable
|
||||
# an authentication mechanism here
|
||||
#auth_unix_rw = "none"
|
||||
|
||||
# Change the authentication scheme for TCP sockets.
|
||||
#
|
||||
# If you don't enable SASL, then all TCP traffic is cleartext.
|
||||
# Don't do this outside of a dev/test scenario. For real world
|
||||
# use, always enable SASL and use the GSSAPI or DIGEST-MD5
|
||||
# mechanism in /etc/sasl2/libvirt.conf
|
||||
#auth_tcp = "sasl"
|
||||
|
||||
# Change the authentication scheme for TLS sockets.
|
||||
#
|
||||
# TLS sockets already have encryption provided by the TLS
|
||||
# layer, and limited authentication is done by certificates
|
||||
#
|
||||
# It is possible to make use of any SASL authentication
|
||||
# mechanism as well, by using 'sasl' for this option
|
||||
#auth_tls = "none"
|
||||
|
||||
|
||||
|
||||
#################################################################
|
||||
#
|
||||
# TLS x509 certificate configuration
|
||||
#
|
||||
|
||||
|
||||
# Override the default server key file path
|
||||
#
|
||||
#key_file = "/etc/pki/libvirt/private/serverkey.pem"
|
||||
|
||||
# Override the default server certificate file path
|
||||
#
|
||||
#cert_file = "/etc/pki/libvirt/servercert.pem"
|
||||
|
||||
# Override the default CA certificate path
|
||||
#
|
||||
#ca_file = "/etc/pki/CA/cacert.pem"
|
||||
|
||||
# Specify a certificate revocation list.
|
||||
#
|
||||
# Defaults to not using a CRL, uncomment to enable it
|
||||
#crl_file = "/etc/pki/CA/crl.pem"
|
||||
|
||||
|
||||
|
||||
#################################################################
|
||||
#
|
||||
# Authorization controls
|
||||
#
|
||||
|
||||
|
||||
# Flag to disable verification of our own server certificates
|
||||
#
|
||||
# When libvirtd starts it performs some sanity checks against
|
||||
# its own certificates.
|
||||
#
|
||||
# Default is to always run sanity checks. Uncommenting this
|
||||
# will disable sanity checks which is not a good idea
|
||||
#tls_no_sanity_certificate = 1
|
||||
|
||||
# Flag to disable verification of client certificates
|
||||
#
|
||||
# Client certificate verification is the primary authentication mechanism.
|
||||
# Any client which does not present a certificate signed by the CA
|
||||
# will be rejected.
|
||||
#
|
||||
# Default is to always verify. Uncommenting this will disable
|
||||
# verification - make sure an IP whitelist is set
|
||||
#tls_no_verify_certificate = 1
|
||||
|
||||
|
||||
# A whitelist of allowed x509 Distinguished Names
|
||||
# This list may contain wildcards such as
|
||||
#
|
||||
# "C=GB,ST=London,L=London,O=Red Hat,CN=*"
|
||||
#
|
||||
# See the POSIX fnmatch function for the format of the wildcards.
|
||||
#
|
||||
# NB If this is an empty list, no client can connect, so comment out
|
||||
# entirely rather than using empty list to disable these checks
|
||||
#
|
||||
# By default, no DN's are checked
|
||||
#tls_allowed_dn_list = ["DN1", "DN2"]
|
||||
|
||||
|
||||
# A whitelist of allowed SASL usernames. The format for usernames
|
||||
# depends on the SASL authentication mechanism. Kerberos usernames
|
||||
# look like username@REALM
|
||||
#
|
||||
# This list may contain wildcards such as
|
||||
#
|
||||
# "*@EXAMPLE.COM"
|
||||
#
|
||||
# See the POSIX fnmatch function for the format of the wildcards.
|
||||
#
|
||||
# NB If this is an empty list, no client can connect, so comment out
|
||||
# entirely rather than using empty list to disable these checks
|
||||
#
|
||||
# By default, no Username's are checked
|
||||
#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
|
||||
|
||||
|
||||
|
||||
#################################################################
|
||||
#
|
||||
# Processing controls
|
||||
#
|
||||
|
||||
# The maximum number of concurrent client connections to allow
|
||||
# over all sockets combined.
|
||||
#max_clients = 20
|
||||
|
||||
|
||||
# The minimum limit sets the number of workers to start up
|
||||
# initially. If the number of active clients exceeds this,
|
||||
# then more threads are spawned, upto max_workers limit.
|
||||
# Typically you'd want max_workers to equal maximum number
|
||||
# of clients allowed
|
||||
#min_workers = 5
|
||||
#max_workers = 20
|
||||
|
||||
|
||||
# The number of priority workers. If all workers from above
|
||||
# pool will stuck, some calls marked as high priority
|
||||
# (notably domainDestroy) can be executed in this pool.
|
||||
#prio_workers = 5
|
||||
|
||||
# Total global limit on concurrent RPC calls. Should be
|
||||
# at least as large as max_workers. Beyond this, RPC requests
|
||||
# will be read into memory and queued. This directly impact
|
||||
# memory usage, currently each request requires 256 KB of
|
||||
# memory. So by default upto 5 MB of memory is used
|
||||
#
|
||||
# XXX this isn't actually enforced yet, only the per-client
|
||||
# limit is used so far
|
||||
#max_requests = 20
|
||||
|
||||
# Limit on concurrent requests from a single client
|
||||
# connection. To avoid one client monopolizing the server
|
||||
# this should be a small fraction of the global max_requests
|
||||
# and max_workers parameter
|
||||
#max_client_requests = 5
|
||||
|
||||
#################################################################
|
||||
#
|
||||
# Logging controls
|
||||
#
|
||||
|
||||
# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
|
||||
# basically 1 will log everything possible
|
||||
#log_level = 3
|
||||
|
||||
# Logging filters:
|
||||
# A filter allows to select a different logging level for a given category
|
||||
# of logs
|
||||
# The format for a filter is one of:
|
||||
# x:name
|
||||
# x:+name
|
||||
# where name is a string which is matched against source file name,
|
||||
# e.g., "remote", "qemu", or "util/json", the optional "+" prefix
|
||||
# tells libvirt to log stack trace for each message matching name,
|
||||
# and x is the minimal level where matching messages should be logged:
|
||||
# 1: DEBUG
|
||||
# 2: INFO
|
||||
# 3: WARNING
|
||||
# 4: ERROR
|
||||
#
|
||||
# Multiple filter can be defined in a single @filters, they just need to be
|
||||
# separated by spaces.
|
||||
#
|
||||
# e.g. to only get warning or errors from the remote layer and only errors
|
||||
# from the event layer:
|
||||
#log_filters="3:remote 4:event"
|
||||
|
||||
# Logging outputs:
|
||||
# An output is one of the places to save logging information
|
||||
# The format for an output can be:
|
||||
# x:stderr
|
||||
# output goes to stderr
|
||||
# x:syslog:name
|
||||
# use syslog for the output and use the given name as the ident
|
||||
# x:file:file_path
|
||||
# output to a file, with the given filepath
|
||||
# In all case the x prefix is the minimal level, acting as a filter
|
||||
# 1: DEBUG
|
||||
# 2: INFO
|
||||
# 3: WARNING
|
||||
# 4: ERROR
|
||||
#
|
||||
# Multiple output can be defined, they just need to be separated by spaces.
|
||||
# e.g. to log all warnings and errors to syslog under the libvirtd ident:
|
||||
#log_outputs="3:syslog:libvirtd"
|
||||
#
|
||||
|
||||
# Log debug buffer size: default 64
|
||||
# The daemon keeps an internal debug log buffer which will be dumped in case
|
||||
# of crash or upon receiving a SIGUSR2 signal. This setting allows to override
|
||||
# the default buffer size in kilobytes.
|
||||
# If value is 0 or less the debug log buffer is deactivated
|
||||
#log_buffer_size = 64
|
||||
|
||||
|
||||
##################################################################
|
||||
#
|
||||
# Auditing
|
||||
#
|
||||
# This setting allows usage of the auditing subsystem to be altered:
|
||||
#
|
||||
# audit_level == 0 -> disable all auditing
|
||||
# audit_level == 1 -> enable auditing, only if enabled on host (default)
|
||||
# audit_level == 2 -> enable auditing, and exit if disabled on host
|
||||
#
|
||||
#audit_level = 2
|
||||
#
|
||||
# If set to 1, then audit messages will also be sent
|
||||
# via libvirt logging infrastructure. Defaults to 0
|
||||
#
|
||||
#audit_logging = 1
|
||||
|
||||
###################################################################
|
||||
# UUID of the host:
|
||||
# Provide the UUID of the host here in case the command
|
||||
# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
|
||||
# 'dmidecode' does not provide a valid UUID and none is provided here, a
|
||||
# temporary UUID will be generated.
|
||||
# Keep the format of the example UUID below. UUID must not have all digits
|
||||
# be the same.
|
||||
|
||||
# NB This default all-zeros UUID will not work. Replace
|
||||
# it with the output of the 'uuidgen' command and then
|
||||
# uncomment this entry
|
||||
#host_uuid = "00000000-0000-0000-0000-000000000000"
|
||||
|
||||
###################################################################
|
||||
# Keepalive protocol:
|
||||
# This allows libvirtd to detect broken client connections or even
|
||||
# dead client. A keepalive message is sent to a client after
|
||||
# keepalive_interval seconds of inactivity to check if the client is
|
||||
# still responding; keepalive_count is a maximum number of keepalive
|
||||
# messages that are allowed to be sent to the client without getting
|
||||
# any response before the connection is considered broken. In other
|
||||
# words, the connection is automatically closed approximately after
|
||||
# keepalive_interval * (keepalive_count + 1) seconds since the last
|
||||
# message received from the client. If keepalive_interval is set to
|
||||
# -1, libvirtd will never send keepalive requests; however clients
|
||||
# can still send them and the deamon will send responses. When
|
||||
# keepalive_count is set to 0, connections will be automatically
|
||||
# closed after keepalive_interval seconds of inactivity without
|
||||
# sending any keepalive messages.
|
||||
#
|
||||
#keepalive_interval = 5
|
||||
#keepalive_count = 5
|
||||
#
|
||||
# If set to 1, libvirtd will refuse to talk to clients that do not
|
||||
# support keepalive protocol. Defaults to 0.
|
||||
#
|
||||
#keepalive_required = 1
|
||||
listen_tls = 0
|
||||
listen_tcp = 1
|
||||
auth_tcp = "none"
|
31
config_samples/fuel_web/golden_fuelweb/cmp/libvirt/lxc.conf
Normal file
31
config_samples/fuel_web/golden_fuelweb/cmp/libvirt/lxc.conf
Normal file
@ -0,0 +1,31 @@
|
||||
# Master configuration file for the LXC driver.
|
||||
# All settings described here are optional - if omitted, sensible
|
||||
# defaults are used.
|
||||
|
||||
# By default, log messages generated by the lxc controller go to the
|
||||
# container logfile. It is also possible to accumulate log messages
|
||||
# from all lxc controllers along with libvirtd's log outputs. In this
|
||||
# case, the lxc controller will honor either LIBVIRT_LOG_OUTPUTS or
|
||||
# log_outputs from libvirtd.conf.
|
||||
#
|
||||
# This is disabled by default, uncomment below to enable it.
|
||||
#
|
||||
#log_with_libvirtd = 1
|
||||
|
||||
|
||||
# The default security driver is SELinux. If SELinux is disabled
|
||||
# on the host, then the security driver will automatically disable
|
||||
# itself. If you wish to disable QEMU SELinux security driver while
|
||||
# leaving SELinux enabled for the host in general, then set this
|
||||
# to 'none' instead.
|
||||
#
|
||||
#security_driver = "selinux"
|
||||
|
||||
# If set to non-zero, then the default security labeling
|
||||
# will make guests confined. If set to zero, then guests
|
||||
# will be unconfined by default. Defaults to 0.
|
||||
#security_default_confined = 1
|
||||
|
||||
# If set to non-zero, then attempts to create unconfined
|
||||
# guests will be blocked. Defaults to 0.
|
||||
#security_require_confined = 1
|
@ -0,0 +1,3 @@
|
||||
<filter name='allow-arp' chain='arp'>
|
||||
<rule direction='inout' action='accept'/>
|
||||
</filter>
|
@ -0,0 +1,24 @@
|
||||
<filter name='allow-dhcp-server' chain='ipv4'>
|
||||
|
||||
<!-- accept outgoing DHCP requests -->
|
||||
<!-- note, this rule must be evaluated before general MAC broadcast
|
||||
traffic is discarded since DHCP requests use MAC broadcast -->
|
||||
<rule action='accept' direction='out' priority='100'>
|
||||
<ip srcipaddr='0.0.0.0'
|
||||
dstipaddr='255.255.255.255'
|
||||
protocol='udp'
|
||||
srcportstart='68'
|
||||
dstportstart='67' />
|
||||
</rule>
|
||||
|
||||
<!-- accept incoming DHCP responses from a specific DHCP server
|
||||
parameter DHPCSERVER needs to be passed from where this filter is
|
||||
referenced -->
|
||||
<rule action='accept' direction='in' priority='100' >
|
||||
<ip srcipaddr='$DHCPSERVER'
|
||||
protocol='udp'
|
||||
srcportstart='67'
|
||||
dstportstart='68'/>
|
||||
</rule>
|
||||
|
||||
</filter>
|
@ -0,0 +1,21 @@
|
||||
<filter name='allow-dhcp' chain='ipv4'>
|
||||
|
||||
<!-- accept outgoing DHCP requests -->
|
||||
<!-- not, this rule must be evaluated before general MAC broadcast
|
||||
traffic is discarded since DHCP requests use MAC broadcast -->
|
||||
<rule action='accept' direction='out' priority='100'>
|
||||
<ip srcipaddr='0.0.0.0'
|
||||
dstipaddr='255.255.255.255'
|
||||
protocol='udp'
|
||||
srcportstart='68'
|
||||
dstportstart='67' />
|
||||
</rule>
|
||||
|
||||
<!-- accept incoming DHCP responses from any DHCP server -->
|
||||
<rule action='accept' direction='in' priority='100' >
|
||||
<ip protocol='udp'
|
||||
srcportstart='67'
|
||||
dstportstart='68'/>
|
||||
</rule>
|
||||
|
||||
</filter>
|
@ -0,0 +1,3 @@
|
||||
<filter name='allow-incoming-ipv4' chain='ipv4'>
|
||||
<rule direction='in' action='accept'/>
|
||||
</filter>
|
@ -0,0 +1,3 @@
|
||||
<filter name='allow-ipv4' chain='ipv4'>
|
||||
<rule direction='inout' action='accept'/>
|
||||
</filter>
|
@ -0,0 +1,30 @@
|
||||
<filter name='clean-traffic' chain='root'>
|
||||
<!-- An example of a traffic filter enforcing clean traffic
|
||||
from a VM by
|
||||
- preventing MAC spoofing -->
|
||||
<filterref filter='no-mac-spoofing'/>
|
||||
|
||||
<!-- preventing IP spoofing on outgoing, allow all IPv4 in incoming -->
|
||||
<filterref filter='no-ip-spoofing'/>
|
||||
|
||||
<rule direction='out' action='accept' priority='-650'>
|
||||
<mac protocolid='ipv4'/>
|
||||
</rule>
|
||||
|
||||
<filterref filter='allow-incoming-ipv4'/>
|
||||
|
||||
<!-- preventing ARP spoofing/poisoning -->
|
||||
<filterref filter='no-arp-spoofing'/>
|
||||
|
||||
<!-- accept all other incoming and outgoing ARP traffic -->
|
||||
<rule action='accept' direction='inout' priority='-500'>
|
||||
<mac protocolid='arp'/>
|
||||
</rule>
|
||||
|
||||
<!-- preventing any other traffic than IPv4 and ARP -->
|
||||
<filterref filter='no-other-l2-traffic'/>
|
||||
|
||||
<!-- allow qemu to send a self-announce upon migration end -->
|
||||
<filterref filter='qemu-announce-self'/>
|
||||
|
||||
</filter>
|
@ -0,0 +1,9 @@
|
||||
<filter name='no-arp-ip-spoofing' chain='arp-ip' priority='-510'>
|
||||
<!-- no arp spoofing -->
|
||||
<!-- drop if ipaddr does not belong to guest -->
|
||||
<rule action='return' direction='out' priority='400' >
|
||||
<arp match='yes' arpsrcipaddr='$IP' />
|
||||
</rule>
|
||||
<!-- drop everything else -->
|
||||
<rule action='drop' direction='out' priority='1000' />
|
||||
</filter>
|
@ -0,0 +1,7 @@
|
||||
<filter name='no-arp-mac-spoofing' chain='arp-mac' priority='-520'>
|
||||
<rule action='return' direction='out' priority='350' >
|
||||
<arp match='yes' arpsrcmacaddr='$MAC'/>
|
||||
</rule>
|
||||
<!-- drop everything else -->
|
||||
<rule action='drop' direction='out' priority='1000' />
|
||||
</filter>
|
@ -0,0 +1,4 @@
|
||||
<filter name='no-arp-spoofing' chain='root'>
|
||||
<filterref filter='no-arp-mac-spoofing'/>
|
||||
<filterref filter='no-arp-ip-spoofing'/>
|
||||
</filter>
|
@ -0,0 +1,9 @@
|
||||
<filter name='no-ip-multicast' chain='ipv4'>
|
||||
|
||||
<!-- drop if destination IP address is in the 224.0.0.0/4 subnet -->
|
||||
<rule action='drop' direction='out'>
|
||||
<ip dstipaddr='224.0.0.0' dstipmask='4' />
|
||||
</rule>
|
||||
|
||||
<!-- not doing anything with receiving side ... -->
|
||||
</filter>
|
@ -0,0 +1,14 @@
|
||||
<filter name='no-ip-spoofing' chain='ipv4-ip' priority='-710'>
|
||||
<!-- allow UDP sent from 0.0.0.0 (DHCP); filter more exact later -->
|
||||
<rule action='return' direction='out' priority='100'>
|
||||
<ip srcipaddr='0.0.0.0' protocol='udp'/>
|
||||
</rule>
|
||||
|
||||
<!-- allow all known IP addresses -->
|
||||
<rule direction='out' action='return' priority='500'>
|
||||
<ip srcipaddr='$IP'/>
|
||||
</rule>
|
||||
|
||||
<!-- drop everything else -->
|
||||
<rule direction='out' action='drop' priority='1000'/>
|
||||
</filter>
|
@ -0,0 +1,8 @@
|
||||
<filter name='no-mac-broadcast' chain='ipv4'>
|
||||
<!-- drop if destination mac is bcast mac addr. -->
|
||||
<rule action='drop' direction='out'>
|
||||
<mac dstmacaddr='ff:ff:ff:ff:ff:ff' />
|
||||
</rule>
|
||||
|
||||
<!-- not doing anything with receiving side ... -->
|
||||
</filter>
|
@ -0,0 +1,10 @@
|
||||
<filter name='no-mac-spoofing' chain='mac' priority='-800'>
|
||||
<!-- return packets with VM's MAC address as source address -->
|
||||
<rule direction='out' action='return'>
|
||||
<mac srcmacaddr='$MAC'/>
|
||||
</rule>
|
||||
<!-- drop everything else -->
|
||||
<rule direction='out' action='drop'>
|
||||
<mac/>
|
||||
</rule>
|
||||
</filter>
|
@ -0,0 +1,7 @@
|
||||
<filter name='no-other-l2-traffic'>
|
||||
|
||||
<!-- drop all other l2 traffic than for which rules have been
|
||||
written for; i.e., drop all other than arp and ipv4 traffic -->
|
||||
<rule action='drop' direction='inout' priority='1000'/>
|
||||
|
||||
</filter>
|
@ -0,0 +1,3 @@
|
||||
<filter name='no-other-rarp-traffic' chain='rarp'>
|
||||
<rule action='drop' direction='inout' priority='1000'/>
|
||||
</filter>
|
@ -0,0 +1,14 @@
|
||||
<filter name='qemu-announce-self-rarp' chain='rarp'>
|
||||
<rule action='accept' direction='out' priority='500'>
|
||||
<rarp opcode='Request_Reverse'
|
||||
srcmacaddr='$MAC' dstmacaddr='ff:ff:ff:ff:ff:ff'
|
||||
arpsrcmacaddr='$MAC' arpdstmacaddr='$MAC'
|
||||
arpsrcipaddr='0.0.0.0' arpdstipaddr='0.0.0.0'/>
|
||||
</rule>
|
||||
<rule action='accept' direction='in' priority='500'>
|
||||
<rarp opcode='Request_Reverse'
|
||||
dstmacaddr='ff:ff:ff:ff:ff:ff'
|
||||
arpsrcmacaddr='$MAC' arpdstmacaddr='$MAC'
|
||||
arpsrcipaddr='0.0.0.0' arpdstipaddr='0.0.0.0'/>
|
||||
</rule>
|
||||
</filter>
|
@ -0,0 +1,13 @@
|
||||
<filter name='qemu-announce-self' chain='root'>
|
||||
<!-- as of 4/26/2010 qemu sends out a bogus packet with
|
||||
wrong rarp protocol ID -->
|
||||
<!-- accept what is being sent now -->
|
||||
<rule action='accept' direction='out'>
|
||||
<mac protocolid='0x835'/>
|
||||
</rule>
|
||||
|
||||
<!-- accept if it was changed to rarp -->
|
||||
<filterref filter='qemu-announce-self-rarp'/>
|
||||
<filterref filter='no-other-rarp-traffic'/>
|
||||
|
||||
</filter>
|
404
config_samples/fuel_web/golden_fuelweb/cmp/libvirt/qemu.conf
Normal file
404
config_samples/fuel_web/golden_fuelweb/cmp/libvirt/qemu.conf
Normal file
@ -0,0 +1,404 @@
|
||||
# Master configuration file for the QEMU driver.
|
||||
# All settings described here are optional - if omitted, sensible
|
||||
# defaults are used.
|
||||
|
||||
# VNC is configured to listen on 127.0.0.1 by default.
|
||||
# To make it listen on all public interfaces, uncomment
|
||||
# this next option.
|
||||
#
|
||||
# NB, strong recommendation to enable TLS + x509 certificate
|
||||
# verification when allowing public access
|
||||
#
|
||||
#vnc_listen = "0.0.0.0"
|
||||
|
||||
# Enable this option to have VNC served over an automatically created
|
||||
# unix socket. This prevents unprivileged access from users on the
|
||||
# host machine, though most VNC clients do not support it.
|
||||
#
|
||||
# This will only be enabled for VNC configurations that do not have
|
||||
# a hardcoded 'listen' or 'socket' value. This setting takes preference
|
||||
# over vnc_listen.
|
||||
#
|
||||
#vnc_auto_unix_socket = 1
|
||||
|
||||
# Enable use of TLS encryption on the VNC server. This requires
|
||||
# a VNC client which supports the VeNCrypt protocol extension.
|
||||
# Examples include vinagre, virt-viewer, virt-manager and vencrypt
|
||||
# itself. UltraVNC, RealVNC, TightVNC do not support this
|
||||
#
|
||||
# It is necessary to setup CA and issue a server certificate
|
||||
# before enabling this.
|
||||
#
|
||||
#vnc_tls = 1
|
||||
|
||||
|
||||
# Use of TLS requires that x509 certificates be issued. The
|
||||
# default it to keep them in /etc/pki/libvirt-vnc. This directory
|
||||
# must contain
|
||||
#
|
||||
# ca-cert.pem - the CA master certificate
|
||||
# server-cert.pem - the server certificate signed with ca-cert.pem
|
||||
# server-key.pem - the server private key
|
||||
#
|
||||
# This option allows the certificate directory to be changed
|
||||
#
|
||||
#vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
|
||||
|
||||
|
||||
# The default TLS configuration only uses certificates for the server
|
||||
# allowing the client to verify the server's identity and establish
|
||||
# an encrypted channel.
|
||||
#
|
||||
# It is possible to use x509 certificates for authentication too, by
|
||||
# issuing a x509 certificate to every client who needs to connect.
|
||||
#
|
||||
# Enabling this option will reject any client who does not have a
|
||||
# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
|
||||
#
|
||||
#vnc_tls_x509_verify = 1
|
||||
|
||||
|
||||
# The default VNC password. Only 8 letters are significant for
|
||||
# VNC passwords. This parameter is only used if the per-domain
|
||||
# XML config does not already provide a password. To allow
|
||||
# access without passwords, leave this commented out. An empty
|
||||
# string will still enable passwords, but be rejected by QEMU,
|
||||
# effectively preventing any use of VNC. Obviously change this
|
||||
# example here before you set this.
|
||||
#
|
||||
#vnc_password = "XYZ12345"
|
||||
|
||||
|
||||
# Enable use of SASL encryption on the VNC server. This requires
|
||||
# a VNC client which supports the SASL protocol extension.
|
||||
# Examples include vinagre, virt-viewer and virt-manager
|
||||
# itself. UltraVNC, RealVNC, TightVNC do not support this
|
||||
#
|
||||
# It is necessary to configure /etc/sasl2/qemu.conf to choose
|
||||
# the desired SASL plugin (eg, GSSPI for Kerberos)
|
||||
#
|
||||
#vnc_sasl = 1
|
||||
|
||||
|
||||
# The default SASL configuration file is located in /etc/sasl2/
|
||||
# When running libvirtd unprivileged, it may be desirable to
|
||||
# override the configs in this location. Set this parameter to
|
||||
# point to the directory, and create a qemu.conf in that location
|
||||
#
|
||||
#vnc_sasl_dir = "/some/directory/sasl2"
|
||||
|
||||
|
||||
# QEMU implements an extension for providing audio over a VNC connection,
|
||||
# though if your VNC client does not support it, your only chance for getting
|
||||
# sound output is through regular audio backends. By default, libvirt will
|
||||
# disable all QEMU sound backends if using VNC, since they can cause
|
||||
# permissions issues. Enabling this option will make libvirtd honor the
|
||||
# QEMU_AUDIO_DRV environment variable when using VNC.
|
||||
#
|
||||
#vnc_allow_host_audio = 0
|
||||
|
||||
|
||||
|
||||
# SPICE is configured to listen on 127.0.0.1 by default.
|
||||
# To make it listen on all public interfaces, uncomment
|
||||
# this next option.
|
||||
#
|
||||
# NB, strong recommendation to enable TLS + x509 certificate
|
||||
# verification when allowing public access
|
||||
#
|
||||
#spice_listen = "0.0.0.0"
|
||||
|
||||
|
||||
# Enable use of TLS encryption on the SPICE server.
|
||||
#
|
||||
# It is necessary to setup CA and issue a server certificate
|
||||
# before enabling this.
|
||||
#
|
||||
#spice_tls = 1
|
||||
|
||||
|
||||
# Use of TLS requires that x509 certificates be issued. The
|
||||
# default it to keep them in /etc/pki/libvirt-spice. This directory
|
||||
# must contain
|
||||
#
|
||||
# ca-cert.pem - the CA master certificate
|
||||
# server-cert.pem - the server certificate signed with ca-cert.pem
|
||||
# server-key.pem - the server private key
|
||||
#
|
||||
# This option allows the certificate directory to be changed.
|
||||
#
|
||||
#spice_tls_x509_cert_dir = "/etc/pki/libvirt-spice"
|
||||
|
||||
|
||||
# The default SPICE password. This parameter is only used if the
|
||||
# per-domain XML config does not already provide a password. To
|
||||
# allow access without passwords, leave this commented out. An
|
||||
# empty string will still enable passwords, but be rejected by
|
||||
# QEMU, effectively preventing any use of SPICE. Obviously change
|
||||
# this example here before you set this.
|
||||
#
|
||||
#spice_password = "XYZ12345"
|
||||
|
||||
|
||||
# Override the port for creating both VNC and SPICE sessions (min).
|
||||
# This defaults to 5900 and increases for consecutive sessions
|
||||
# or when ports are occupied, until it hits the maximum.
|
||||
#
|
||||
# Minimum must be greater than or equal to 5900 as lower number would
|
||||
# result into negative vnc display number.
|
||||
#
|
||||
# Maximum must be less than 65536, because higher numbers do not make
|
||||
# sense as a port number.
|
||||
#
|
||||
#remote_display_port_min = 5900
|
||||
#remote_display_port_max = 65535
|
||||
|
||||
|
||||
# The default security driver is SELinux. If SELinux is disabled
|
||||
# on the host, then the security driver will automatically disable
|
||||
# itself. If you wish to disable QEMU SELinux security driver while
|
||||
# leaving SELinux enabled for the host in general, then set this
|
||||
# to 'none' instead. It's also possible to use more than one security
|
||||
# driver at the same time, for this use a list of names separated by
|
||||
# comma and delimited by square brackets. For example:
|
||||
#
|
||||
# security_driver = [ "selinux", "apparmor" ]
|
||||
#
|
||||
# Notes: The DAC security driver is always enabled; as a result, the
|
||||
# value of security_driver cannot contain "dac". The value "none" is
|
||||
# a special value; security_driver can be set to that value in
|
||||
# isolation, but it cannot appear in a list of drivers.
|
||||
#
|
||||
#security_driver = "selinux"
|
||||
|
||||
# If set to non-zero, then the default security labeling
|
||||
# will make guests confined. If set to zero, then guests
|
||||
# will be unconfined by default. Defaults to 1.
|
||||
#security_default_confined = 1
|
||||
|
||||
# If set to non-zero, then attempts to create unconfined
|
||||
# guests will be blocked. Defaults to 0.
|
||||
#security_require_confined = 1
|
||||
|
||||
# The user for QEMU processes run by the system instance. It can be
|
||||
# specified as a user name or as a user id. The qemu driver will try to
|
||||
# parse this value first as a name and then, if the name doesn't exist,
|
||||
# as a user id.
|
||||
#
|
||||
# Since a sequence of digits is a valid user name, a leading plus sign
|
||||
# can be used to ensure that a user id will not be interpreted as a user
|
||||
# name.
|
||||
#
|
||||
# Some examples of valid values are:
|
||||
#
|
||||
# user = "qemu" # A user named "qemu"
|
||||
# user = "+0" # Super user (uid=0)
|
||||
# user = "100" # A user named "100" or a user with uid=100
|
||||
#
|
||||
#user = "root"
|
||||
|
||||
# The group for QEMU processes run by the system instance. It can be
|
||||
# specified in a similar way to user.
|
||||
#group = "root"
|
||||
|
||||
# Whether libvirt should dynamically change file ownership
|
||||
# to match the configured user/group above. Defaults to 1.
|
||||
# Set to 0 to disable file ownership changes.
|
||||
#dynamic_ownership = 1
|
||||
|
||||
|
||||
# What cgroup controllers to make use of with QEMU guests
|
||||
#
|
||||
# - 'cpu' - use for scheduler tunables
|
||||
# - 'devices' - use for device whitelisting
|
||||
# - 'memory' - use for memory tunables
|
||||
# - 'blkio' - use for block devices I/O tunables
|
||||
# - 'cpuset' - use for CPUs and memory nodes
|
||||
# - 'cpuacct' - use for CPUs statistics.
|
||||
#
|
||||
# NB, even if configured here, they won't be used unless
|
||||
# the administrator has mounted cgroups, e.g.:
|
||||
#
|
||||
# mkdir /dev/cgroup
|
||||
# mount -t cgroup -o devices,cpu,memory,blkio,cpuset none /dev/cgroup
|
||||
#
|
||||
# They can be mounted anywhere, and different controllers
|
||||
# can be mounted in different locations. libvirt will detect
|
||||
# where they are located.
|
||||
#
|
||||
#cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ]
|
||||
|
||||
# This is the basic set of devices allowed / required by
|
||||
# all virtual machines.
|
||||
#
|
||||
# As well as this, any configured block backed disks,
|
||||
# all sound devices, and all PTY devices are allowed.
|
||||
#
|
||||
# This will only need setting if newer QEMU suddenly
|
||||
# wants some device we don't already know about.
|
||||
#
|
||||
#cgroup_device_acl = [
|
||||
# "/dev/null", "/dev/full", "/dev/zero",
|
||||
# "/dev/random", "/dev/urandom",
|
||||
# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
|
||||
# "/dev/rtc","/dev/hpet"
|
||||
#]
|
||||
|
||||
|
||||
# The default format for Qemu/KVM guest save images is raw; that is, the
|
||||
# memory from the domain is dumped out directly to a file. If you have
|
||||
# guests with a large amount of memory, however, this can take up quite
|
||||
# a bit of space. If you would like to compress the images while they
|
||||
# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
|
||||
# for save_image_format. Note that this means you slow down the process of
|
||||
# saving a domain in order to save disk space; the list above is in descending
|
||||
# order by performance and ascending order by compression ratio.
|
||||
#
|
||||
# save_image_format is used when you use 'virsh save' at scheduled
|
||||
# saving, and it is an error if the specified save_image_format is
|
||||
# not valid, or the requested compression program can't be found.
|
||||
#
|
||||
# dump_image_format is used when you use 'virsh dump' at emergency
|
||||
# crashdump, and if the specified dump_image_format is not valid, or
|
||||
# the requested compression program can't be found, this falls
|
||||
# back to "raw" compression.
|
||||
#
|
||||
#save_image_format = "raw"
|
||||
#dump_image_format = "raw"
|
||||
|
||||
# When a domain is configured to be auto-dumped when libvirtd receives a
|
||||
# watchdog event from qemu guest, libvirtd will save dump files in directory
|
||||
# specified by auto_dump_path. Default value is /var/lib/libvirt/qemu/dump
|
||||
#
|
||||
#auto_dump_path = "/var/lib/libvirt/qemu/dump"
|
||||
|
||||
# When a domain is configured to be auto-dumped, enabling this flag
|
||||
# has the same effect as using the VIR_DUMP_BYPASS_CACHE flag with the
|
||||
# virDomainCoreDump API. That is, the system will avoid using the
|
||||
# file system cache while writing the dump file, but may cause
|
||||
# slower operation.
|
||||
#
|
||||
#auto_dump_bypass_cache = 0
|
||||
|
||||
# When a domain is configured to be auto-started, enabling this flag
|
||||
# has the same effect as using the VIR_DOMAIN_START_BYPASS_CACHE flag
|
||||
# with the virDomainCreateWithFlags API. That is, the system will
|
||||
# avoid using the file system cache when restoring any managed state
|
||||
# file, but may cause slower operation.
|
||||
#
|
||||
#auto_start_bypass_cache = 0
|
||||
|
||||
# If provided by the host and a hugetlbfs mount point is configured,
|
||||
# a guest may request huge page backing. When this mount point is
|
||||
# unspecified here, determination of a host mount point in /proc/mounts
|
||||
# will be attempted. Specifying an explicit mount overrides detection
|
||||
# of the same in /proc/mounts. Setting the mount point to "" will
|
||||
# disable guest hugepage backing.
|
||||
#
|
||||
# NB, within this mount point, guests will create memory backing files
|
||||
# in a location of $MOUNTPOINT/libvirt/qemu
|
||||
#
|
||||
#hugetlbfs_mount = "/dev/hugepages"
|
||||
|
||||
|
||||
# If clear_emulator_capabilities is enabled, libvirt will drop all
|
||||
# privileged capabilities of the QEmu/KVM emulator. This is enabled by
|
||||
# default.
|
||||
#
|
||||
# Warning: Disabling this option means that a compromised guest can
|
||||
# exploit the privileges and possibly do damage to the host.
|
||||
#
|
||||
#clear_emulator_capabilities = 1
|
||||
|
||||
|
||||
# If enabled, libvirt will have QEMU set its process name to
|
||||
# "qemu:VM_NAME", where VM_NAME is the name of the VM. The QEMU
|
||||
# process will appear as "qemu:VM_NAME" in process listings and
|
||||
# other system monitoring tools. By default, QEMU does not set
|
||||
# its process title, so the complete QEMU command (emulator and
|
||||
# its arguments) appear in process listings.
|
||||
#
|
||||
#set_process_name = 1
|
||||
|
||||
|
||||
# If max_processes is set to a positive integer, libvirt will use
|
||||
# it to set the maximum number of processes that can be run by qemu
|
||||
# user. This can be used to override default value set by host OS.
|
||||
# The same applies to max_files which sets the limit on the maximum
|
||||
# number of opened files.
|
||||
#
|
||||
#max_processes = 0
|
||||
#max_files = 0
|
||||
|
||||
|
||||
|
||||
# mac_filter enables MAC addressed based filtering on bridge ports.
|
||||
# This currently requires ebtables to be installed.
|
||||
#
|
||||
#mac_filter = 1
|
||||
|
||||
|
||||
# By default, PCI devices below non-ACS switch are not allowed to be assigned
|
||||
# to guests. By setting relaxed_acs_check to 1 such devices will be allowed to
|
||||
# be assigned to guests.
|
||||
#
|
||||
#relaxed_acs_check = 1
|
||||
|
||||
|
||||
# If allow_disk_format_probing is enabled, libvirt will probe disk
|
||||
# images to attempt to identify their format, when not otherwise
|
||||
# specified in the XML. This is disabled by default.
|
||||
#
|
||||
# WARNING: Enabling probing is a security hole in almost all
|
||||
# deployments. It is strongly recommended that users update their
|
||||
# guest XML <disk> elements to include <driver type='XXXX'/>
|
||||
# elements instead of enabling this option.
|
||||
#
|
||||
#allow_disk_format_probing = 1
|
||||
|
||||
|
||||
# To enable 'Sanlock' project based locking of the file
|
||||
# content (to prevent two VMs writing to the same
|
||||
# disk), uncomment this
|
||||
#
|
||||
#lock_manager = "sanlock"
|
||||
|
||||
|
||||
|
||||
# Set limit of maximum APIs queued on one domain. All other APIs
|
||||
# over this threshold will fail on acquiring job lock. Specially,
|
||||
# setting to zero turns this feature off.
|
||||
# Note, that job lock is per domain.
|
||||
#
|
||||
#max_queued = 0
|
||||
|
||||
###################################################################
|
||||
# Keepalive protocol:
|
||||
# This allows qemu driver to detect broken connections to remote
|
||||
# libvirtd during peer-to-peer migration. A keepalive message is
|
||||
# sent to the daemon after keepalive_interval seconds of inactivity
|
||||
# to check if the daemon is still responding; keepalive_count is a
|
||||
# maximum number of keepalive messages that are allowed to be sent
|
||||
# to the daemon without getting any response before the connection
|
||||
# is considered broken. In other words, the connection is
|
||||
# automatically closed approximately after
|
||||
# keepalive_interval * (keepalive_count + 1) seconds since the last
|
||||
# message received from the daemon. If keepalive_interval is set to
|
||||
# -1, qemu driver will not send keepalive requests during
|
||||
# peer-to-peer migration; however, the remote libvirtd can still
|
||||
# send them and source libvirtd will send responses. When
|
||||
# keepalive_count is set to 0, connections will be automatically
|
||||
# closed after keepalive_interval seconds of inactivity without
|
||||
# sending any keepalive messages.
|
||||
#
|
||||
#keepalive_interval = 5
|
||||
#keepalive_count = 5
|
||||
|
||||
|
||||
|
||||
# Use seccomp syscall whitelisting in QEMU.
|
||||
# 1 = on, 0 = off, -1 = use QEMU default
|
||||
# Defaults to -1.
|
||||
#
|
||||
#seccomp_sandbox = 1
|
||||
security_driver="none"
|
@ -0,0 +1,12 @@
|
||||
<network>
|
||||
<name>default</name>
|
||||
<uuid>7c31927a-3bca-4e99-88c3-066478cc6a62</uuid>
|
||||
<bridge name="virbr0" />
|
||||
<mac address='52:54:00:E3:F7:82'/>
|
||||
<forward/>
|
||||
<ip address="192.168.122.1" netmask="255.255.255.0">
|
||||
<dhcp>
|
||||
<range start="192.168.122.2" end="192.168.122.254" />
|
||||
</dhcp>
|
||||
</ip>
|
||||
</network>
|
@ -0,0 +1,12 @@
|
||||
<network>
|
||||
<name>default</name>
|
||||
<uuid>7c31927a-3bca-4e99-88c3-066478cc6a62</uuid>
|
||||
<bridge name="virbr0" />
|
||||
<mac address='52:54:00:E3:F7:82'/>
|
||||
<forward/>
|
||||
<ip address="192.168.122.1" netmask="255.255.255.0">
|
||||
<dhcp>
|
||||
<range start="192.168.122.2" end="192.168.122.254" />
|
||||
</dhcp>
|
||||
</ip>
|
||||
</network>
|
@ -0,0 +1,80 @@
|
||||
# Generated by LVM2 version 2.02.98(2)-RHEL6 (2012-10-15): Mon Sep 16 13:31:51 2013
|
||||
|
||||
contents = "Text Format Volume Group"
|
||||
version = 1
|
||||
|
||||
description = "Created *before* executing '/sbin/vgs --noheadings -o name --config 'log{command_names=0 prefix=\" \"}''"
|
||||
|
||||
creation_host = "compute-16.domain.tld" # Linux compute-16.domain.tld 2.6.32-358.6.2.el6.x86_64 #1 SMP Thu May 16 20:59:36 UTC 2013 x86_64
|
||||
creation_time = 1379338311 # Mon Sep 16 13:31:51 2013
|
||||
|
||||
os {
|
||||
id = "vOWwZt-1R8M-W1fR-DdyR-MtPN-LEuV-G4247X"
|
||||
seqno = 3
|
||||
format = "lvm2" # informational
|
||||
status = ["RESIZEABLE", "READ", "WRITE"]
|
||||
flags = []
|
||||
extent_size = 65536 # 32 Megabytes
|
||||
max_lv = 0
|
||||
max_pv = 0
|
||||
metadata_copies = 0
|
||||
|
||||
physical_volumes {
|
||||
|
||||
pv0 {
|
||||
id = "DcHtP1-1FFx-x9wL-xi7w-qihb-ytLQ-LWkpHc"
|
||||
device = "/dev/sda2" # Hint only
|
||||
|
||||
status = ["ALLOCATABLE"]
|
||||
flags = []
|
||||
dev_size = 29321216 # 13.9814 Gigabytes
|
||||
pe_start = 2048
|
||||
pe_count = 447 # 13.9688 Gigabytes
|
||||
}
|
||||
}
|
||||
|
||||
logical_volumes {
|
||||
|
||||
root {
|
||||
id = "b4uooc-r4Kx-7JON-FPoO-0JAL-QMg1-mr1vyi"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "compute-16.domain.tld"
|
||||
creation_time = 1379337936 # 2013-09-16 13:25:36 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 320 # 10 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 0
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
swap {
|
||||
id = "6wpV9P-RA5V-NivX-P2d0-eTJm-pqp9-ayOkrP"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "compute-16.domain.tld"
|
||||
creation_time = 1379337940 # 2013-09-16 13:25:40 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 126 # 3.9375 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 320
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,59 @@
|
||||
# Generated by LVM2 version 2.02.98(2)-RHEL6 (2012-10-15): Mon Sep 16 13:31:51 2013
|
||||
|
||||
contents = "Text Format Volume Group"
|
||||
version = 1
|
||||
|
||||
description = "Created *before* executing '/sbin/vgs --noheadings -o name --config 'log{command_names=0 prefix=\" \"}''"
|
||||
|
||||
creation_host = "compute-16.domain.tld" # Linux compute-16.domain.tld 2.6.32-358.6.2.el6.x86_64 #1 SMP Thu May 16 20:59:36 UTC 2013 x86_64
|
||||
creation_time = 1379338311 # Mon Sep 16 13:31:51 2013
|
||||
|
||||
vm {
|
||||
id = "82EdLb-ChnU-MHfe-NMwG-Ssyj-0VKi-Sd0VG8"
|
||||
seqno = 2
|
||||
format = "lvm2" # informational
|
||||
status = ["RESIZEABLE", "READ", "WRITE"]
|
||||
flags = []
|
||||
extent_size = 65536 # 32 Megabytes
|
||||
max_lv = 0
|
||||
max_pv = 0
|
||||
metadata_copies = 0
|
||||
|
||||
physical_volumes {
|
||||
|
||||
pv0 {
|
||||
id = "K5Epsi-flho-3DPH-83e8-mTCh-qEZY-p9yFiS"
|
||||
device = "/dev/sda3" # Hint only
|
||||
|
||||
status = ["ALLOCATABLE"]
|
||||
flags = []
|
||||
dev_size = 3801088 # 1.8125 Gigabytes
|
||||
pe_start = 2048
|
||||
pe_count = 57 # 1.78125 Gigabytes
|
||||
}
|
||||
}
|
||||
|
||||
logical_volumes {
|
||||
|
||||
libvirt {
|
||||
id = "tBFkcf-B6xs-9fUH-lEhW-XL2p-PSQF-EpMAHq"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "compute-16.domain.tld"
|
||||
creation_time = 1379337941 # 2013-09-16 13:25:41 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 56 # 1.75 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 0
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
80
config_samples/fuel_web/golden_fuelweb/cmp/lvm/backup/os
Normal file
80
config_samples/fuel_web/golden_fuelweb/cmp/lvm/backup/os
Normal file
@ -0,0 +1,80 @@
|
||||
# Generated by LVM2 version 2.02.98(2)-RHEL6 (2012-10-15): Mon Sep 16 13:31:51 2013
|
||||
|
||||
contents = "Text Format Volume Group"
|
||||
version = 1
|
||||
|
||||
description = "Created *after* executing '/sbin/vgs --noheadings -o name --config 'log{command_names=0 prefix=\" \"}''"
|
||||
|
||||
creation_host = "compute-16.domain.tld" # Linux compute-16.domain.tld 2.6.32-358.6.2.el6.x86_64 #1 SMP Thu May 16 20:59:36 UTC 2013 x86_64
|
||||
creation_time = 1379338311 # Mon Sep 16 13:31:51 2013
|
||||
|
||||
os {
|
||||
id = "vOWwZt-1R8M-W1fR-DdyR-MtPN-LEuV-G4247X"
|
||||
seqno = 3
|
||||
format = "lvm2" # informational
|
||||
status = ["RESIZEABLE", "READ", "WRITE"]
|
||||
flags = []
|
||||
extent_size = 65536 # 32 Megabytes
|
||||
max_lv = 0
|
||||
max_pv = 0
|
||||
metadata_copies = 0
|
||||
|
||||
physical_volumes {
|
||||
|
||||
pv0 {
|
||||
id = "DcHtP1-1FFx-x9wL-xi7w-qihb-ytLQ-LWkpHc"
|
||||
device = "/dev/sda2" # Hint only
|
||||
|
||||
status = ["ALLOCATABLE"]
|
||||
flags = []
|
||||
dev_size = 29321216 # 13.9814 Gigabytes
|
||||
pe_start = 2048
|
||||
pe_count = 447 # 13.9688 Gigabytes
|
||||
}
|
||||
}
|
||||
|
||||
logical_volumes {
|
||||
|
||||
root {
|
||||
id = "b4uooc-r4Kx-7JON-FPoO-0JAL-QMg1-mr1vyi"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "compute-16.domain.tld"
|
||||
creation_time = 1379337936 # 2013-09-16 13:25:36 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 320 # 10 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 0
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
swap {
|
||||
id = "6wpV9P-RA5V-NivX-P2d0-eTJm-pqp9-ayOkrP"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "compute-16.domain.tld"
|
||||
creation_time = 1379337940 # 2013-09-16 13:25:40 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 126 # 3.9375 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 320
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
59
config_samples/fuel_web/golden_fuelweb/cmp/lvm/backup/vm
Normal file
59
config_samples/fuel_web/golden_fuelweb/cmp/lvm/backup/vm
Normal file
@ -0,0 +1,59 @@
|
||||
# Generated by LVM2 version 2.02.98(2)-RHEL6 (2012-10-15): Mon Sep 16 13:31:51 2013
|
||||
|
||||
contents = "Text Format Volume Group"
|
||||
version = 1
|
||||
|
||||
description = "Created *after* executing '/sbin/vgs --noheadings -o name --config 'log{command_names=0 prefix=\" \"}''"
|
||||
|
||||
creation_host = "compute-16.domain.tld" # Linux compute-16.domain.tld 2.6.32-358.6.2.el6.x86_64 #1 SMP Thu May 16 20:59:36 UTC 2013 x86_64
|
||||
creation_time = 1379338311 # Mon Sep 16 13:31:51 2013
|
||||
|
||||
vm {
|
||||
id = "82EdLb-ChnU-MHfe-NMwG-Ssyj-0VKi-Sd0VG8"
|
||||
seqno = 2
|
||||
format = "lvm2" # informational
|
||||
status = ["RESIZEABLE", "READ", "WRITE"]
|
||||
flags = []
|
||||
extent_size = 65536 # 32 Megabytes
|
||||
max_lv = 0
|
||||
max_pv = 0
|
||||
metadata_copies = 0
|
||||
|
||||
physical_volumes {
|
||||
|
||||
pv0 {
|
||||
id = "K5Epsi-flho-3DPH-83e8-mTCh-qEZY-p9yFiS"
|
||||
device = "/dev/sda3" # Hint only
|
||||
|
||||
status = ["ALLOCATABLE"]
|
||||
flags = []
|
||||
dev_size = 3801088 # 1.8125 Gigabytes
|
||||
pe_start = 2048
|
||||
pe_count = 57 # 1.78125 Gigabytes
|
||||
}
|
||||
}
|
||||
|
||||
logical_volumes {
|
||||
|
||||
libvirt {
|
||||
id = "tBFkcf-B6xs-9fUH-lEhW-XL2p-PSQF-EpMAHq"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "compute-16.domain.tld"
|
||||
creation_time = 1379337941 # 2013-09-16 13:25:41 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 56 # 1.75 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 0
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
843
config_samples/fuel_web/golden_fuelweb/cmp/lvm/lvm.conf
Normal file
843
config_samples/fuel_web/golden_fuelweb/cmp/lvm/lvm.conf
Normal file
@ -0,0 +1,843 @@
|
||||
# This is an example configuration file for the LVM2 system.
|
||||
# It contains the default settings that would be used if there was no
|
||||
# /etc/lvm/lvm.conf file.
|
||||
#
|
||||
# Refer to 'man lvm.conf' for further information including the file layout.
|
||||
#
|
||||
# To put this file in a different directory and override /etc/lvm set
|
||||
# the environment variable LVM_SYSTEM_DIR before running the tools.
|
||||
#
|
||||
# N.B. Take care that each setting only appears once if uncommenting
|
||||
# example settings in this file.
|
||||
|
||||
|
||||
# This section allows you to configure which block devices should
|
||||
# be used by the LVM system.
|
||||
devices {
|
||||
|
||||
# Where do you want your volume groups to appear ?
|
||||
dir = "/dev"
|
||||
|
||||
# An array of directories that contain the device nodes you wish
|
||||
# to use with LVM2.
|
||||
scan = [ "/dev" ]
|
||||
|
||||
# If set, the cache of block device nodes with all associated symlinks
|
||||
# will be constructed out of the existing udev database content.
|
||||
# This avoids using and opening any inapplicable non-block devices or
|
||||
# subdirectories found in the device directory. This setting is applied
|
||||
# to udev-managed device directory only, other directories will be scanned
|
||||
# fully. LVM2 needs to be compiled with udev support for this setting to
|
||||
# take effect. N.B. Any device node or symlink not managed by udev in
|
||||
# udev directory will be ignored with this setting on.
|
||||
obtain_device_list_from_udev = 1
|
||||
|
||||
# If several entries in the scanned directories correspond to the
|
||||
# same block device and the tools need to display a name for device,
|
||||
# all the pathnames are matched against each item in the following
|
||||
# list of regular expressions in turn and the first match is used.
|
||||
# preferred_names = [ ]
|
||||
|
||||
# Try to avoid using undescriptive /dev/dm-N names, if present.
|
||||
preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
|
||||
|
||||
# A filter that tells LVM2 to only use a restricted set of devices.
|
||||
# The filter consists of an array of regular expressions. These
|
||||
# expressions can be delimited by a character of your choice, and
|
||||
# prefixed with either an 'a' (for accept) or 'r' (for reject).
|
||||
# The first expression found to match a device name determines if
|
||||
# the device will be accepted or rejected (ignored). Devices that
|
||||
# don't match any patterns are accepted.
|
||||
|
||||
# Be careful if there are symbolic links or multiple filesystem
|
||||
# entries for the same device as each name is checked separately against
|
||||
# the list of patterns. The effect is that if the first pattern in the
|
||||
# list to match a name is an 'a' pattern for any of the names, the device
|
||||
# is accepted; otherwise if the first pattern in the list to match a name
|
||||
# is an 'r' pattern for any of the names it is rejected; otherwise it is
|
||||
# accepted.
|
||||
|
||||
# Don't have more than one filter line active at once: only one gets used.
|
||||
|
||||
# Run vgscan after you change this parameter to ensure that
|
||||
# the cache file gets regenerated (see below).
|
||||
# If it doesn't do what you expect, check the output of 'vgscan -vvvv'.
|
||||
|
||||
|
||||
# By default we accept every block device:
|
||||
filter = [ "a/.*/" ]
|
||||
|
||||
# Exclude the cdrom drive
|
||||
# filter = [ "r|/dev/cdrom|" ]
|
||||
|
||||
# When testing I like to work with just loopback devices:
|
||||
# filter = [ "a/loop/", "r/.*/" ]
|
||||
|
||||
# Or maybe all loops and ide drives except hdc:
|
||||
# filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
|
||||
|
||||
# Use anchors if you want to be really specific
|
||||
# filter = [ "a|^/dev/hda8$|", "r/.*/" ]
|
||||
|
||||
# Since "filter" is often overridden from command line, it is not suitable
|
||||
# for system-wide device filtering (udev rules, lvmetad). To hide devices
|
||||
# from LVM-specific udev processing and/or from lvmetad, you need to set
|
||||
# global_filter. The syntax is the same as for normal "filter"
|
||||
# above. Devices that fail the global_filter are not even opened by LVM.
|
||||
|
||||
# global_filter = []
|
||||
|
||||
# The results of the filtering are cached on disk to avoid
|
||||
# rescanning dud devices (which can take a very long time).
|
||||
# By default this cache is stored in the /etc/lvm/cache directory
|
||||
# in a file called '.cache'.
|
||||
# It is safe to delete the contents: the tools regenerate it.
|
||||
# (The old setting 'cache' is still respected if neither of
|
||||
# these new ones is present.)
|
||||
# N.B. If obtain_device_list_from_udev is set to 1 the list of
|
||||
# devices is instead obtained from udev and any existing .cache
|
||||
# file is removed.
|
||||
cache_dir = "/etc/lvm/cache"
|
||||
cache_file_prefix = ""
|
||||
|
||||
# You can turn off writing this cache file by setting this to 0.
|
||||
write_cache_state = 1
|
||||
|
||||
# Advanced settings.
|
||||
|
||||
# List of pairs of additional acceptable block device types found
|
||||
# in /proc/devices with maximum (non-zero) number of partitions.
|
||||
# types = [ "fd", 16 ]
|
||||
|
||||
# If sysfs is mounted (2.6 kernels) restrict device scanning to
|
||||
# the block devices it believes are valid.
|
||||
# 1 enables; 0 disables.
|
||||
sysfs_scan = 1
|
||||
|
||||
# By default, LVM2 will ignore devices used as component paths
|
||||
# of device-mapper multipath devices.
|
||||
# 1 enables; 0 disables.
|
||||
multipath_component_detection = 1
|
||||
|
||||
# By default, LVM2 will ignore devices used as components of
|
||||
# software RAID (md) devices by looking for md superblocks.
|
||||
# 1 enables; 0 disables.
|
||||
md_component_detection = 1
|
||||
|
||||
# By default, if a PV is placed directly upon an md device, LVM2
|
||||
# will align its data blocks with the md device's stripe-width.
|
||||
# 1 enables; 0 disables.
|
||||
md_chunk_alignment = 1
|
||||
|
||||
# Default alignment of the start of a data area in MB. If set to 0,
|
||||
# a value of 64KB will be used. Set to 1 for 1MiB, 2 for 2MiB, etc.
|
||||
# default_data_alignment = 1
|
||||
|
||||
# By default, the start of a PV's data area will be a multiple of
|
||||
# the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs.
|
||||
# - minimum_io_size - the smallest request the device can perform
|
||||
# w/o incurring a read-modify-write penalty (e.g. MD's chunk size)
|
||||
# - optimal_io_size - the device's preferred unit of receiving I/O
|
||||
# (e.g. MD's stripe width)
|
||||
# minimum_io_size is used if optimal_io_size is undefined (0).
|
||||
# If md_chunk_alignment is enabled, that detects the optimal_io_size.
|
||||
# This setting takes precedence over md_chunk_alignment.
|
||||
# 1 enables; 0 disables.
|
||||
data_alignment_detection = 1
|
||||
|
||||
# Alignment (in KB) of start of data area when creating a new PV.
|
||||
# md_chunk_alignment and data_alignment_detection are disabled if set.
|
||||
# Set to 0 for the default alignment (see: data_alignment_default)
|
||||
# or page size, if larger.
|
||||
data_alignment = 0
|
||||
|
||||
# By default, the start of the PV's aligned data area will be shifted by
|
||||
# the 'alignment_offset' exposed in sysfs. This offset is often 0 but
|
||||
# may be non-zero; e.g.: certain 4KB sector drives that compensate for
|
||||
# windows partitioning will have an alignment_offset of 3584 bytes
|
||||
# (sector 7 is the lowest aligned logical block, the 4KB sectors start
|
||||
# at LBA -1, and consequently sector 63 is aligned on a 4KB boundary).
|
||||
# But note that pvcreate --dataalignmentoffset will skip this detection.
|
||||
# 1 enables; 0 disables.
|
||||
data_alignment_offset_detection = 1
|
||||
|
||||
# If, while scanning the system for PVs, LVM2 encounters a device-mapper
|
||||
# device that has its I/O suspended, it waits for it to become accessible.
|
||||
# Set this to 1 to skip such devices. This should only be needed
|
||||
# in recovery situations.
|
||||
ignore_suspended_devices = 0
|
||||
|
||||
# During each LVM operation errors received from each device are counted.
|
||||
# If the counter of a particular device exceeds the limit set here, no
|
||||
# further I/O is sent to that device for the remainder of the respective
|
||||
# operation. Setting the parameter to 0 disables the counters altogether.
|
||||
disable_after_error_count = 0
|
||||
|
||||
# Allow use of pvcreate --uuid without requiring --restorefile.
|
||||
require_restorefile_with_uuid = 1
|
||||
|
||||
# Minimum size (in KB) of block devices which can be used as PVs.
|
||||
# In a clustered environment all nodes must use the same value.
|
||||
# Any value smaller than 512KB is ignored.
|
||||
|
||||
# Ignore devices smaller than 2MB such as floppy drives.
|
||||
pv_min_size = 2048
|
||||
|
||||
# The original built-in setting was 512 up to and including version 2.02.84.
|
||||
# pv_min_size = 512
|
||||
|
||||
# Issue discards to a logical volume's underlying physical volume(s) when
|
||||
# the logical volume is no longer using the physical volumes' space (e.g.
|
||||
# lvremove, lvreduce, etc). Discards inform the storage that a region is
|
||||
# no longer in use. Storage that supports discards advertise the protocol
|
||||
# specific way discards should be issued by the kernel (TRIM, UNMAP, or
|
||||
# WRITE SAME with UNMAP bit set). Not all storage will support or benefit
|
||||
# from discards but SSDs and thinly provisioned LUNs generally do. If set
|
||||
# to 1, discards will only be issued if both the storage and kernel provide
|
||||
# support.
|
||||
# 1 enables; 0 disables.
|
||||
issue_discards = 0
|
||||
}
|
||||
|
||||
# This section allows you to configure the way in which LVM selects
|
||||
# free space for its Logical Volumes.
|
||||
allocation {
|
||||
|
||||
# When searching for free space to extend an LV, the "cling"
|
||||
# allocation policy will choose space on the same PVs as the last
|
||||
# segment of the existing LV. If there is insufficient space and a
|
||||
# list of tags is defined here, it will check whether any of them are
|
||||
# attached to the PVs concerned and then seek to match those PV tags
|
||||
# between existing extents and new extents.
|
||||
# Use the special tag "@*" as a wildcard to match any PV tag.
|
||||
|
||||
# Example: LVs are mirrored between two sites within a single VG.
|
||||
# PVs are tagged with either @site1 or @site2 to indicate where
|
||||
# they are situated.
|
||||
|
||||
# cling_tag_list = [ "@site1", "@site2" ]
|
||||
# cling_tag_list = [ "@*" ]
|
||||
|
||||
# Changes made in version 2.02.85 extended the reach of the 'cling'
|
||||
# policies to detect more situations where data can be grouped
|
||||
# onto the same disks. Set this to 0 to revert to the previous
|
||||
# algorithm.
|
||||
maximise_cling = 1
|
||||
|
||||
# Set to 1 to guarantee that mirror logs will always be placed on
|
||||
# different PVs from the mirror images. This was the default
|
||||
# until version 2.02.85.
|
||||
mirror_logs_require_separate_pvs = 0
|
||||
|
||||
# Set to 1 to guarantee that thin pool metadata will always
|
||||
# be placed on different PVs from the pool data.
|
||||
thin_pool_metadata_require_separate_pvs = 0
|
||||
|
||||
# Specify the minimal chunk size (in KB) for thin pool volumes.
|
||||
# Use of the larger chunk size may improve performance for plain
|
||||
# thin volumes, however using them for snapshot volumes is less efficient,
|
||||
# as it consumes more space and takes extra time for copying.
|
||||
# When unset, lvm tries to estimate chunk size starting from 64KB
|
||||
# Supported values are in range from 64 to 1048576.
|
||||
# thin_pool_chunk_size = 64
|
||||
|
||||
# Specify discards behavior of the thin pool volume.
|
||||
# Select one of "ignore", "nopassdown", "passdown"
|
||||
# thin_pool_discards = "passdown"
|
||||
|
||||
# Set to 0, to disable zeroing of thin pool data chunks before their
|
||||
# first use.
|
||||
# N.B. zeroing larger thin pool chunk size degrades performance.
|
||||
# thin_pool_zero = 1
|
||||
}
|
||||
|
||||
# This section that allows you to configure the nature of the
|
||||
# information that LVM2 reports.
|
||||
log {
|
||||
|
||||
# Controls the messages sent to stdout or stderr.
|
||||
# There are three levels of verbosity, 3 being the most verbose.
|
||||
verbose = 0
|
||||
|
||||
# Set to 1 to suppress all non-essential messages from stdout.
|
||||
# This has the same effect as -qq.
|
||||
# When this is set, the following commands still produce output:
|
||||
# dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay,
|
||||
# pvs, version, vgcfgrestore -l, vgdisplay, vgs.
|
||||
# Non-essential messages are shifted from log level 4 to log level 5
|
||||
# for syslog and lvm2_log_fn purposes.
|
||||
# Any 'yes' or 'no' questions not overridden by other arguments
|
||||
# are suppressed and default to 'no'.
|
||||
silent = 0
|
||||
|
||||
# Should we send log messages through syslog?
|
||||
# 1 is yes; 0 is no.
|
||||
syslog = 1
|
||||
|
||||
# Should we log error and debug messages to a file?
|
||||
# By default there is no log file.
|
||||
#file = "/var/log/lvm2.log"
|
||||
|
||||
# Should we overwrite the log file each time the program is run?
|
||||
# By default we append.
|
||||
overwrite = 0
|
||||
|
||||
# What level of log messages should we send to the log file and/or syslog?
|
||||
# There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
|
||||
# 7 is the most verbose (LOG_DEBUG).
|
||||
level = 0
|
||||
|
||||
# Format of output messages
|
||||
# Whether or not (1 or 0) to indent messages according to their severity
|
||||
indent = 1
|
||||
|
||||
# Whether or not (1 or 0) to display the command name on each line output
|
||||
command_names = 0
|
||||
|
||||
# A prefix to use before the message text (but after the command name,
|
||||
# if selected). Default is two spaces, so you can see/grep the severity
|
||||
# of each message.
|
||||
prefix = " "
|
||||
|
||||
# To make the messages look similar to the original LVM tools use:
|
||||
# indent = 0
|
||||
# command_names = 1
|
||||
# prefix = " -- "
|
||||
|
||||
# Set this if you want log messages during activation.
|
||||
# Don't use this in low memory situations (can deadlock).
|
||||
# activation = 0
|
||||
}
|
||||
|
||||
# Configuration of metadata backups and archiving. In LVM2 when we
|
||||
# talk about a 'backup' we mean making a copy of the metadata for the
|
||||
# *current* system. The 'archive' contains old metadata configurations.
|
||||
# Backups are stored in a human readable text format.
|
||||
backup {
|
||||
|
||||
# Should we maintain a backup of the current metadata configuration ?
|
||||
# Use 1 for Yes; 0 for No.
|
||||
# Think very hard before turning this off!
|
||||
backup = 1
|
||||
|
||||
# Where shall we keep it ?
|
||||
# Remember to back up this directory regularly!
|
||||
backup_dir = "/etc/lvm/backup"
|
||||
|
||||
# Should we maintain an archive of old metadata configurations.
|
||||
# Use 1 for Yes; 0 for No.
|
||||
# On by default. Think very hard before turning this off.
|
||||
archive = 1
|
||||
|
||||
# Where should archived files go ?
|
||||
# Remember to back up this directory regularly!
|
||||
archive_dir = "/etc/lvm/archive"
|
||||
|
||||
# What is the minimum number of archive files you wish to keep ?
|
||||
retain_min = 10
|
||||
|
||||
# What is the minimum time you wish to keep an archive file for ?
|
||||
retain_days = 30
|
||||
}
|
||||
|
||||
# Settings for the running LVM2 in shell (readline) mode.
|
||||
shell {
|
||||
|
||||
# Number of lines of history to store in ~/.lvm_history
|
||||
history_size = 100
|
||||
}
|
||||
|
||||
|
||||
# Miscellaneous global LVM2 settings
|
||||
global {
|
||||
|
||||
# The file creation mask for any files and directories created.
|
||||
# Interpreted as octal if the first digit is zero.
|
||||
umask = 077
|
||||
|
||||
# Allow other users to read the files
|
||||
#umask = 022
|
||||
|
||||
# Enabling test mode means that no changes to the on disk metadata
|
||||
# will be made. Equivalent to having the -t option on every
|
||||
# command. Defaults to off.
|
||||
test = 0
|
||||
|
||||
# Default value for --units argument
|
||||
units = "h"
|
||||
|
||||
# Since version 2.02.54, the tools distinguish between powers of
|
||||
# 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g.
|
||||
# KB, MB, GB).
|
||||
# If you have scripts that depend on the old behaviour, set this to 0
|
||||
# temporarily until you update them.
|
||||
si_unit_consistency = 1
|
||||
|
||||
# Whether or not to communicate with the kernel device-mapper.
|
||||
# Set to 0 if you want to use the tools to manipulate LVM metadata
|
||||
# without activating any logical volumes.
|
||||
# If the device-mapper kernel driver is not present in your kernel
|
||||
# setting this to 0 should suppress the error messages.
|
||||
activation = 1
|
||||
|
||||
# If we can't communicate with device-mapper, should we try running
|
||||
# the LVM1 tools?
|
||||
# This option only applies to 2.4 kernels and is provided to help you
|
||||
# switch between device-mapper kernels and LVM1 kernels.
|
||||
# The LVM1 tools need to be installed with .lvm1 suffixes
|
||||
# e.g. vgscan.lvm1 and they will stop working after you start using
|
||||
# the new lvm2 on-disk metadata format.
|
||||
# The default value is set when the tools are built.
|
||||
# fallback_to_lvm1 = 0
|
||||
|
||||
# The default metadata format that commands should use - "lvm1" or "lvm2".
|
||||
# The command line override is -M1 or -M2.
|
||||
# Defaults to "lvm2".
|
||||
# format = "lvm2"
|
||||
|
||||
# Location of proc filesystem
|
||||
proc = "/proc"
|
||||
|
||||
# Type of locking to use. Defaults to local file-based locking (1).
|
||||
# Turn locking off by setting to 0 (dangerous: risks metadata corruption
|
||||
# if LVM2 commands get run concurrently).
|
||||
# Type 2 uses the external shared library locking_library.
|
||||
# Type 3 uses built-in clustered locking.
|
||||
# Type 4 uses read-only locking which forbids any operations that might
|
||||
# change metadata.
|
||||
locking_type = 1
|
||||
|
||||
# Set to 0 to fail when a lock request cannot be satisfied immediately.
|
||||
wait_for_locks = 1
|
||||
|
||||
# If using external locking (type 2) and initialisation fails,
|
||||
# with this set to 1 an attempt will be made to use the built-in
|
||||
# clustered locking.
|
||||
# If you are using a customised locking_library you should set this to 0.
|
||||
fallback_to_clustered_locking = 1
|
||||
|
||||
# If an attempt to initialise type 2 or type 3 locking failed, perhaps
|
||||
# because cluster components such as clvmd are not running, with this set
|
||||
# to 1 an attempt will be made to use local file-based locking (type 1).
|
||||
# If this succeeds, only commands against local volume groups will proceed.
|
||||
# Volume Groups marked as clustered will be ignored.
|
||||
fallback_to_local_locking = 1
|
||||
|
||||
# Local non-LV directory that holds file-based locks while commands are
|
||||
# in progress. A directory like /tmp that may get wiped on reboot is OK.
|
||||
locking_dir = "/var/lock/lvm"
|
||||
|
||||
# Whenever there are competing read-only and read-write access requests for
|
||||
# a volume group's metadata, instead of always granting the read-only
|
||||
# requests immediately, delay them to allow the read-write requests to be
|
||||
# serviced. Without this setting, write access may be stalled by a high
|
||||
# volume of read-only requests.
|
||||
# NB. This option only affects locking_type = 1 viz. local file-based
|
||||
# locking.
|
||||
prioritise_write_locks = 1
|
||||
|
||||
# Other entries can go here to allow you to load shared libraries
|
||||
# e.g. if support for LVM1 metadata was compiled as a shared library use
|
||||
# format_libraries = "liblvm2format1.so"
|
||||
# Full pathnames can be given.
|
||||
|
||||
# Search this directory first for shared libraries.
|
||||
# library_dir = "/lib"
|
||||
|
||||
# The external locking library to load if locking_type is set to 2.
|
||||
# locking_library = "liblvm2clusterlock.so"
|
||||
|
||||
# Treat any internal errors as fatal errors, aborting the process that
|
||||
# encountered the internal error. Please only enable for debugging.
|
||||
abort_on_internal_errors = 0
|
||||
|
||||
# Check whether CRC is matching when parsed VG is used multiple times.
|
||||
# This is useful to catch unexpected internal cached volume group
|
||||
# structure modification. Please only enable for debugging.
|
||||
detect_internal_vg_cache_corruption = 0
|
||||
|
||||
# If set to 1, no operations that change on-disk metadata will be permitted.
|
||||
# Additionally, read-only commands that encounter metadata in need of repair
|
||||
# will still be allowed to proceed exactly as if the repair had been
|
||||
# performed (except for the unchanged vg_seqno).
|
||||
# Inappropriate use could mess up your system, so seek advice first!
|
||||
metadata_read_only = 0
|
||||
|
||||
# 'mirror_segtype_default' defines which segtype will be used when the
|
||||
# shorthand '-m' option is used for mirroring. The possible options are:
|
||||
#
|
||||
# "mirror" - The original RAID1 implementation provided by LVM2/DM. It is
|
||||
# characterized by a flexible log solution (core, disk, mirrored)
|
||||
# and by the necessity to block I/O while reconfiguring in the
|
||||
# event of a failure.
|
||||
#
|
||||
# There is an inherent race in the dmeventd failure handling
|
||||
# logic with snapshots of devices using this type of RAID1 that
|
||||
# in the worst case could cause a deadlock.
|
||||
# Ref: https://bugzilla.redhat.com/show_bug.cgi?id=817130#c10
|
||||
#
|
||||
# "raid1" - This implementation leverages MD's RAID1 personality through
|
||||
# device-mapper. It is characterized by a lack of log options.
|
||||
# (A log is always allocated for every device and they are placed
|
||||
# on the same device as the image - no separate devices are
|
||||
# required.) This mirror implementation does not require I/O
|
||||
# to be blocked in the kernel in the event of a failure.
|
||||
# This mirror implementation is not cluster-aware and cannot be
|
||||
# used in a shared (active/active) fashion in a cluster.
|
||||
#
|
||||
# Specify the '--type <mirror|raid1>' option to override this default
|
||||
# setting.
|
||||
mirror_segtype_default = "mirror"
|
||||
|
||||
# The default format for displaying LV names in lvdisplay was changed
|
||||
# in version 2.02.89 to show the LV name and path separately.
|
||||
# Previously this was always shown as /dev/vgname/lvname even when that
|
||||
# was never a valid path in the /dev filesystem.
|
||||
# Set to 1 to reinstate the previous format.
|
||||
#
|
||||
# lvdisplay_shows_full_device_path = 0
|
||||
|
||||
# Whether to use (trust) a running instance of lvmetad. If this is set to
|
||||
# 0, all commands fall back to the usual scanning mechanisms. When set to 1
|
||||
# *and* when lvmetad is running (it is not auto-started), the volume group
|
||||
# metadata and PV state flags are obtained from the lvmetad instance and no
|
||||
# scanning is done by the individual commands. In a setup with lvmetad,
|
||||
# lvmetad udev rules *must* be set up for LVM to work correctly. Without
|
||||
# proper udev rules, all changes in block device configuration will be
|
||||
# *ignored* until a manual 'pvscan --cache' is performed.
|
||||
#
|
||||
# If lvmetad has been running while use_lvmetad was 0, it MUST be stopped
|
||||
# before changing use_lvmetad to 1 and started again afterwards.
|
||||
use_lvmetad = 0
|
||||
|
||||
# Full path of the utility called to check that a thin metadata device
|
||||
# is in a state that allows it to be used.
|
||||
# Each time a thin pool needs to be activated or after it is deactivated
|
||||
# this utility is executed. The activation will only proceed if the utility
|
||||
# has an exit status of 0.
|
||||
# Set to "" to skip this check. (Not recommended.)
|
||||
# The thin tools are available as part of the device-mapper-persistent-data
|
||||
# package from https://github.com/jthornber/thin-provisioning-tools.
|
||||
#
|
||||
thin_check_executable = "/usr/sbin/thin_check"
|
||||
|
||||
# String with options passed with thin_check command. By default,
|
||||
# option '-q' is for quiet output.
|
||||
thin_check_options = [ "-q" ]
|
||||
|
||||
# If set, given features are not used by thin driver.
|
||||
# This can be helpful not just for testing, but e.g. allows avoiding
|
||||
# using problematic implementation of some thin feature.
|
||||
# Features:
|
||||
# block_size
|
||||
# discards
|
||||
# discards_non_power_2
|
||||
#
|
||||
# thin_disabled_features = [ "discards", "block_size" ]
|
||||
}
|
||||
|
||||
activation {
|
||||
# Set to 1 to perform internal checks on the operations issued to
|
||||
# libdevmapper. Useful for debugging problems with activation.
|
||||
# Some of the checks may be expensive, so it's best to use this
|
||||
# only when there seems to be a problem.
|
||||
checks = 0
|
||||
|
||||
# Set to 0 to disable udev synchronisation (if compiled into the binaries).
|
||||
# Processes will not wait for notification from udev.
|
||||
# They will continue irrespective of any possible udev processing
|
||||
# in the background. You should only use this if udev is not running
|
||||
# or has rules that ignore the devices LVM2 creates.
|
||||
# The command line argument --nodevsync takes precedence over this setting.
|
||||
# If set to 1 when udev is not running, and there are LVM2 processes
|
||||
# waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up.
|
||||
udev_sync = 1
|
||||
|
||||
# Set to 0 to disable the udev rules installed by LVM2 (if built with
|
||||
# --enable-udev_rules). LVM2 will then manage the /dev nodes and symlinks
|
||||
# for active logical volumes directly itself.
|
||||
# N.B. Manual intervention may be required if this setting is changed
|
||||
# while any logical volumes are active.
|
||||
udev_rules = 1
|
||||
|
||||
# Set to 1 for LVM2 to verify operations performed by udev. This turns on
|
||||
# additional checks (and if necessary, repairs) on entries in the device
|
||||
# directory after udev has completed processing its events.
|
||||
# Useful for diagnosing problems with LVM2/udev interactions.
|
||||
verify_udev_operations = 0
|
||||
|
||||
# If set to 1 and if deactivation of an LV fails, perhaps because
|
||||
# a process run from a quick udev rule temporarily opened the device,
|
||||
# retry the operation for a few seconds before failing.
|
||||
retry_deactivation = 1
|
||||
|
||||
# How to fill in missing stripes if activating an incomplete volume.
|
||||
# Using "error" will make inaccessible parts of the device return
|
||||
# I/O errors on access. You can instead use a device path, in which
|
||||
# case, that device will be used in place of missing stripes.
|
||||
# But note that using anything other than "error" with mirrored
|
||||
# or snapshotted volumes is likely to result in data corruption.
|
||||
missing_stripe_filler = "error"
|
||||
|
||||
# The linear target is an optimised version of the striped target
|
||||
# that only handles a single stripe. Set this to 0 to disable this
|
||||
# optimisation and always use the striped target.
|
||||
use_linear_target = 1
|
||||
|
||||
# How much stack (in KB) to reserve for use while devices suspended
|
||||
# Prior to version 2.02.89 this used to be set to 256KB
|
||||
reserved_stack = 64
|
||||
|
||||
# How much memory (in KB) to reserve for use while devices suspended
|
||||
reserved_memory = 8192
|
||||
|
||||
# Nice value used while devices suspended
|
||||
process_priority = -18
|
||||
|
||||
# If volume_list is defined, each LV is only activated if there is a
|
||||
# match against the list.
|
||||
# "vgname" and "vgname/lvname" are matched exactly.
|
||||
# "@tag" matches any tag set in the LV or VG.
|
||||
# "@*" matches if any tag defined on the host is also set in the LV or VG
|
||||
#
|
||||
# volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
|
||||
|
||||
# If auto_activation_volume_list is defined, each LV that is to be
|
||||
# activated is checked against the list while using the autoactivation
|
||||
# option (--activate ay/-a ay), and if it matches, it is activated.
|
||||
# "vgname" and "vgname/lvname" are matched exactly.
|
||||
# "@tag" matches any tag set in the LV or VG.
|
||||
# "@*" matches if any tag defined on the host is also set in the LV or VG
|
||||
#
|
||||
# auto_activation_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
|
||||
|
||||
# If read_only_volume_list is defined, each LV that is to be activated
|
||||
# is checked against the list, and if it matches, it is activated
|
||||
# in read-only mode. (This overrides '--permission rw' stored in the
|
||||
# metadata.)
|
||||
# "vgname" and "vgname/lvname" are matched exactly.
|
||||
# "@tag" matches any tag set in the LV or VG.
|
||||
# "@*" matches if any tag defined on the host is also set in the LV or VG
|
||||
#
|
||||
# read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
|
||||
|
||||
# Size (in KB) of each copy operation when mirroring
|
||||
mirror_region_size = 512
|
||||
|
||||
# Setting to use when there is no readahead value stored in the metadata.
|
||||
#
|
||||
# "none" - Disable readahead.
|
||||
# "auto" - Use default value chosen by kernel.
|
||||
readahead = "auto"
|
||||
|
||||
# 'raid_fault_policy' defines how a device failure in a RAID logical
|
||||
# volume is handled. This includes logical volumes that have the following
|
||||
# segment types: raid1, raid4, raid5*, and raid6*.
|
||||
#
|
||||
# In the event of a failure, the following policies will determine what
|
||||
# actions are performed during the automated response to failures (when
|
||||
# dmeventd is monitoring the RAID logical volume) and when 'lvconvert' is
|
||||
# called manually with the options '--repair' and '--use-policies'.
|
||||
#
|
||||
# "warn" - Use the system log to warn the user that a device in the RAID
|
||||
# logical volume has failed. It is left to the user to run
|
||||
# 'lvconvert --repair' manually to remove or replace the failed
|
||||
# device. As long as the number of failed devices does not
|
||||
# exceed the redundancy of the logical volume (1 device for
|
||||
# raid4/5, 2 for raid6, etc) the logical volume will remain
|
||||
# usable.
|
||||
#
|
||||
# "allocate" - Attempt to use any extra physical volumes in the volume
|
||||
# group as spares and replace faulty devices.
|
||||
#
|
||||
raid_fault_policy = "warn"
|
||||
|
||||
# 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define
|
||||
# how a device failure affecting a mirror (of "mirror" segment type) is
|
||||
# handled. A mirror is composed of mirror images (copies) and a log.
|
||||
# A disk log ensures that a mirror does not need to be re-synced
|
||||
# (all copies made the same) every time a machine reboots or crashes.
|
||||
#
|
||||
# In the event of a failure, the specified policy will be used to determine
|
||||
# what happens. This applies to automatic repairs (when the mirror is being
|
||||
# monitored by dmeventd) and to manual lvconvert --repair when
|
||||
# --use-policies is given.
|
||||
#
|
||||
# "remove" - Simply remove the faulty device and run without it. If
|
||||
# the log device fails, the mirror would convert to using
|
||||
# an in-memory log. This means the mirror will not
|
||||
# remember its sync status across crashes/reboots and
|
||||
# the entire mirror will be re-synced. If a
|
||||
# mirror image fails, the mirror will convert to a
|
||||
# non-mirrored device if there is only one remaining good
|
||||
# copy.
|
||||
#
|
||||
# "allocate" - Remove the faulty device and try to allocate space on
|
||||
# a new device to be a replacement for the failed device.
|
||||
# Using this policy for the log is fast and maintains the
|
||||
# ability to remember sync state through crashes/reboots.
|
||||
# Using this policy for a mirror device is slow, as it
|
||||
# requires the mirror to resynchronize the devices, but it
|
||||
# will preserve the mirror characteristic of the device.
|
||||
# This policy acts like "remove" if no suitable device and
|
||||
# space can be allocated for the replacement.
|
||||
#
|
||||
# "allocate_anywhere" - Not yet implemented. Useful to place the log device
|
||||
# temporarily on same physical volume as one of the mirror
|
||||
# images. This policy is not recommended for mirror devices
|
||||
# since it would break the redundant nature of the mirror. This
|
||||
# policy acts like "remove" if no suitable device and space can
|
||||
# be allocated for the replacement.
|
||||
|
||||
mirror_log_fault_policy = "allocate"
|
||||
mirror_image_fault_policy = "remove"
|
||||
|
||||
# 'snapshot_autoextend_threshold' and 'snapshot_autoextend_percent' define
|
||||
# how to handle automatic snapshot extension. The former defines when the
|
||||
# snapshot should be extended: when its space usage exceeds this many
|
||||
# percent. The latter defines how much extra space should be allocated for
|
||||
# the snapshot, in percent of its current size.
|
||||
#
|
||||
# For example, if you set snapshot_autoextend_threshold to 70 and
|
||||
# snapshot_autoextend_percent to 20, whenever a snapshot exceeds 70% usage,
|
||||
# it will be extended by another 20%. For a 1G snapshot, using up 700M will
|
||||
# trigger a resize to 1.2G. When the usage exceeds 840M, the snapshot will
|
||||
# be extended to 1.44G, and so on.
|
||||
#
|
||||
# Setting snapshot_autoextend_threshold to 100 disables automatic
|
||||
# extensions. The minimum value is 50 (A setting below 50 will be treated
|
||||
# as 50).
|
||||
|
||||
snapshot_autoextend_threshold = 100
|
||||
snapshot_autoextend_percent = 20
|
||||
|
||||
# 'thin_pool_autoextend_threshold' and 'thin_pool_autoextend_percent' define
|
||||
# how to handle automatic pool extension. The former defines when the
|
||||
# pool should be extended: when its space usage exceeds this many
|
||||
# percent. The latter defines how much extra space should be allocated for
|
||||
# the pool, in percent of its current size.
|
||||
#
|
||||
# For example, if you set thin_pool_autoextend_threshold to 70 and
|
||||
# thin_pool_autoextend_percent to 20, whenever a pool exceeds 70% usage,
|
||||
# it will be extended by another 20%. For a 1G pool, using up 700M will
|
||||
# trigger a resize to 1.2G. When the usage exceeds 840M, the pool will
|
||||
# be extended to 1.44G, and so on.
|
||||
#
|
||||
# Setting thin_pool_autoextend_threshold to 100 disables automatic
|
||||
# extensions. The minimum value is 50 (A setting below 50 will be treated
|
||||
# as 50).
|
||||
|
||||
thin_pool_autoextend_threshold = 100
|
||||
thin_pool_autoextend_percent = 20
|
||||
|
||||
# While activating devices, I/O to devices being (re)configured is
|
||||
# suspended, and as a precaution against deadlocks, LVM2 needs to pin
|
||||
# any memory it is using so it is not paged out. Groups of pages that
|
||||
# are known not to be accessed during activation need not be pinned
|
||||
# into memory. Each string listed in this setting is compared against
|
||||
# each line in /proc/self/maps, and the pages corresponding to any
|
||||
# lines that match are not pinned. On some systems locale-archive was
|
||||
# found to make up over 80% of the memory used by the process.
|
||||
# mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ]
|
||||
|
||||
# Set to 1 to revert to the default behaviour prior to version 2.02.62
|
||||
# which used mlockall() to pin the whole process's memory while activating
|
||||
# devices.
|
||||
use_mlockall = 0
|
||||
|
||||
# Monitoring is enabled by default when activating logical volumes.
|
||||
# Set to 0 to disable monitoring or use the --ignoremonitoring option.
|
||||
monitoring = 1
|
||||
|
||||
# When pvmove or lvconvert must wait for the kernel to finish
|
||||
# synchronising or merging data, they check and report progress
|
||||
# at intervals of this number of seconds. The default is 15 seconds.
|
||||
# If this is set to 0 and there is only one thing to wait for, there
|
||||
# are no progress reports, but the process is awoken immediately the
|
||||
# operation is complete.
|
||||
polling_interval = 15
|
||||
}
|
||||
|
||||
|
||||
####################
|
||||
# Advanced section #
|
||||
####################
|
||||
|
||||
# Metadata settings
|
||||
#
|
||||
# metadata {
|
||||
# Default number of copies of metadata to hold on each PV. 0, 1 or 2.
|
||||
# You might want to override it from the command line with 0
|
||||
# when running pvcreate on new PVs which are to be added to large VGs.
|
||||
|
||||
# pvmetadatacopies = 1
|
||||
|
||||
# Default number of copies of metadata to maintain for each VG.
|
||||
# If set to a non-zero value, LVM automatically chooses which of
|
||||
# the available metadata areas to use to achieve the requested
|
||||
# number of copies of the VG metadata. If you set a value larger
|
||||
# than the total number of metadata areas available then
|
||||
# metadata is stored in them all.
|
||||
# The default value of 0 ("unmanaged") disables this automatic
|
||||
# management and allows you to control which metadata areas
|
||||
# are used at the individual PV level using 'pvchange
|
||||
# --metadataignore y/n'.
|
||||
|
||||
# vgmetadatacopies = 0
|
||||
|
||||
# Approximate default size of on-disk metadata areas in sectors.
|
||||
# You should increase this if you have large volume groups or
|
||||
# you want to retain a large on-disk history of your metadata changes.
|
||||
|
||||
# pvmetadatasize = 255
|
||||
|
||||
# List of directories holding live copies of text format metadata.
|
||||
# These directories must not be on logical volumes!
|
||||
# It's possible to use LVM2 with a couple of directories here,
|
||||
# preferably on different (non-LV) filesystems, and with no other
|
||||
# on-disk metadata (pvmetadatacopies = 0). Or this can be in
|
||||
# addition to on-disk metadata areas.
|
||||
# The feature was originally added to simplify testing and is not
|
||||
# supported under low memory situations - the machine could lock up.
|
||||
#
|
||||
# Never edit any files in these directories by hand unless
|
||||
# you are absolutely sure you know what you are doing! Use
|
||||
# the supplied toolset to make changes (e.g. vgcfgrestore).
|
||||
|
||||
# dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
|
||||
#}
|
||||
|
||||
# Event daemon
|
||||
#
|
||||
dmeventd {
|
||||
# mirror_library is the library used when monitoring a mirror device.
|
||||
#
|
||||
# "libdevmapper-event-lvm2mirror.so" attempts to recover from
|
||||
# failures. It removes failed devices from a volume group and
|
||||
# reconfigures a mirror as necessary. If no mirror library is
|
||||
# provided, mirrors are not monitored through dmeventd.
|
||||
|
||||
mirror_library = "libdevmapper-event-lvm2mirror.so"
|
||||
|
||||
# snapshot_library is the library used when monitoring a snapshot device.
|
||||
#
|
||||
# "libdevmapper-event-lvm2snapshot.so" monitors the filling of
|
||||
# snapshots and emits a warning through syslog when the use of
|
||||
# the snapshot exceeds 80%. The warning is repeated when 85%, 90% and
|
||||
# 95% of the snapshot is filled.
|
||||
|
||||
snapshot_library = "libdevmapper-event-lvm2snapshot.so"
|
||||
|
||||
# thin_library is the library used when monitoring a thin device.
|
||||
#
|
||||
# "libdevmapper-event-lvm2thin.so" monitors the filling of
|
||||
# pool and emits a warning through syslog when the use of
|
||||
# the pool exceeds 80%. The warning is repeated when 85%, 90% and
|
||||
# 95% of the pool is filled.
|
||||
|
||||
thin_library = "libdevmapper-event-lvm2thin.so"
|
||||
|
||||
# Full path of the dmeventd binary.
|
||||
#
|
||||
# executable = "/sbin/dmeventd"
|
||||
}
|
102
config_samples/fuel_web/golden_fuelweb/cmp/nova/api-paste.ini
Normal file
102
config_samples/fuel_web/golden_fuelweb/cmp/nova/api-paste.ini
Normal file
@ -0,0 +1,102 @@
|
||||
############
|
||||
# Metadata #
|
||||
############
|
||||
[composite:metadata]
|
||||
use = egg:Paste#urlmap
|
||||
/: meta
|
||||
|
||||
[pipeline:meta]
|
||||
pipeline = ec2faultwrap logrequest metaapp
|
||||
|
||||
[app:metaapp]
|
||||
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
|
||||
|
||||
#######
|
||||
# EC2 #
|
||||
#######
|
||||
|
||||
[composite:ec2]
|
||||
use = egg:Paste#urlmap
|
||||
/services/Cloud: ec2cloud
|
||||
|
||||
[composite:ec2cloud]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
|
||||
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
|
||||
|
||||
[filter:ec2faultwrap]
|
||||
paste.filter_factory = nova.api.ec2:FaultWrapper.factory
|
||||
|
||||
[filter:logrequest]
|
||||
paste.filter_factory = nova.api.ec2:RequestLogging.factory
|
||||
|
||||
[filter:ec2lockout]
|
||||
paste.filter_factory = nova.api.ec2:Lockout.factory
|
||||
|
||||
[filter:ec2keystoneauth]
|
||||
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
|
||||
|
||||
[filter:ec2noauth]
|
||||
paste.filter_factory = nova.api.ec2:NoAuth.factory
|
||||
|
||||
[filter:cloudrequest]
|
||||
controller = nova.api.ec2.cloud.CloudController
|
||||
paste.filter_factory = nova.api.ec2:Requestify.factory
|
||||
|
||||
[filter:authorizer]
|
||||
paste.filter_factory = nova.api.ec2:Authorizer.factory
|
||||
|
||||
[filter:validator]
|
||||
paste.filter_factory = nova.api.ec2:Validator.factory
|
||||
|
||||
[app:ec2executor]
|
||||
paste.app_factory = nova.api.ec2:Executor.factory
|
||||
|
||||
#############
|
||||
# Openstack #
|
||||
#############
|
||||
|
||||
[composite:osapi_compute]
|
||||
use = call:nova.api.openstack.urlmap:urlmap_factory
|
||||
/: oscomputeversions
|
||||
/v1.1: openstack_compute_api_v2
|
||||
/v2: openstack_compute_api_v2
|
||||
|
||||
[composite:openstack_compute_api_v2]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
|
||||
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
|
||||
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
|
||||
|
||||
[filter:faultwrap]
|
||||
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
|
||||
|
||||
[filter:noauth]
|
||||
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
|
||||
|
||||
[filter:ratelimit]
|
||||
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
|
||||
|
||||
[filter:sizelimit]
|
||||
paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
|
||||
|
||||
[app:osapi_compute_app_v2]
|
||||
paste.app_factory = nova.api.openstack.compute:APIRouter.factory
|
||||
|
||||
[pipeline:oscomputeversions]
|
||||
pipeline = faultwrap oscomputeversionapp
|
||||
|
||||
[app:oscomputeversionapp]
|
||||
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
|
||||
|
||||
##########
|
||||
# Shared #
|
||||
##########
|
||||
|
||||
[filter:keystonecontext]
|
||||
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
||||
# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
|
||||
auth_version = v2.0
|
21
config_samples/fuel_web/golden_fuelweb/cmp/nova/logging.conf
Normal file
21
config_samples/fuel_web/golden_fuelweb/cmp/nova/logging.conf
Normal file
@ -0,0 +1,21 @@
|
||||
[loggers]
|
||||
keys = root
|
||||
|
||||
[handlers]
|
||||
keys = syslog
|
||||
|
||||
[formatters]
|
||||
keys = default
|
||||
|
||||
[logger_root]
|
||||
level = DEBUG
|
||||
handlers = syslog
|
||||
qualname = nova
|
||||
|
||||
[handler_syslog]
|
||||
class = handlers.SysLogHandler
|
||||
args = ('/dev/log', handlers.SysLogHandler.LOG_LOCAL0)
|
||||
formatter = default
|
||||
|
||||
[formatter_default]
|
||||
format = nova %(name)s %(levelname)s %(message)s
|
72
config_samples/fuel_web/golden_fuelweb/cmp/nova/nova.conf
Normal file
72
config_samples/fuel_web/golden_fuelweb/cmp/nova/nova.conf
Normal file
@ -0,0 +1,72 @@
|
||||
[DEFAULT]
|
||||
logdir = /var/log/nova
|
||||
state_path = /var/lib/nova
|
||||
lock_path = /var/lib/nova/tmp
|
||||
volumes_dir = /etc/nova/volumes
|
||||
dhcpbridge = /usr/bin/nova-dhcpbridge
|
||||
dhcpbridge_flagfile = /etc/nova/nova.conf
|
||||
force_dhcp_release = true
|
||||
injected_network_template = /usr/share/nova/interfaces.template
|
||||
libvirt_nonblocking = True
|
||||
libvirt_inject_partition = -1
|
||||
network_manager = nova.network.manager.VlanManager
|
||||
iscsi_helper = tgtadm
|
||||
sql_connection = mysql://nova:jMsyf1wU@192.168.0.7/nova
|
||||
compute_driver = libvirt.LibvirtDriver
|
||||
firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver
|
||||
rpc_backend = nova.rpc.impl_kombu
|
||||
rootwrap_config = /etc/nova/rootwrap.conf
|
||||
debug=true
|
||||
vncserver_proxyclient_address=192.168.0.5
|
||||
rabbit_hosts=192.168.0.7:5672
|
||||
osapi_compute_listen=192.168.0.5
|
||||
ec2_listen=192.168.0.5
|
||||
glance_api_servers=192.168.0.7:9292
|
||||
rabbit_userid=nova
|
||||
rabbit_ha_queues=True
|
||||
rabbit_password=zrk9MfKV
|
||||
verbose=true
|
||||
logging_default_format_string=%(levelname)s %(name)s [-] %(instance)s %(message)s
|
||||
logging_context_format_string=%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s %(message)s
|
||||
enabled_apis=metadata
|
||||
vnc_enabled=true
|
||||
rabbit_virtual_host=/
|
||||
image_service=nova.image.glance.GlanceImageService
|
||||
volume_api_class=nova.volume.cinder.API
|
||||
use_cow_images=true
|
||||
log_config=/etc/nova/logging.conf
|
||||
rabbit_port=5672
|
||||
vlan_start=103
|
||||
compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
|
||||
api_paste_config=/etc/nova/api-paste.ini
|
||||
novncproxy_base_url=http://240.0.1.7:6080/vnc_auto.html
|
||||
public_interface=eth0.100
|
||||
start_guests_on_host_boot=true
|
||||
service_down_time=60
|
||||
syslog_log_facility=LOCAL0
|
||||
vncserver_listen=192.168.0.5
|
||||
osapi_volume_listen=192.168.0.5
|
||||
metadata_listen=192.168.0.5
|
||||
auth_strategy=keystone
|
||||
fixed_range=10.0.0.0/24
|
||||
use_syslog=True
|
||||
dhcp_domain=novalocal
|
||||
metadata_host=192.168.0.5
|
||||
memcached_servers=controller-15:11211,controller-14:11211,controller-13:11211
|
||||
send_arp_for_ha=True
|
||||
multi_host=True
|
||||
allow_resize_to_same_host=True
|
||||
libvirt_type=qemu
|
||||
vlan_interface=eth0
|
||||
connection_type=libvirt
|
||||
|
||||
[keystone_authtoken]
|
||||
admin_tenant_name = services
|
||||
admin_user = nova
|
||||
admin_password = Zc1VlBC9
|
||||
auth_host = 192.168.0.7
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
signing_dir = /tmp/keystone-signing-nova
|
||||
signing_dirname=/tmp/keystone-signing-nova
|
||||
|
161
config_samples/fuel_web/golden_fuelweb/cmp/nova/policy.json
Normal file
161
config_samples/fuel_web/golden_fuelweb/cmp/nova/policy.json
Normal file
@ -0,0 +1,161 @@
|
||||
{
|
||||
"context_is_admin": "role:admin",
|
||||
"admin_or_owner": "is_admin:True or project_id:%(project_id)s",
|
||||
"default": "rule:admin_or_owner",
|
||||
|
||||
|
||||
"compute:create": "",
|
||||
"compute:create:attach_network": "",
|
||||
"compute:create:attach_volume": "",
|
||||
"compute:create:forced_host": "is_admin:True",
|
||||
"compute:get_all": "",
|
||||
"compute:get_all_tenants": "",
|
||||
|
||||
|
||||
"admin_api": "is_admin:True",
|
||||
"compute_extension:accounts": "rule:admin_api",
|
||||
"compute_extension:admin_actions": "rule:admin_api",
|
||||
"compute_extension:admin_actions:pause": "rule:admin_or_owner",
|
||||
"compute_extension:admin_actions:unpause": "rule:admin_or_owner",
|
||||
"compute_extension:admin_actions:suspend": "rule:admin_or_owner",
|
||||
"compute_extension:admin_actions:resume": "rule:admin_or_owner",
|
||||
"compute_extension:admin_actions:lock": "rule:admin_api",
|
||||
"compute_extension:admin_actions:unlock": "rule:admin_api",
|
||||
"compute_extension:admin_actions:resetNetwork": "rule:admin_api",
|
||||
"compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api",
|
||||
"compute_extension:admin_actions:createBackup": "rule:admin_or_owner",
|
||||
"compute_extension:admin_actions:migrateLive": "rule:admin_api",
|
||||
"compute_extension:admin_actions:resetState": "rule:admin_api",
|
||||
"compute_extension:admin_actions:migrate": "rule:admin_api",
|
||||
"compute_extension:aggregates": "rule:admin_api",
|
||||
"compute_extension:agents": "rule:admin_api",
|
||||
"compute_extension:attach_interfaces": "",
|
||||
"compute_extension:baremetal_nodes": "rule:admin_api",
|
||||
"compute_extension:cells": "rule:admin_api",
|
||||
"compute_extension:certificates": "",
|
||||
"compute_extension:cloudpipe": "rule:admin_api",
|
||||
"compute_extension:cloudpipe_update": "rule:admin_api",
|
||||
"compute_extension:console_output": "",
|
||||
"compute_extension:consoles": "",
|
||||
"compute_extension:coverage_ext": "rule:admin_api",
|
||||
"compute_extension:createserverext": "",
|
||||
"compute_extension:deferred_delete": "",
|
||||
"compute_extension:disk_config": "",
|
||||
"compute_extension:evacuate": "rule:admin_api",
|
||||
"compute_extension:extended_server_attributes": "rule:admin_api",
|
||||
"compute_extension:extended_status": "",
|
||||
"compute_extension:extended_availability_zone": "",
|
||||
"compute_extension:extended_ips": "",
|
||||
"compute_extension:fixed_ips": "rule:admin_api",
|
||||
"compute_extension:flavor_access": "",
|
||||
"compute_extension:flavor_disabled": "",
|
||||
"compute_extension:flavor_rxtx": "",
|
||||
"compute_extension:flavor_swap": "",
|
||||
"compute_extension:flavorextradata": "",
|
||||
"compute_extension:flavorextraspecs:index": "",
|
||||
"compute_extension:flavorextraspecs:show": "",
|
||||
"compute_extension:flavorextraspecs:create": "rule:admin_api",
|
||||
"compute_extension:flavorextraspecs:update": "rule:admin_api",
|
||||
"compute_extension:flavorextraspecs:delete": "rule:admin_api",
|
||||
"compute_extension:flavormanage": "rule:admin_api",
|
||||
"compute_extension:floating_ip_dns": "",
|
||||
"compute_extension:floating_ip_pools": "",
|
||||
"compute_extension:floating_ips": "",
|
||||
"compute_extension:floating_ips_bulk": "rule:admin_api",
|
||||
"compute_extension:fping": "",
|
||||
"compute_extension:fping:all_tenants": "rule:admin_api",
|
||||
"compute_extension:hide_server_addresses": "is_admin:False",
|
||||
"compute_extension:hosts": "rule:admin_api",
|
||||
"compute_extension:hypervisors": "rule:admin_api",
|
||||
"compute_extension:image_size": "",
|
||||
"compute_extension:instance_actions": "",
|
||||
"compute_extension:instance_actions:events": "rule:admin_api",
|
||||
"compute_extension:instance_usage_audit_log": "rule:admin_api",
|
||||
"compute_extension:keypairs": "",
|
||||
"compute_extension:multinic": "",
|
||||
"compute_extension:networks": "rule:admin_api",
|
||||
"compute_extension:networks:view": "",
|
||||
"compute_extension:networks_associate": "rule:admin_api",
|
||||
"compute_extension:quotas:show": "",
|
||||
"compute_extension:quotas:update": "rule:admin_api",
|
||||
"compute_extension:quota_classes": "",
|
||||
"compute_extension:rescue": "",
|
||||
"compute_extension:security_group_default_rules": "rule:admin_api",
|
||||
"compute_extension:security_groups": "",
|
||||
"compute_extension:server_diagnostics": "rule:admin_api",
|
||||
"compute_extension:server_password": "",
|
||||
"compute_extension:services": "rule:admin_api",
|
||||
"compute_extension:simple_tenant_usage:show": "rule:admin_or_owner",
|
||||
"compute_extension:simple_tenant_usage:list": "rule:admin_api",
|
||||
"compute_extension:users": "rule:admin_api",
|
||||
"compute_extension:virtual_interfaces": "",
|
||||
"compute_extension:virtual_storage_arrays": "",
|
||||
"compute_extension:volumes": "",
|
||||
"compute_extension:volume_attachments:index": "",
|
||||
"compute_extension:volume_attachments:show": "",
|
||||
"compute_extension:volume_attachments:create": "",
|
||||
"compute_extension:volume_attachments:delete": "",
|
||||
"compute_extension:volumetypes": "",
|
||||
"compute_extension:availability_zone:list": "",
|
||||
"compute_extension:availability_zone:detail": "rule:admin_api",
|
||||
|
||||
|
||||
"volume:create": "",
|
||||
"volume:get_all": "",
|
||||
"volume:get_volume_metadata": "",
|
||||
"volume:get_snapshot": "",
|
||||
"volume:get_all_snapshots": "",
|
||||
|
||||
|
||||
"volume_extension:types_manage": "rule:admin_api",
|
||||
"volume_extension:types_extra_specs": "rule:admin_api",
|
||||
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
|
||||
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
|
||||
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
|
||||
|
||||
|
||||
"network:get_all": "",
|
||||
"network:get": "",
|
||||
"network:create": "",
|
||||
"network:delete": "",
|
||||
"network:associate": "",
|
||||
"network:disassociate": "",
|
||||
"network:get_vifs_by_instance": "",
|
||||
"network:allocate_for_instance": "",
|
||||
"network:deallocate_for_instance": "",
|
||||
"network:validate_networks": "",
|
||||
"network:get_instance_uuids_by_ip_filter": "",
|
||||
"network:get_instance_id_by_floating_address": "",
|
||||
"network:setup_networks_on_host": "",
|
||||
"network:get_backdoor_port": "",
|
||||
|
||||
"network:get_floating_ip": "",
|
||||
"network:get_floating_ip_pools": "",
|
||||
"network:get_floating_ip_by_address": "",
|
||||
"network:get_floating_ips_by_project": "",
|
||||
"network:get_floating_ips_by_fixed_address": "",
|
||||
"network:allocate_floating_ip": "",
|
||||
"network:deallocate_floating_ip": "",
|
||||
"network:associate_floating_ip": "",
|
||||
"network:disassociate_floating_ip": "",
|
||||
"network:release_floating_ip": "",
|
||||
"network:migrate_instance_start": "",
|
||||
"network:migrate_instance_finish": "",
|
||||
|
||||
"network:get_fixed_ip": "",
|
||||
"network:get_fixed_ip_by_address": "",
|
||||
"network:add_fixed_ip_to_instance": "",
|
||||
"network:remove_fixed_ip_from_instance": "",
|
||||
"network:add_network_to_project": "",
|
||||
"network:get_instance_nw_info": "",
|
||||
|
||||
"network:get_dns_domains": "",
|
||||
"network:add_dns_entry": "",
|
||||
"network:modify_dns_entry": "",
|
||||
"network:delete_dns_entry": "",
|
||||
"network:get_dns_entries_by_address": "",
|
||||
"network:get_dns_entries_by_name": "",
|
||||
"network:create_private_dns_domain": "",
|
||||
"network:create_public_dns_domain": "",
|
||||
"network:delete_dns_domain": ""
|
||||
}
|
4
config_samples/fuel_web/golden_fuelweb/cmp/nova/release
Normal file
4
config_samples/fuel_web/golden_fuelweb/cmp/nova/release
Normal file
@ -0,0 +1,4 @@
|
||||
[Nova]
|
||||
vendor = Red Hat Inc.
|
||||
product = OpenStack Nova
|
||||
package = mira.2
|
@ -0,0 +1,27 @@
|
||||
# Configuration for nova-rootwrap
|
||||
# This file should be owned by (and only-writeable by) the root user
|
||||
|
||||
[DEFAULT]
|
||||
# List of directories to load filter definitions from (separated by ',').
|
||||
# These directories MUST all be only writeable by root !
|
||||
filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
|
||||
|
||||
# List of directories to search executables in, in case filters do not
|
||||
# explicitely specify a full path (separated by ',')
|
||||
# If not specified, defaults to system PATH environment variable.
|
||||
# These directories MUST all be only writeable by root !
|
||||
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
|
||||
|
||||
# Enable logging to syslog
|
||||
# Default value is False
|
||||
use_syslog=False
|
||||
|
||||
# Which syslog facility to use.
|
||||
# Valid values include auth, authpriv, syslog, user0, user1...
|
||||
# Default value is 'syslog'
|
||||
syslog_log_facility=syslog
|
||||
|
||||
# Which messages to log.
|
||||
# INFO means log all usage
|
||||
# ERROR means only log unsuccessful attempts
|
||||
syslog_log_level=ERROR
|
@ -0,0 +1,52 @@
|
||||
#############
|
||||
# OpenStack #
|
||||
#############
|
||||
|
||||
[composite:osapi_volume]
|
||||
use = call:cinder.api:root_app_factory
|
||||
/: apiversions
|
||||
/v1: openstack_volume_api_v1
|
||||
/v2: openstack_volume_api_v2
|
||||
|
||||
[composite:openstack_volume_api_v1]
|
||||
use = call:cinder.api.middleware.auth:pipeline_factory
|
||||
noauth = faultwrap sizelimit noauth apiv1
|
||||
keystone = faultwrap sizelimit authtoken keystonecontext apiv1
|
||||
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1
|
||||
|
||||
[composite:openstack_volume_api_v2]
|
||||
use = call:cinder.api.middleware.auth:pipeline_factory
|
||||
noauth = faultwrap sizelimit noauth apiv2
|
||||
keystone = faultwrap sizelimit authtoken keystonecontext apiv2
|
||||
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2
|
||||
|
||||
[filter:faultwrap]
|
||||
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
|
||||
|
||||
[filter:noauth]
|
||||
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
|
||||
|
||||
[filter:sizelimit]
|
||||
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
|
||||
|
||||
[app:apiv1]
|
||||
paste.app_factory = cinder.api.v1.router:APIRouter.factory
|
||||
|
||||
[app:apiv2]
|
||||
paste.app_factory = cinder.api.v2.router:APIRouter.factory
|
||||
|
||||
[pipeline:apiversions]
|
||||
pipeline = faultwrap osvolumeversionapp
|
||||
|
||||
[app:osvolumeversionapp]
|
||||
paste.app_factory = cinder.api.versions:Versions.factory
|
||||
|
||||
##########
|
||||
# Shared #
|
||||
##########
|
||||
|
||||
[filter:keystonecontext]
|
||||
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
@ -0,0 +1,33 @@
|
||||
[DEFAULT]
|
||||
logdir = /var/log/cinder
|
||||
state_path = /var/lib/cinder
|
||||
lock_path = /var/lib/cinder/tmp
|
||||
volumes_dir = /etc/cinder/volumes
|
||||
iscsi_helper = tgtadm
|
||||
sql_connection = mysql://cinder:aJJbMNpG@192.168.0.7/cinder?charset=utf8
|
||||
rpc_backend = cinder.openstack.common.rpc.impl_kombu
|
||||
rootwrap_config = /etc/cinder/rootwrap.conf
|
||||
api_paste_config=/etc/cinder/api-paste.ini
|
||||
volume_group=cinder
|
||||
log_config=/etc/cinder/logging.conf
|
||||
rabbit_userid=nova
|
||||
bind_host=0.0.0.0
|
||||
auth_strategy=keystone
|
||||
osapi_volume_listen=0.0.0.0
|
||||
iscsi_ip_address=172.16.0.6
|
||||
rabbit_virtual_host=/
|
||||
rabbit_hosts=controller-15:5672,controller-14:5672,controller-13:5672
|
||||
verbose=true
|
||||
rabbit_ha_queues=True
|
||||
rabbit_password=zrk9MfKV
|
||||
rabbit_port=5672
|
||||
|
||||
[keystone_authtoken]
|
||||
admin_tenant_name = services
|
||||
admin_user = cinder
|
||||
admin_password = LCBarOJB
|
||||
auth_host = 127.0.0.1
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
signing_dirname = /tmp/keystone-signing-cinder
|
||||
signing_dir=/tmp/keystone-signing-cinder
|
@ -0,0 +1,21 @@
|
||||
[loggers]
|
||||
keys = root
|
||||
|
||||
[handlers]
|
||||
keys = syslog
|
||||
|
||||
[formatters]
|
||||
keys = default
|
||||
|
||||
[logger_root]
|
||||
level = DEBUG
|
||||
handlers = syslog
|
||||
qualname = cinder
|
||||
|
||||
[handler_syslog]
|
||||
class = handlers.SysLogHandler
|
||||
args = ('/dev/log', handlers.SysLogHandler.LOG_LOCAL3)
|
||||
formatter = default
|
||||
|
||||
[formatter_default]
|
||||
format = cinder %(name)s %(levelname)s %(message)s
|
@ -0,0 +1,34 @@
|
||||
{
|
||||
"context_is_admin": [["role:admin"]],
|
||||
"admin_or_owner": [["is_admin:True"], ["project_id:%(project_id)s"]],
|
||||
"default": [["rule:admin_or_owner"]],
|
||||
|
||||
"admin_api": [["is_admin:True"]],
|
||||
|
||||
"volume:create": [],
|
||||
"volume:get_all": [],
|
||||
"volume:get_volume_metadata": [],
|
||||
"volume:get_snapshot": [],
|
||||
"volume:get_all_snapshots": [],
|
||||
|
||||
"volume_extension:types_manage": [["rule:admin_api"]],
|
||||
"volume_extension:types_extra_specs": [["rule:admin_api"]],
|
||||
"volume_extension:extended_snapshot_attributes": [],
|
||||
"volume_extension:volume_image_metadata": [],
|
||||
|
||||
"volume_extension:quotas:show": [],
|
||||
"volume_extension:quotas:update_for_project": [["rule:admin_api"]],
|
||||
"volume_extension:quotas:update_for_user": [["rule:admin_or_projectadmin"]],
|
||||
"volume_extension:quota_classes": [],
|
||||
|
||||
"volume_extension:volume_admin_actions:reset_status": [["rule:admin_api"]],
|
||||
"volume_extension:snapshot_admin_actions:reset_status": [["rule:admin_api"]],
|
||||
"volume_extension:volume_admin_actions:force_delete": [["rule:admin_api"]],
|
||||
"volume_extension:snapshot_admin_actions:force_delete": [["rule:admin_api"]],
|
||||
|
||||
"volume_extension:volume_host_attribute": [["rule:admin_api"]],
|
||||
"volume_extension:volume_tenant_attribute": [["rule:admin_api"]],
|
||||
"volume_extension:hosts": [["rule:admin_api"]],
|
||||
"volume_extension:services": [["rule:admin_api"]],
|
||||
"volume:services": [["rule:admin_api"]]
|
||||
}
|
@ -0,0 +1,27 @@
|
||||
# Configuration for cinder-rootwrap
|
||||
# This file should be owned by (and only-writeable by) the root user
|
||||
|
||||
[DEFAULT]
|
||||
# List of directories to load filter definitions from (separated by ',').
|
||||
# These directories MUST all be only writeable by root !
|
||||
filters_path=/etc/cinder/rootwrap.d,/usr/share/cinder/rootwrap
|
||||
|
||||
# List of directories to search executables in, in case filters do not
|
||||
# explicitely specify a full path (separated by ',')
|
||||
# If not specified, defaults to system PATH environment variable.
|
||||
# These directories MUST all be only writeable by root !
|
||||
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
|
||||
|
||||
# Enable logging to syslog
|
||||
# Default value is False
|
||||
use_syslog=False
|
||||
|
||||
# Which syslog facility to use.
|
||||
# Valid values include auth, authpriv, syslog, user0, user1...
|
||||
# Default value is 'syslog'
|
||||
syslog_log_facility=syslog
|
||||
|
||||
# Which messages to log.
|
||||
# INFO means log all usage
|
||||
# ERROR means only log unsuccessful attempts
|
||||
syslog_log_level=ERROR
|
@ -0,0 +1,36 @@
|
||||
# Generated by LVM2 version 2.02.98(2)-RHEL6 (2012-10-15): Mon Sep 16 13:31:17 2013
|
||||
|
||||
contents = "Text Format Volume Group"
|
||||
version = 1
|
||||
|
||||
description = "Created *before* executing '/sbin/vgs --noheadings -o name --config 'log{command_names=0 prefix=\" \"}''"
|
||||
|
||||
creation_host = "cinder-17.domain.tld" # Linux cinder-17.domain.tld 2.6.32-358.6.2.el6.x86_64 #1 SMP Thu May 16 20:59:36 UTC 2013 x86_64
|
||||
creation_time = 1379338277 # Mon Sep 16 13:31:17 2013
|
||||
|
||||
cinder {
|
||||
id = "JkhZ33-QPjO-1JQb-I6v0-ryhN-N5Eo-yRXato"
|
||||
seqno = 1
|
||||
format = "lvm2" # informational
|
||||
status = ["RESIZEABLE", "READ", "WRITE"]
|
||||
flags = []
|
||||
extent_size = 65536 # 32 Megabytes
|
||||
max_lv = 0
|
||||
max_pv = 0
|
||||
metadata_copies = 0
|
||||
|
||||
physical_volumes {
|
||||
|
||||
pv0 {
|
||||
id = "xOUdQg-oWXY-q9wX-2wJH-0eYK-jD5t-SOdWuV"
|
||||
device = "/dev/sda3" # Hint only
|
||||
|
||||
status = ["ALLOCATABLE"]
|
||||
flags = []
|
||||
dev_size = 3801088 # 1.8125 Gigabytes
|
||||
pe_start = 2048
|
||||
pe_count = 57 # 1.78125 Gigabytes
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,80 @@
|
||||
# Generated by LVM2 version 2.02.98(2)-RHEL6 (2012-10-15): Mon Sep 16 13:31:17 2013
|
||||
|
||||
contents = "Text Format Volume Group"
|
||||
version = 1
|
||||
|
||||
description = "Created *before* executing '/sbin/vgs --noheadings -o name --config 'log{command_names=0 prefix=\" \"}''"
|
||||
|
||||
creation_host = "cinder-17.domain.tld" # Linux cinder-17.domain.tld 2.6.32-358.6.2.el6.x86_64 #1 SMP Thu May 16 20:59:36 UTC 2013 x86_64
|
||||
creation_time = 1379338277 # Mon Sep 16 13:31:17 2013
|
||||
|
||||
os {
|
||||
id = "uO3dCZ-rc1v-AFiY-np8m-dBrA-ABCJ-32tTbx"
|
||||
seqno = 3
|
||||
format = "lvm2" # informational
|
||||
status = ["RESIZEABLE", "READ", "WRITE"]
|
||||
flags = []
|
||||
extent_size = 65536 # 32 Megabytes
|
||||
max_lv = 0
|
||||
max_pv = 0
|
||||
metadata_copies = 0
|
||||
|
||||
physical_volumes {
|
||||
|
||||
pv0 {
|
||||
id = "e9BcjU-6mIp-df6W-H46z-Fd70-2JmT-in6sEe"
|
||||
device = "/dev/sda2" # Hint only
|
||||
|
||||
status = ["ALLOCATABLE"]
|
||||
flags = []
|
||||
dev_size = 29321216 # 13.9814 Gigabytes
|
||||
pe_start = 2048
|
||||
pe_count = 447 # 13.9688 Gigabytes
|
||||
}
|
||||
}
|
||||
|
||||
logical_volumes {
|
||||
|
||||
root {
|
||||
id = "X0t3ds-557Y-3E9D-6KTT-d5fc-l0s1-W3hyLl"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "cinder-17.domain.tld"
|
||||
creation_time = 1379337901 # 2013-09-16 13:25:01 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 320 # 10 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 0
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
swap {
|
||||
id = "c3tvxZ-wxk4-bX5H-MbOP-QXsP-BCEk-KjB120"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "cinder-17.domain.tld"
|
||||
creation_time = 1379337908 # 2013-09-16 13:25:08 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 126 # 3.9375 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 320
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,36 @@
|
||||
# Generated by LVM2 version 2.02.98(2)-RHEL6 (2012-10-15): Mon Sep 16 13:31:17 2013
|
||||
|
||||
contents = "Text Format Volume Group"
|
||||
version = 1
|
||||
|
||||
description = "Created *after* executing '/sbin/vgs --noheadings -o name --config 'log{command_names=0 prefix=\" \"}''"
|
||||
|
||||
creation_host = "cinder-17.domain.tld" # Linux cinder-17.domain.tld 2.6.32-358.6.2.el6.x86_64 #1 SMP Thu May 16 20:59:36 UTC 2013 x86_64
|
||||
creation_time = 1379338277 # Mon Sep 16 13:31:17 2013
|
||||
|
||||
cinder {
|
||||
id = "JkhZ33-QPjO-1JQb-I6v0-ryhN-N5Eo-yRXato"
|
||||
seqno = 1
|
||||
format = "lvm2" # informational
|
||||
status = ["RESIZEABLE", "READ", "WRITE"]
|
||||
flags = []
|
||||
extent_size = 65536 # 32 Megabytes
|
||||
max_lv = 0
|
||||
max_pv = 0
|
||||
metadata_copies = 0
|
||||
|
||||
physical_volumes {
|
||||
|
||||
pv0 {
|
||||
id = "xOUdQg-oWXY-q9wX-2wJH-0eYK-jD5t-SOdWuV"
|
||||
device = "/dev/sda3" # Hint only
|
||||
|
||||
status = ["ALLOCATABLE"]
|
||||
flags = []
|
||||
dev_size = 3801088 # 1.8125 Gigabytes
|
||||
pe_start = 2048
|
||||
pe_count = 57 # 1.78125 Gigabytes
|
||||
}
|
||||
}
|
||||
|
||||
}
|
80
config_samples/fuel_web/golden_fuelweb/cndr/lvm/backup/os
Normal file
80
config_samples/fuel_web/golden_fuelweb/cndr/lvm/backup/os
Normal file
@ -0,0 +1,80 @@
|
||||
# Generated by LVM2 version 2.02.98(2)-RHEL6 (2012-10-15): Mon Sep 16 13:31:17 2013
|
||||
|
||||
contents = "Text Format Volume Group"
|
||||
version = 1
|
||||
|
||||
description = "Created *after* executing '/sbin/vgs --noheadings -o name --config 'log{command_names=0 prefix=\" \"}''"
|
||||
|
||||
creation_host = "cinder-17.domain.tld" # Linux cinder-17.domain.tld 2.6.32-358.6.2.el6.x86_64 #1 SMP Thu May 16 20:59:36 UTC 2013 x86_64
|
||||
creation_time = 1379338277 # Mon Sep 16 13:31:17 2013
|
||||
|
||||
os {
|
||||
id = "uO3dCZ-rc1v-AFiY-np8m-dBrA-ABCJ-32tTbx"
|
||||
seqno = 3
|
||||
format = "lvm2" # informational
|
||||
status = ["RESIZEABLE", "READ", "WRITE"]
|
||||
flags = []
|
||||
extent_size = 65536 # 32 Megabytes
|
||||
max_lv = 0
|
||||
max_pv = 0
|
||||
metadata_copies = 0
|
||||
|
||||
physical_volumes {
|
||||
|
||||
pv0 {
|
||||
id = "e9BcjU-6mIp-df6W-H46z-Fd70-2JmT-in6sEe"
|
||||
device = "/dev/sda2" # Hint only
|
||||
|
||||
status = ["ALLOCATABLE"]
|
||||
flags = []
|
||||
dev_size = 29321216 # 13.9814 Gigabytes
|
||||
pe_start = 2048
|
||||
pe_count = 447 # 13.9688 Gigabytes
|
||||
}
|
||||
}
|
||||
|
||||
logical_volumes {
|
||||
|
||||
root {
|
||||
id = "X0t3ds-557Y-3E9D-6KTT-d5fc-l0s1-W3hyLl"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "cinder-17.domain.tld"
|
||||
creation_time = 1379337901 # 2013-09-16 13:25:01 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 320 # 10 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 0
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
swap {
|
||||
id = "c3tvxZ-wxk4-bX5H-MbOP-QXsP-BCEk-KjB120"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "cinder-17.domain.tld"
|
||||
creation_time = 1379337908 # 2013-09-16 13:25:08 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 126 # 3.9375 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 320
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
843
config_samples/fuel_web/golden_fuelweb/cndr/lvm/lvm.conf
Normal file
843
config_samples/fuel_web/golden_fuelweb/cndr/lvm/lvm.conf
Normal file
@ -0,0 +1,843 @@
|
||||
# This is an example configuration file for the LVM2 system.
|
||||
# It contains the default settings that would be used if there was no
|
||||
# /etc/lvm/lvm.conf file.
|
||||
#
|
||||
# Refer to 'man lvm.conf' for further information including the file layout.
|
||||
#
|
||||
# To put this file in a different directory and override /etc/lvm set
|
||||
# the environment variable LVM_SYSTEM_DIR before running the tools.
|
||||
#
|
||||
# N.B. Take care that each setting only appears once if uncommenting
|
||||
# example settings in this file.
|
||||
|
||||
|
||||
# This section allows you to configure which block devices should
|
||||
# be used by the LVM system.
|
||||
devices {
|
||||
|
||||
# Where do you want your volume groups to appear ?
|
||||
dir = "/dev"
|
||||
|
||||
# An array of directories that contain the device nodes you wish
|
||||
# to use with LVM2.
|
||||
scan = [ "/dev" ]
|
||||
|
||||
# If set, the cache of block device nodes with all associated symlinks
|
||||
# will be constructed out of the existing udev database content.
|
||||
# This avoids using and opening any inapplicable non-block devices or
|
||||
# subdirectories found in the device directory. This setting is applied
|
||||
# to udev-managed device directory only, other directories will be scanned
|
||||
# fully. LVM2 needs to be compiled with udev support for this setting to
|
||||
# take effect. N.B. Any device node or symlink not managed by udev in
|
||||
# udev directory will be ignored with this setting on.
|
||||
obtain_device_list_from_udev = 1
|
||||
|
||||
# If several entries in the scanned directories correspond to the
|
||||
# same block device and the tools need to display a name for device,
|
||||
# all the pathnames are matched against each item in the following
|
||||
# list of regular expressions in turn and the first match is used.
|
||||
# preferred_names = [ ]
|
||||
|
||||
# Try to avoid using undescriptive /dev/dm-N names, if present.
|
||||
preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
|
||||
|
||||
# A filter that tells LVM2 to only use a restricted set of devices.
|
||||
# The filter consists of an array of regular expressions. These
|
||||
# expressions can be delimited by a character of your choice, and
|
||||
# prefixed with either an 'a' (for accept) or 'r' (for reject).
|
||||
# The first expression found to match a device name determines if
|
||||
# the device will be accepted or rejected (ignored). Devices that
|
||||
# don't match any patterns are accepted.
|
||||
|
||||
# Be careful if there there are symbolic links or multiple filesystem
|
||||
# entries for the same device as each name is checked separately against
|
||||
# the list of patterns. The effect is that if the first pattern in the
|
||||
# list to match a name is an 'a' pattern for any of the names, the device
|
||||
# is accepted; otherwise if the first pattern in the list to match a name
|
||||
# is an 'r' pattern for any of the names it is rejected; otherwise it is
|
||||
# accepted.
|
||||
|
||||
# Don't have more than one filter line active at once: only one gets used.
|
||||
|
||||
# Run vgscan after you change this parameter to ensure that
|
||||
# the cache file gets regenerated (see below).
|
||||
# If it doesn't do what you expect, check the output of 'vgscan -vvvv'.
|
||||
|
||||
|
||||
# By default we accept every block device:
|
||||
filter = [ "a/.*/" ]
|
||||
|
||||
# Exclude the cdrom drive
|
||||
# filter = [ "r|/dev/cdrom|" ]
|
||||
|
||||
# When testing I like to work with just loopback devices:
|
||||
# filter = [ "a/loop/", "r/.*/" ]
|
||||
|
||||
# Or maybe all loops and ide drives except hdc:
|
||||
# filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
|
||||
|
||||
# Use anchors if you want to be really specific
|
||||
# filter = [ "a|^/dev/hda8$|", "r/.*/" ]
|
||||
|
||||
# Since "filter" is often overriden from command line, it is not suitable
|
||||
# for system-wide device filtering (udev rules, lvmetad). To hide devices
|
||||
# from LVM-specific udev processing and/or from lvmetad, you need to set
|
||||
# global_filter. The syntax is the same as for normal "filter"
|
||||
# above. Devices that fail the global_filter are not even opened by LVM.
|
||||
|
||||
# global_filter = []
|
||||
|
||||
# The results of the filtering are cached on disk to avoid
|
||||
# rescanning dud devices (which can take a very long time).
|
||||
# By default this cache is stored in the /etc/lvm/cache directory
|
||||
# in a file called '.cache'.
|
||||
# It is safe to delete the contents: the tools regenerate it.
|
||||
# (The old setting 'cache' is still respected if neither of
|
||||
# these new ones is present.)
|
||||
# N.B. If obtain_device_list_from_udev is set to 1 the list of
|
||||
# devices is instead obtained from udev and any existing .cache
|
||||
# file is removed.
|
||||
cache_dir = "/etc/lvm/cache"
|
||||
cache_file_prefix = ""
|
||||
|
||||
# You can turn off writing this cache file by setting this to 0.
|
||||
write_cache_state = 1
|
||||
|
||||
# Advanced settings.
|
||||
|
||||
# List of pairs of additional acceptable block device types found
|
||||
# in /proc/devices with maximum (non-zero) number of partitions.
|
||||
# types = [ "fd", 16 ]
|
||||
|
||||
# If sysfs is mounted (2.6 kernels) restrict device scanning to
|
||||
# the block devices it believes are valid.
|
||||
# 1 enables; 0 disables.
|
||||
sysfs_scan = 1
|
||||
|
||||
# By default, LVM2 will ignore devices used as component paths
|
||||
# of device-mapper multipath devices.
|
||||
# 1 enables; 0 disables.
|
||||
multipath_component_detection = 1
|
||||
|
||||
# By default, LVM2 will ignore devices used as components of
|
||||
# software RAID (md) devices by looking for md superblocks.
|
||||
# 1 enables; 0 disables.
|
||||
md_component_detection = 1
|
||||
|
||||
# By default, if a PV is placed directly upon an md device, LVM2
|
||||
# will align its data blocks with the md device's stripe-width.
|
||||
# 1 enables; 0 disables.
|
||||
md_chunk_alignment = 1
|
||||
|
||||
# Default alignment of the start of a data area in MB. If set to 0,
|
||||
# a value of 64KB will be used. Set to 1 for 1MiB, 2 for 2MiB, etc.
|
||||
# default_data_alignment = 1
|
||||
|
||||
# By default, the start of a PV's data area will be a multiple of
|
||||
# the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs.
|
||||
# - minimum_io_size - the smallest request the device can perform
|
||||
# w/o incurring a read-modify-write penalty (e.g. MD's chunk size)
|
||||
# - optimal_io_size - the device's preferred unit of receiving I/O
|
||||
# (e.g. MD's stripe width)
|
||||
# minimum_io_size is used if optimal_io_size is undefined (0).
|
||||
# If md_chunk_alignment is enabled, that detects the optimal_io_size.
|
||||
# This setting takes precedence over md_chunk_alignment.
|
||||
# 1 enables; 0 disables.
|
||||
data_alignment_detection = 1
|
||||
|
||||
# Alignment (in KB) of start of data area when creating a new PV.
|
||||
# md_chunk_alignment and data_alignment_detection are disabled if set.
|
||||
# Set to 0 for the default alignment (see: data_alignment_default)
|
||||
# or page size, if larger.
|
||||
data_alignment = 0
|
||||
|
||||
# By default, the start of the PV's aligned data area will be shifted by
|
||||
# the 'alignment_offset' exposed in sysfs. This offset is often 0 but
|
||||
# may be non-zero; e.g.: certain 4KB sector drives that compensate for
|
||||
# windows partitioning will have an alignment_offset of 3584 bytes
|
||||
# (sector 7 is the lowest aligned logical block, the 4KB sectors start
|
||||
# at LBA -1, and consequently sector 63 is aligned on a 4KB boundary).
|
||||
# But note that pvcreate --dataalignmentoffset will skip this detection.
|
||||
# 1 enables; 0 disables.
|
||||
data_alignment_offset_detection = 1
|
||||
|
||||
# If, while scanning the system for PVs, LVM2 encounters a device-mapper
|
||||
# device that has its I/O suspended, it waits for it to become accessible.
|
||||
# Set this to 1 to skip such devices. This should only be needed
|
||||
# in recovery situations.
|
||||
ignore_suspended_devices = 0
|
||||
|
||||
# During each LVM operation errors received from each device are counted.
|
||||
# If the counter of a particular device exceeds the limit set here, no
|
||||
# further I/O is sent to that device for the remainder of the respective
|
||||
# operation. Setting the parameter to 0 disables the counters altogether.
|
||||
disable_after_error_count = 0
|
||||
|
||||
# Allow use of pvcreate --uuid without requiring --restorefile.
|
||||
require_restorefile_with_uuid = 1
|
||||
|
||||
# Minimum size (in KB) of block devices which can be used as PVs.
|
||||
# In a clustered environment all nodes must use the same value.
|
||||
# Any value smaller than 512KB is ignored.
|
||||
|
||||
# Ignore devices smaller than 2MB such as floppy drives.
|
||||
pv_min_size = 2048
|
||||
|
||||
# The original built-in setting was 512 up to and including version 2.02.84.
|
||||
# pv_min_size = 512
|
||||
|
||||
    # Issue discards to a logical volume's underlying physical volume(s) when
|
||||
# the logical volume is no longer using the physical volumes' space (e.g.
|
||||
# lvremove, lvreduce, etc). Discards inform the storage that a region is
|
||||
# no longer in use. Storage that supports discards advertise the protocol
|
||||
# specific way discards should be issued by the kernel (TRIM, UNMAP, or
|
||||
# WRITE SAME with UNMAP bit set). Not all storage will support or benefit
|
||||
# from discards but SSDs and thinly provisioned LUNs generally do. If set
|
||||
# to 1, discards will only be issued if both the storage and kernel provide
|
||||
# support.
|
||||
# 1 enables; 0 disables.
|
||||
issue_discards = 0
|
||||
}
|
||||
|
||||
# This section allows you to configure the way in which LVM selects
|
||||
# free space for its Logical Volumes.
|
||||
allocation {
|
||||
|
||||
# When searching for free space to extend an LV, the "cling"
|
||||
# allocation policy will choose space on the same PVs as the last
|
||||
# segment of the existing LV. If there is insufficient space and a
|
||||
# list of tags is defined here, it will check whether any of them are
|
||||
# attached to the PVs concerned and then seek to match those PV tags
|
||||
# between existing extents and new extents.
|
||||
# Use the special tag "@*" as a wildcard to match any PV tag.
|
||||
|
||||
# Example: LVs are mirrored between two sites within a single VG.
|
||||
# PVs are tagged with either @site1 or @site2 to indicate where
|
||||
# they are situated.
|
||||
|
||||
# cling_tag_list = [ "@site1", "@site2" ]
|
||||
# cling_tag_list = [ "@*" ]
|
||||
|
||||
# Changes made in version 2.02.85 extended the reach of the 'cling'
|
||||
# policies to detect more situations where data can be grouped
|
||||
# onto the same disks. Set this to 0 to revert to the previous
|
||||
# algorithm.
|
||||
maximise_cling = 1
|
||||
|
||||
# Set to 1 to guarantee that mirror logs will always be placed on
|
||||
# different PVs from the mirror images. This was the default
|
||||
# until version 2.02.85.
|
||||
mirror_logs_require_separate_pvs = 0
|
||||
|
||||
# Set to 1 to guarantee that thin pool metadata will always
|
||||
# be placed on different PVs from the pool data.
|
||||
thin_pool_metadata_require_separate_pvs = 0
|
||||
|
||||
# Specify the minimal chunk size (in KB) for thin pool volumes.
|
||||
    # Use of the larger chunk size may improve performance for plain
|
||||
# thin volumes, however using them for snapshot volumes is less efficient,
|
||||
# as it consumes more space and takes extra time for copying.
|
||||
# When unset, lvm tries to estimate chunk size starting from 64KB
|
||||
# Supported values are in range from 64 to 1048576.
|
||||
# thin_pool_chunk_size = 64
|
||||
|
||||
# Specify discards behavior of the thin pool volume.
|
||||
# Select one of "ignore", "nopassdown", "passdown"
|
||||
# thin_pool_discards = "passdown"
|
||||
|
||||
# Set to 0, to disable zeroing of thin pool data chunks before their
|
||||
# first use.
|
||||
# N.B. zeroing larger thin pool chunk size degrades performance.
|
||||
# thin_pool_zero = 1
|
||||
}
|
||||
|
||||
# This section allows you to configure the nature of the
|
||||
# information that LVM2 reports.
|
||||
log {
|
||||
|
||||
# Controls the messages sent to stdout or stderr.
|
||||
# There are three levels of verbosity, 3 being the most verbose.
|
||||
verbose = 0
|
||||
|
||||
# Set to 1 to suppress all non-essential messages from stdout.
|
||||
# This has the same effect as -qq.
|
||||
# When this is set, the following commands still produce output:
|
||||
# dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay,
|
||||
# pvs, version, vgcfgrestore -l, vgdisplay, vgs.
|
||||
# Non-essential messages are shifted from log level 4 to log level 5
|
||||
# for syslog and lvm2_log_fn purposes.
|
||||
# Any 'yes' or 'no' questions not overridden by other arguments
|
||||
# are suppressed and default to 'no'.
|
||||
silent = 0
|
||||
|
||||
# Should we send log messages through syslog?
|
||||
# 1 is yes; 0 is no.
|
||||
syslog = 1
|
||||
|
||||
# Should we log error and debug messages to a file?
|
||||
# By default there is no log file.
|
||||
#file = "/var/log/lvm2.log"
|
||||
|
||||
# Should we overwrite the log file each time the program is run?
|
||||
# By default we append.
|
||||
overwrite = 0
|
||||
|
||||
# What level of log messages should we send to the log file and/or syslog?
|
||||
# There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
|
||||
# 7 is the most verbose (LOG_DEBUG).
|
||||
level = 0
|
||||
|
||||
# Format of output messages
|
||||
# Whether or not (1 or 0) to indent messages according to their severity
|
||||
indent = 1
|
||||
|
||||
# Whether or not (1 or 0) to display the command name on each line output
|
||||
command_names = 0
|
||||
|
||||
# A prefix to use before the message text (but after the command name,
|
||||
# if selected). Default is two spaces, so you can see/grep the severity
|
||||
# of each message.
|
||||
prefix = " "
|
||||
|
||||
# To make the messages look similar to the original LVM tools use:
|
||||
# indent = 0
|
||||
# command_names = 1
|
||||
# prefix = " -- "
|
||||
|
||||
# Set this if you want log messages during activation.
|
||||
# Don't use this in low memory situations (can deadlock).
|
||||
# activation = 0
|
||||
}
|
||||
|
||||
# Configuration of metadata backups and archiving. In LVM2 when we
|
||||
# talk about a 'backup' we mean making a copy of the metadata for the
|
||||
# *current* system. The 'archive' contains old metadata configurations.
|
||||
# Backups are stored in a human readable text format.
|
||||
backup {
|
||||
|
||||
# Should we maintain a backup of the current metadata configuration ?
|
||||
# Use 1 for Yes; 0 for No.
|
||||
# Think very hard before turning this off!
|
||||
backup = 1
|
||||
|
||||
# Where shall we keep it ?
|
||||
# Remember to back up this directory regularly!
|
||||
backup_dir = "/etc/lvm/backup"
|
||||
|
||||
# Should we maintain an archive of old metadata configurations.
|
||||
# Use 1 for Yes; 0 for No.
|
||||
# On by default. Think very hard before turning this off.
|
||||
archive = 1
|
||||
|
||||
# Where should archived files go ?
|
||||
# Remember to back up this directory regularly!
|
||||
archive_dir = "/etc/lvm/archive"
|
||||
|
||||
# What is the minimum number of archive files you wish to keep ?
|
||||
retain_min = 10
|
||||
|
||||
# What is the minimum time you wish to keep an archive file for ?
|
||||
retain_days = 30
|
||||
}
|
||||
|
||||
# Settings for the running LVM2 in shell (readline) mode.
|
||||
shell {
|
||||
|
||||
# Number of lines of history to store in ~/.lvm_history
|
||||
history_size = 100
|
||||
}
|
||||
|
||||
|
||||
# Miscellaneous global LVM2 settings
|
||||
global {
|
||||
|
||||
# The file creation mask for any files and directories created.
|
||||
# Interpreted as octal if the first digit is zero.
|
||||
umask = 077
|
||||
|
||||
# Allow other users to read the files
|
||||
#umask = 022
|
||||
|
||||
# Enabling test mode means that no changes to the on disk metadata
|
||||
# will be made. Equivalent to having the -t option on every
|
||||
# command. Defaults to off.
|
||||
test = 0
|
||||
|
||||
# Default value for --units argument
|
||||
units = "h"
|
||||
|
||||
# Since version 2.02.54, the tools distinguish between powers of
|
||||
# 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g.
|
||||
# KB, MB, GB).
|
||||
# If you have scripts that depend on the old behaviour, set this to 0
|
||||
# temporarily until you update them.
|
||||
si_unit_consistency = 1
|
||||
|
||||
# Whether or not to communicate with the kernel device-mapper.
|
||||
# Set to 0 if you want to use the tools to manipulate LVM metadata
|
||||
# without activating any logical volumes.
|
||||
# If the device-mapper kernel driver is not present in your kernel
|
||||
# setting this to 0 should suppress the error messages.
|
||||
activation = 1
|
||||
|
||||
# If we can't communicate with device-mapper, should we try running
|
||||
# the LVM1 tools?
|
||||
# This option only applies to 2.4 kernels and is provided to help you
|
||||
# switch between device-mapper kernels and LVM1 kernels.
|
||||
# The LVM1 tools need to be installed with .lvm1 suffices
|
||||
# e.g. vgscan.lvm1 and they will stop working after you start using
|
||||
# the new lvm2 on-disk metadata format.
|
||||
# The default value is set when the tools are built.
|
||||
# fallback_to_lvm1 = 0
|
||||
|
||||
# The default metadata format that commands should use - "lvm1" or "lvm2".
|
||||
# The command line override is -M1 or -M2.
|
||||
# Defaults to "lvm2".
|
||||
# format = "lvm2"
|
||||
|
||||
# Location of proc filesystem
|
||||
proc = "/proc"
|
||||
|
||||
# Type of locking to use. Defaults to local file-based locking (1).
|
||||
# Turn locking off by setting to 0 (dangerous: risks metadata corruption
|
||||
# if LVM2 commands get run concurrently).
|
||||
# Type 2 uses the external shared library locking_library.
|
||||
# Type 3 uses built-in clustered locking.
|
||||
# Type 4 uses read-only locking which forbids any operations that might
|
||||
# change metadata.
|
||||
locking_type = 1
|
||||
|
||||
# Set to 0 to fail when a lock request cannot be satisfied immediately.
|
||||
wait_for_locks = 1
|
||||
|
||||
# If using external locking (type 2) and initialisation fails,
|
||||
# with this set to 1 an attempt will be made to use the built-in
|
||||
# clustered locking.
|
||||
# If you are using a customised locking_library you should set this to 0.
|
||||
fallback_to_clustered_locking = 1
|
||||
|
||||
# If an attempt to initialise type 2 or type 3 locking failed, perhaps
|
||||
# because cluster components such as clvmd are not running, with this set
|
||||
# to 1 an attempt will be made to use local file-based locking (type 1).
|
||||
# If this succeeds, only commands against local volume groups will proceed.
|
||||
# Volume Groups marked as clustered will be ignored.
|
||||
fallback_to_local_locking = 1
|
||||
|
||||
# Local non-LV directory that holds file-based locks while commands are
|
||||
# in progress. A directory like /tmp that may get wiped on reboot is OK.
|
||||
locking_dir = "/var/lock/lvm"
|
||||
|
||||
# Whenever there are competing read-only and read-write access requests for
|
||||
# a volume group's metadata, instead of always granting the read-only
|
||||
# requests immediately, delay them to allow the read-write requests to be
|
||||
# serviced. Without this setting, write access may be stalled by a high
|
||||
# volume of read-only requests.
|
||||
# NB. This option only affects locking_type = 1 viz. local file-based
|
||||
# locking.
|
||||
prioritise_write_locks = 1
|
||||
|
||||
# Other entries can go here to allow you to load shared libraries
|
||||
# e.g. if support for LVM1 metadata was compiled as a shared library use
|
||||
# format_libraries = "liblvm2format1.so"
|
||||
# Full pathnames can be given.
|
||||
|
||||
# Search this directory first for shared libraries.
|
||||
# library_dir = "/lib"
|
||||
|
||||
# The external locking library to load if locking_type is set to 2.
|
||||
# locking_library = "liblvm2clusterlock.so"
|
||||
|
||||
# Treat any internal errors as fatal errors, aborting the process that
|
||||
# encountered the internal error. Please only enable for debugging.
|
||||
abort_on_internal_errors = 0
|
||||
|
||||
# Check whether CRC is matching when parsed VG is used multiple times.
|
||||
# This is useful to catch unexpected internal cached volume group
|
||||
# structure modification. Please only enable for debugging.
|
||||
detect_internal_vg_cache_corruption = 0
|
||||
|
||||
# If set to 1, no operations that change on-disk metadata will be permitted.
|
||||
# Additionally, read-only commands that encounter metadata in need of repair
|
||||
# will still be allowed to proceed exactly as if the repair had been
|
||||
# performed (except for the unchanged vg_seqno).
|
||||
# Inappropriate use could mess up your system, so seek advice first!
|
||||
metadata_read_only = 0
|
||||
|
||||
# 'mirror_segtype_default' defines which segtype will be used when the
|
||||
# shorthand '-m' option is used for mirroring. The possible options are:
|
||||
#
|
||||
# "mirror" - The original RAID1 implementation provided by LVM2/DM. It is
|
||||
# characterized by a flexible log solution (core, disk, mirrored)
|
||||
# and by the necessity to block I/O while reconfiguring in the
|
||||
# event of a failure.
|
||||
#
|
||||
# There is an inherent race in the dmeventd failure handling
|
||||
# logic with snapshots of devices using this type of RAID1 that
|
||||
# in the worst case could cause a deadlock.
|
||||
# Ref: https://bugzilla.redhat.com/show_bug.cgi?id=817130#c10
|
||||
#
|
||||
# "raid1" - This implementation leverages MD's RAID1 personality through
|
||||
# device-mapper. It is characterized by a lack of log options.
|
||||
# (A log is always allocated for every device and they are placed
|
||||
# on the same device as the image - no separate devices are
|
||||
# required.) This mirror implementation does not require I/O
|
||||
# to be blocked in the kernel in the event of a failure.
|
||||
# This mirror implementation is not cluster-aware and cannot be
|
||||
# used in a shared (active/active) fashion in a cluster.
|
||||
#
|
||||
# Specify the '--type <mirror|raid1>' option to override this default
|
||||
# setting.
|
||||
mirror_segtype_default = "mirror"
|
||||
|
||||
# The default format for displaying LV names in lvdisplay was changed
|
||||
# in version 2.02.89 to show the LV name and path separately.
|
||||
# Previously this was always shown as /dev/vgname/lvname even when that
|
||||
# was never a valid path in the /dev filesystem.
|
||||
# Set to 1 to reinstate the previous format.
|
||||
#
|
||||
# lvdisplay_shows_full_device_path = 0
|
||||
|
||||
# Whether to use (trust) a running instance of lvmetad. If this is set to
|
||||
# 0, all commands fall back to the usual scanning mechanisms. When set to 1
|
||||
# *and* when lvmetad is running (it is not auto-started), the volume group
|
||||
# metadata and PV state flags are obtained from the lvmetad instance and no
|
||||
# scanning is done by the individual commands. In a setup with lvmetad,
|
||||
# lvmetad udev rules *must* be set up for LVM to work correctly. Without
|
||||
# proper udev rules, all changes in block device configuration will be
|
||||
# *ignored* until a manual 'pvscan --cache' is performed.
|
||||
#
|
||||
# If lvmetad has been running while use_lvmetad was 0, it MUST be stopped
|
||||
# before changing use_lvmetad to 1 and started again afterwards.
|
||||
use_lvmetad = 0
|
||||
|
||||
# Full path of the utility called to check that a thin metadata device
|
||||
# is in a state that allows it to be used.
|
||||
# Each time a thin pool needs to be activated or after it is deactivated
|
||||
# this utility is executed. The activation will only proceed if the utility
|
||||
# has an exit status of 0.
|
||||
# Set to "" to skip this check. (Not recommended.)
|
||||
# The thin tools are available as part of the device-mapper-persistent-data
|
||||
# package from https://github.com/jthornber/thin-provisioning-tools.
|
||||
#
|
||||
thin_check_executable = "/usr/sbin/thin_check"
|
||||
|
||||
# String with options passed with thin_check command. By default,
|
||||
# option '-q' is for quiet output.
|
||||
thin_check_options = [ "-q" ]
|
||||
|
||||
# If set, given features are not used by thin driver.
|
||||
# This can be helpful not just for testing, but i.e. allows to avoid
|
||||
# using problematic implementation of some thin feature.
|
||||
# Features:
|
||||
# block_size
|
||||
# discards
|
||||
# discards_non_power_2
|
||||
#
|
||||
# thin_disabled_features = [ "discards", "block_size" ]
|
||||
}
|
||||
|
||||
activation {
|
||||
# Set to 1 to perform internal checks on the operations issued to
|
||||
# libdevmapper. Useful for debugging problems with activation.
|
||||
# Some of the checks may be expensive, so it's best to use this
|
||||
# only when there seems to be a problem.
|
||||
checks = 0
|
||||
|
||||
# Set to 0 to disable udev synchronisation (if compiled into the binaries).
|
||||
# Processes will not wait for notification from udev.
|
||||
# They will continue irrespective of any possible udev processing
|
||||
# in the background. You should only use this if udev is not running
|
||||
# or has rules that ignore the devices LVM2 creates.
|
||||
# The command line argument --nodevsync takes precedence over this setting.
|
||||
# If set to 1 when udev is not running, and there are LVM2 processes
|
||||
# waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up.
|
||||
udev_sync = 1
|
||||
|
||||
# Set to 0 to disable the udev rules installed by LVM2 (if built with
|
||||
# --enable-udev_rules). LVM2 will then manage the /dev nodes and symlinks
|
||||
# for active logical volumes directly itself.
|
||||
# N.B. Manual intervention may be required if this setting is changed
|
||||
# while any logical volumes are active.
|
||||
udev_rules = 1
|
||||
|
||||
# Set to 1 for LVM2 to verify operations performed by udev. This turns on
|
||||
# additional checks (and if necessary, repairs) on entries in the device
|
||||
# directory after udev has completed processing its events.
|
||||
# Useful for diagnosing problems with LVM2/udev interactions.
|
||||
verify_udev_operations = 0
|
||||
|
||||
# If set to 1 and if deactivation of an LV fails, perhaps because
|
||||
# a process run from a quick udev rule temporarily opened the device,
|
||||
# retry the operation for a few seconds before failing.
|
||||
retry_deactivation = 1
|
||||
|
||||
# How to fill in missing stripes if activating an incomplete volume.
|
||||
# Using "error" will make inaccessible parts of the device return
|
||||
# I/O errors on access. You can instead use a device path, in which
|
||||
    # case, that device will be used in place of missing stripes.
|
||||
# But note that using anything other than "error" with mirrored
|
||||
# or snapshotted volumes is likely to result in data corruption.
|
||||
missing_stripe_filler = "error"
|
||||
|
||||
# The linear target is an optimised version of the striped target
|
||||
# that only handles a single stripe. Set this to 0 to disable this
|
||||
# optimisation and always use the striped target.
|
||||
use_linear_target = 1
|
||||
|
||||
# How much stack (in KB) to reserve for use while devices suspended
|
||||
# Prior to version 2.02.89 this used to be set to 256KB
|
||||
reserved_stack = 64
|
||||
|
||||
# How much memory (in KB) to reserve for use while devices suspended
|
||||
reserved_memory = 8192
|
||||
|
||||
# Nice value used while devices suspended
|
||||
process_priority = -18
|
||||
|
||||
# If volume_list is defined, each LV is only activated if there is a
|
||||
# match against the list.
|
||||
# "vgname" and "vgname/lvname" are matched exactly.
|
||||
# "@tag" matches any tag set in the LV or VG.
|
||||
# "@*" matches if any tag defined on the host is also set in the LV or VG
|
||||
#
|
||||
# volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
|
||||
|
||||
# If auto_activation_volume_list is defined, each LV that is to be
|
||||
# activated is checked against the list while using the autoactivation
|
||||
# option (--activate ay/-a ay), and if it matches, it is activated.
|
||||
# "vgname" and "vgname/lvname" are matched exactly.
|
||||
# "@tag" matches any tag set in the LV or VG.
|
||||
# "@*" matches if any tag defined on the host is also set in the LV or VG
|
||||
#
|
||||
# auto_activation_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
|
||||
|
||||
# If read_only_volume_list is defined, each LV that is to be activated
|
||||
    # is checked against the list, and if it matches, it is activated
|
||||
# in read-only mode. (This overrides '--permission rw' stored in the
|
||||
# metadata.)
|
||||
# "vgname" and "vgname/lvname" are matched exactly.
|
||||
# "@tag" matches any tag set in the LV or VG.
|
||||
# "@*" matches if any tag defined on the host is also set in the LV or VG
|
||||
#
|
||||
# read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
|
||||
|
||||
# Size (in KB) of each copy operation when mirroring
|
||||
mirror_region_size = 512
|
||||
|
||||
# Setting to use when there is no readahead value stored in the metadata.
|
||||
#
|
||||
# "none" - Disable readahead.
|
||||
# "auto" - Use default value chosen by kernel.
|
||||
readahead = "auto"
|
||||
|
||||
# 'raid_fault_policy' defines how a device failure in a RAID logical
|
||||
# volume is handled. This includes logical volumes that have the following
|
||||
# segment types: raid1, raid4, raid5*, and raid6*.
|
||||
#
|
||||
# In the event of a failure, the following policies will determine what
|
||||
# actions are performed during the automated response to failures (when
|
||||
# dmeventd is monitoring the RAID logical volume) and when 'lvconvert' is
|
||||
# called manually with the options '--repair' and '--use-policies'.
|
||||
#
|
||||
# "warn" - Use the system log to warn the user that a device in the RAID
|
||||
# logical volume has failed. It is left to the user to run
|
||||
# 'lvconvert --repair' manually to remove or replace the failed
|
||||
# device. As long as the number of failed devices does not
|
||||
# exceed the redundancy of the logical volume (1 device for
|
||||
# raid4/5, 2 for raid6, etc) the logical volume will remain
|
||||
# usable.
|
||||
#
|
||||
# "allocate" - Attempt to use any extra physical volumes in the volume
|
||||
# group as spares and replace faulty devices.
|
||||
#
|
||||
raid_fault_policy = "warn"
|
||||
|
||||
# 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define
|
||||
# how a device failure affecting a mirror (of "mirror" segment type) is
|
||||
# handled. A mirror is composed of mirror images (copies) and a log.
|
||||
# A disk log ensures that a mirror does not need to be re-synced
|
||||
# (all copies made the same) every time a machine reboots or crashes.
|
||||
#
|
||||
# In the event of a failure, the specified policy will be used to determine
|
||||
# what happens. This applies to automatic repairs (when the mirror is being
|
||||
# monitored by dmeventd) and to manual lvconvert --repair when
|
||||
# --use-policies is given.
|
||||
#
|
||||
# "remove" - Simply remove the faulty device and run without it. If
|
||||
# the log device fails, the mirror would convert to using
|
||||
# an in-memory log. This means the mirror will not
|
||||
# remember its sync status across crashes/reboots and
|
||||
# the entire mirror will be re-synced. If a
|
||||
# mirror image fails, the mirror will convert to a
|
||||
# non-mirrored device if there is only one remaining good
|
||||
# copy.
|
||||
#
|
||||
# "allocate" - Remove the faulty device and try to allocate space on
|
||||
# a new device to be a replacement for the failed device.
|
||||
# Using this policy for the log is fast and maintains the
|
||||
# ability to remember sync state through crashes/reboots.
|
||||
# Using this policy for a mirror device is slow, as it
|
||||
# requires the mirror to resynchronize the devices, but it
|
||||
# will preserve the mirror characteristic of the device.
|
||||
# This policy acts like "remove" if no suitable device and
|
||||
# space can be allocated for the replacement.
|
||||
#
|
||||
# "allocate_anywhere" - Not yet implemented. Useful to place the log device
|
||||
# temporarily on same physical volume as one of the mirror
|
||||
# images. This policy is not recommended for mirror devices
|
||||
# since it would break the redundant nature of the mirror. This
|
||||
# policy acts like "remove" if no suitable device and space can
|
||||
# be allocated for the replacement.
|
||||
|
||||
mirror_log_fault_policy = "allocate"
|
||||
mirror_image_fault_policy = "remove"
|
||||
|
||||
# 'snapshot_autoextend_threshold' and 'snapshot_autoextend_percent' define
|
||||
# how to handle automatic snapshot extension. The former defines when the
|
||||
# snapshot should be extended: when its space usage exceeds this many
|
||||
# percent. The latter defines how much extra space should be allocated for
|
||||
# the snapshot, in percent of its current size.
|
||||
#
|
||||
# For example, if you set snapshot_autoextend_threshold to 70 and
|
||||
# snapshot_autoextend_percent to 20, whenever a snapshot exceeds 70% usage,
|
||||
# it will be extended by another 20%. For a 1G snapshot, using up 700M will
|
||||
# trigger a resize to 1.2G. When the usage exceeds 840M, the snapshot will
|
||||
# be extended to 1.44G, and so on.
|
||||
#
|
||||
# Setting snapshot_autoextend_threshold to 100 disables automatic
|
||||
# extensions. The minimum value is 50 (A setting below 50 will be treated
|
||||
# as 50).
|
||||
|
||||
snapshot_autoextend_threshold = 100
|
||||
snapshot_autoextend_percent = 20
|
||||
|
||||
# 'thin_pool_autoextend_threshold' and 'thin_pool_autoextend_percent' define
|
||||
# how to handle automatic pool extension. The former defines when the
|
||||
# pool should be extended: when its space usage exceeds this many
|
||||
# percent. The latter defines how much extra space should be allocated for
|
||||
# the pool, in percent of its current size.
|
||||
#
|
||||
# For example, if you set thin_pool_autoextend_threshold to 70 and
|
||||
# thin_pool_autoextend_percent to 20, whenever a pool exceeds 70% usage,
|
||||
# it will be extended by another 20%. For a 1G pool, using up 700M will
|
||||
# trigger a resize to 1.2G. When the usage exceeds 840M, the pool will
|
||||
# be extended to 1.44G, and so on.
|
||||
#
|
||||
# Setting thin_pool_autoextend_threshold to 100 disables automatic
|
||||
# extensions. The minimum value is 50 (A setting below 50 will be treated
|
||||
# as 50).
|
||||
|
||||
thin_pool_autoextend_threshold = 100
|
||||
thin_pool_autoextend_percent = 20
|
||||
|
||||
# While activating devices, I/O to devices being (re)configured is
|
||||
# suspended, and as a precaution against deadlocks, LVM2 needs to pin
|
||||
# any memory it is using so it is not paged out. Groups of pages that
|
||||
# are known not to be accessed during activation need not be pinned
|
||||
# into memory. Each string listed in this setting is compared against
|
||||
# each line in /proc/self/maps, and the pages corresponding to any
|
||||
# lines that match are not pinned. On some systems locale-archive was
|
||||
# found to make up over 80% of the memory used by the process.
|
||||
# mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ]
|
||||
|
||||
# Set to 1 to revert to the default behaviour prior to version 2.02.62
|
||||
# which used mlockall() to pin the whole process's memory while activating
|
||||
# devices.
|
||||
use_mlockall = 0
|
||||
|
||||
# Monitoring is enabled by default when activating logical volumes.
|
||||
# Set to 0 to disable monitoring or use the --ignoremonitoring option.
|
||||
monitoring = 1
|
||||
|
||||
# When pvmove or lvconvert must wait for the kernel to finish
|
||||
# synchronising or merging data, they check and report progress
|
||||
# at intervals of this number of seconds. The default is 15 seconds.
|
||||
# If this is set to 0 and there is only one thing to wait for, there
|
||||
# are no progress reports, but the process is awoken immediately the
|
||||
# operation is complete.
|
||||
polling_interval = 15
|
||||
}
|
||||
|
||||
|
||||
####################
|
||||
# Advanced section #
|
||||
####################
|
||||
|
||||
# Metadata settings
|
||||
#
|
||||
# metadata {
|
||||
# Default number of copies of metadata to hold on each PV. 0, 1 or 2.
|
||||
# You might want to override it from the command line with 0
|
||||
# when running pvcreate on new PVs which are to be added to large VGs.
|
||||
|
||||
# pvmetadatacopies = 1
|
||||
|
||||
# Default number of copies of metadata to maintain for each VG.
|
||||
# If set to a non-zero value, LVM automatically chooses which of
|
||||
# the available metadata areas to use to achieve the requested
|
||||
# number of copies of the VG metadata. If you set a value larger
|
||||
# than the the total number of metadata areas available then
|
||||
# metadata is stored in them all.
|
||||
# The default value of 0 ("unmanaged") disables this automatic
|
||||
# management and allows you to control which metadata areas
|
||||
# are used at the individual PV level using 'pvchange
|
||||
# --metadataignore y/n'.
|
||||
|
||||
# vgmetadatacopies = 0
|
||||
|
||||
# Approximate default size of on-disk metadata areas in sectors.
|
||||
# You should increase this if you have large volume groups or
|
||||
# you want to retain a large on-disk history of your metadata changes.
|
||||
|
||||
# pvmetadatasize = 255
|
||||
|
||||
# List of directories holding live copies of text format metadata.
|
||||
# These directories must not be on logical volumes!
|
||||
# It's possible to use LVM2 with a couple of directories here,
|
||||
# preferably on different (non-LV) filesystems, and with no other
|
||||
# on-disk metadata (pvmetadatacopies = 0). Or this can be in
|
||||
# addition to on-disk metadata areas.
|
||||
# The feature was originally added to simplify testing and is not
|
||||
# supported under low memory situations - the machine could lock up.
|
||||
#
|
||||
# Never edit any files in these directories by hand unless you
|
||||
# you are absolutely sure you know what you are doing! Use
|
||||
# the supplied toolset to make changes (e.g. vgcfgrestore).
|
||||
|
||||
# dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
|
||||
#}
|
||||
|
||||
# Event daemon
|
||||
#
|
||||
dmeventd {
|
||||
# mirror_library is the library used when monitoring a mirror device.
|
||||
#
|
||||
# "libdevmapper-event-lvm2mirror.so" attempts to recover from
|
||||
# failures. It removes failed devices from a volume group and
|
||||
# reconfigures a mirror as necessary. If no mirror library is
|
||||
# provided, mirrors are not monitored through dmeventd.
|
||||
|
||||
mirror_library = "libdevmapper-event-lvm2mirror.so"
|
||||
|
||||
# snapshot_library is the library used when monitoring a snapshot device.
|
||||
#
|
||||
# "libdevmapper-event-lvm2snapshot.so" monitors the filling of
|
||||
# snapshots and emits a warning through syslog when the use of
|
||||
# the snapshot exceeds 80%. The warning is repeated when 85%, 90% and
|
||||
# 95% of the snapshot is filled.
|
||||
|
||||
snapshot_library = "libdevmapper-event-lvm2snapshot.so"
|
||||
|
||||
# thin_library is the library used when monitoring a thin device.
|
||||
#
|
||||
# "libdevmapper-event-lvm2thin.so" monitors the filling of
|
||||
# pool and emits a warning through syslog when the use of
|
||||
# the pool exceeds 80%. The warning is repeated when 85%, 90% and
|
||||
# 95% of the pool is filled.
|
||||
|
||||
thin_library = "libdevmapper-event-lvm2thin.so"
|
||||
|
||||
# Full path of the dmeventd binary.
|
||||
#
|
||||
# executable = "/sbin/dmeventd"
|
||||
}
|
@ -0,0 +1,52 @@
|
||||
#############
|
||||
# OpenStack #
|
||||
#############
|
||||
|
||||
[composite:osapi_volume]
|
||||
use = call:cinder.api:root_app_factory
|
||||
/: apiversions
|
||||
/v1: openstack_volume_api_v1
|
||||
/v2: openstack_volume_api_v2
|
||||
|
||||
[composite:openstack_volume_api_v1]
|
||||
use = call:cinder.api.middleware.auth:pipeline_factory
|
||||
noauth = faultwrap sizelimit noauth apiv1
|
||||
keystone = faultwrap sizelimit authtoken keystonecontext apiv1
|
||||
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1
|
||||
|
||||
[composite:openstack_volume_api_v2]
|
||||
use = call:cinder.api.middleware.auth:pipeline_factory
|
||||
noauth = faultwrap sizelimit noauth apiv2
|
||||
keystone = faultwrap sizelimit authtoken keystonecontext apiv2
|
||||
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2
|
||||
|
||||
[filter:faultwrap]
|
||||
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
|
||||
|
||||
[filter:noauth]
|
||||
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
|
||||
|
||||
[filter:sizelimit]
|
||||
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
|
||||
|
||||
[app:apiv1]
|
||||
paste.app_factory = cinder.api.v1.router:APIRouter.factory
|
||||
|
||||
[app:apiv2]
|
||||
paste.app_factory = cinder.api.v2.router:APIRouter.factory
|
||||
|
||||
[pipeline:apiversions]
|
||||
pipeline = faultwrap osvolumeversionapp
|
||||
|
||||
[app:osvolumeversionapp]
|
||||
paste.app_factory = cinder.api.versions:Versions.factory
|
||||
|
||||
##########
|
||||
# Shared #
|
||||
##########
|
||||
|
||||
[filter:keystonecontext]
|
||||
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
@ -0,0 +1,31 @@
|
||||
[DEFAULT]
|
||||
logdir = /var/log/cinder
|
||||
state_path = /var/lib/cinder
|
||||
lock_path = /var/lib/cinder/tmp
|
||||
volumes_dir = /etc/cinder/volumes
|
||||
iscsi_helper = tgtadm
|
||||
sql_connection = mysql://cinder:aJJbMNpG@192.168.0.7/cinder?charset=utf8
|
||||
rpc_backend = cinder.openstack.common.rpc.impl_kombu
|
||||
rootwrap_config = /etc/cinder/rootwrap.conf
|
||||
api_paste_config=/etc/cinder/api-paste.ini
|
||||
log_config=/etc/cinder/logging.conf
|
||||
rabbit_userid=nova
|
||||
bind_host=192.168.0.2
|
||||
auth_strategy=keystone
|
||||
osapi_volume_listen=192.168.0.2
|
||||
rabbit_virtual_host=/
|
||||
rabbit_hosts=192.168.0.7:5672
|
||||
verbose=true
|
||||
rabbit_ha_queues=True
|
||||
rabbit_password=zrk9MfKV
|
||||
rabbit_port=5672
|
||||
|
||||
[keystone_authtoken]
|
||||
admin_tenant_name = services
|
||||
admin_user = cinder
|
||||
admin_password = LCBarOJB
|
||||
auth_host = 192.168.0.7
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
signing_dirname = /tmp/keystone-signing-cinder
|
||||
signing_dir=/tmp/keystone-signing-cinder
|
@ -0,0 +1,21 @@
|
||||
[loggers]
|
||||
keys = root
|
||||
|
||||
[handlers]
|
||||
keys = syslog
|
||||
|
||||
[formatters]
|
||||
keys = default
|
||||
|
||||
[logger_root]
|
||||
level = DEBUG
|
||||
handlers = syslog
|
||||
qualname = cinder
|
||||
|
||||
[handler_syslog]
|
||||
class = handlers.SysLogHandler
|
||||
args = ('/dev/log', handlers.SysLogHandler.LOG_LOCAL3)
|
||||
formatter = default
|
||||
|
||||
[formatter_default]
|
||||
format = cinder %(name)s %(levelname)s %(message)s
|
@ -0,0 +1,34 @@
|
||||
{
|
||||
"context_is_admin": [["role:admin"]],
|
||||
"admin_or_owner": [["is_admin:True"], ["project_id:%(project_id)s"]],
|
||||
"default": [["rule:admin_or_owner"]],
|
||||
|
||||
"admin_api": [["is_admin:True"]],
|
||||
|
||||
"volume:create": [],
|
||||
"volume:get_all": [],
|
||||
"volume:get_volume_metadata": [],
|
||||
"volume:get_snapshot": [],
|
||||
"volume:get_all_snapshots": [],
|
||||
|
||||
"volume_extension:types_manage": [["rule:admin_api"]],
|
||||
"volume_extension:types_extra_specs": [["rule:admin_api"]],
|
||||
"volume_extension:extended_snapshot_attributes": [],
|
||||
"volume_extension:volume_image_metadata": [],
|
||||
|
||||
"volume_extension:quotas:show": [],
|
||||
"volume_extension:quotas:update_for_project": [["rule:admin_api"]],
|
||||
"volume_extension:quotas:update_for_user": [["rule:admin_or_projectadmin"]],
|
||||
"volume_extension:quota_classes": [],
|
||||
|
||||
"volume_extension:volume_admin_actions:reset_status": [["rule:admin_api"]],
|
||||
"volume_extension:snapshot_admin_actions:reset_status": [["rule:admin_api"]],
|
||||
"volume_extension:volume_admin_actions:force_delete": [["rule:admin_api"]],
|
||||
"volume_extension:snapshot_admin_actions:force_delete": [["rule:admin_api"]],
|
||||
|
||||
"volume_extension:volume_host_attribute": [["rule:admin_api"]],
|
||||
"volume_extension:volume_tenant_attribute": [["rule:admin_api"]],
|
||||
"volume_extension:hosts": [["rule:admin_api"]],
|
||||
"volume_extension:services": [["rule:admin_api"]],
|
||||
"volume:services": [["rule:admin_api"]]
|
||||
}
|
@ -0,0 +1,27 @@
|
||||
# Configuration for cinder-rootwrap
|
||||
# This file should be owned by (and only-writeable by) the root user
|
||||
|
||||
[DEFAULT]
|
||||
# List of directories to load filter definitions from (separated by ',').
|
||||
# These directories MUST all be only writeable by root !
|
||||
filters_path=/etc/cinder/rootwrap.d,/usr/share/cinder/rootwrap
|
||||
|
||||
# List of directories to search executables in, in case filters do not
|
||||
# explicitely specify a full path (separated by ',')
|
||||
# If not specified, defaults to system PATH environment variable.
|
||||
# These directories MUST all be only writeable by root !
|
||||
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
|
||||
|
||||
# Enable logging to syslog
|
||||
# Default value is False
|
||||
use_syslog=False
|
||||
|
||||
# Which syslog facility to use.
|
||||
# Valid values include auth, authpriv, syslog, user0, user1...
|
||||
# Default value is 'syslog'
|
||||
syslog_log_facility=syslog
|
||||
|
||||
# Which messages to log.
|
||||
# INFO means log all usage
|
||||
# ERROR means only log unsuccessful attempts
|
||||
syslog_log_level=ERROR
|
@ -0,0 +1,57 @@
|
||||
# Use this pipeline for no auth or image caching - DEFAULT
|
||||
[pipeline:glance-api]
|
||||
pipeline = versionnegotiation unauthenticated-context rootapp
|
||||
|
||||
# Use this pipeline for image caching and no auth
|
||||
[pipeline:glance-api-caching]
|
||||
pipeline = versionnegotiation unauthenticated-context cache rootapp
|
||||
|
||||
# Use this pipeline for caching w/ management interface but no auth
|
||||
[pipeline:glance-api-cachemanagement]
|
||||
pipeline = versionnegotiation unauthenticated-context cache cachemanage rootapp
|
||||
|
||||
# Use this pipeline for keystone auth
|
||||
[pipeline:glance-api-keystone]
|
||||
pipeline = versionnegotiation authtoken context rootapp
|
||||
|
||||
# Use this pipeline for keystone auth with image caching
|
||||
[pipeline:glance-api-keystone+caching]
|
||||
pipeline = versionnegotiation authtoken context cache rootapp
|
||||
|
||||
# Use this pipeline for keystone auth with caching and cache management
|
||||
[pipeline:glance-api-keystone+cachemanagement]
|
||||
pipeline = versionnegotiation authtoken context cache cachemanage rootapp
|
||||
|
||||
[composite:rootapp]
|
||||
paste.composite_factory = glance.api:root_app_factory
|
||||
/: apiversions
|
||||
/v1: apiv1app
|
||||
/v2: apiv2app
|
||||
|
||||
[app:apiversions]
|
||||
paste.app_factory = glance.api.versions:create_resource
|
||||
|
||||
[app:apiv1app]
|
||||
paste.app_factory = glance.api.v1.router:API.factory
|
||||
|
||||
[app:apiv2app]
|
||||
paste.app_factory = glance.api.v2.router:API.factory
|
||||
|
||||
[filter:versionnegotiation]
|
||||
paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
|
||||
|
||||
[filter:cache]
|
||||
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
|
||||
|
||||
[filter:cachemanage]
|
||||
paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
|
||||
|
||||
[filter:context]
|
||||
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
|
||||
|
||||
[filter:unauthenticated-context]
|
||||
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
||||
delay_auth_decision = true
|
@ -0,0 +1,364 @@
|
||||
[DEFAULT]
|
||||
# Show more verbose log output (sets INFO log level output)
|
||||
#verbose = False
|
||||
verbose = true
|
||||
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
#debug = False
|
||||
debug = true
|
||||
|
||||
# Which backend scheme should Glance use by default is not specified
|
||||
# in a request to add a new image to Glance? Known schemes are determined
|
||||
# by the known_stores option below.
|
||||
# Default: 'file'
|
||||
default_store = swift
|
||||
|
||||
# List of which store classes and store class locations are
|
||||
# currently known to glance at startup.
|
||||
#known_stores = glance.store.filesystem.Store,
|
||||
# glance.store.http.Store,
|
||||
# glance.store.rbd.Store,
|
||||
# glance.store.s3.Store,
|
||||
# glance.store.swift.Store,
|
||||
|
||||
|
||||
# Maximum image size (in bytes) that may be uploaded through the
|
||||
# Glance API server. Defaults to 1 TB.
|
||||
# WARNING: this value should only be increased after careful consideration
|
||||
# and must be set to a value under 8 EB (9223372036854775808).
|
||||
#image_size_cap = 1099511627776
|
||||
|
||||
# Address to bind the API server
|
||||
bind_host = 192.168.0.2
|
||||
|
||||
# Port the bind the API server to
|
||||
bind_port = 9292
|
||||
|
||||
# Log to this file. Make sure you do not set the same log
|
||||
# file for both the API and registry servers!
|
||||
log_file = /var/log/glance/api.log
|
||||
|
||||
# Backlog requests when creating socket
|
||||
backlog = 4096
|
||||
|
||||
# TCP_KEEPIDLE value in seconds when creating socket.
|
||||
# Not supported on OS X.
|
||||
#tcp_keepidle = 600
|
||||
|
||||
# SQLAlchemy connection string for the reference implementation
|
||||
# registry server. Any valid SQLAlchemy connection string is fine.
|
||||
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
|
||||
sql_connection = mysql://glance:mKJzWqLK@192.168.0.7/glance
|
||||
|
||||
# Period in seconds after which SQLAlchemy should reestablish its connection
|
||||
# to the database.
|
||||
#
|
||||
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
|
||||
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
|
||||
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
|
||||
# before MySQL can drop the connection.
|
||||
sql_idle_timeout = 3600
|
||||
|
||||
# Number of Glance API worker processes to start.
|
||||
# On machines with more than one CPU increasing this value
|
||||
# may improve performance (especially if using SSL with
|
||||
# compression turned on). It is typically recommended to set
|
||||
# this value to the number of CPUs present on your machine.
|
||||
workers = 1
|
||||
|
||||
# Role used to identify an authenticated user as administrator
|
||||
#admin_role = admin
|
||||
|
||||
# Allow unauthenticated users to access the API with read-only
|
||||
# privileges. This only applies when using ContextMiddleware.
|
||||
#allow_anonymous_access = False
|
||||
|
||||
# Allow access to version 1 of glance api
|
||||
#enable_v1_api = True
|
||||
|
||||
# Allow access to version 2 of glance api
|
||||
#enable_v2_api = True
|
||||
|
||||
# Return the URL that references where the data is stored on
|
||||
# the backend storage system. For example, if using the
|
||||
# file system store a URL of 'file:///path/to/image' will
|
||||
# be returned to the user in the 'direct_url' meta-data field.
|
||||
# The default value is false.
|
||||
#show_image_direct_url = False
|
||||
|
||||
# ================= Syslog Options ============================
|
||||
|
||||
# Send logs to syslog (/dev/log) instead of to file specified
|
||||
# by `log_file`
|
||||
#use_syslog = False
|
||||
use_syslog = False
|
||||
|
||||
# Facility to use. If unset defaults to LOG_USER.
|
||||
#syslog_log_facility = LOG_LOCAL0
|
||||
|
||||
# ================= SSL Options ===============================
|
||||
|
||||
# Certificate file to use when starting API server securely
|
||||
#cert_file = /path/to/certfile
|
||||
|
||||
# Private key file to use when starting API server securely
|
||||
#key_file = /path/to/keyfile
|
||||
|
||||
# CA certificate file to use to verify connecting clients
|
||||
#ca_file = /path/to/cafile
|
||||
|
||||
# ================= Security Options ==========================
|
||||
|
||||
# AES key for encrypting store 'location' metadata, including
|
||||
# -- if used -- Swift or S3 credentials
|
||||
# Should be set to a random string of length 16, 24 or 32 bytes
|
||||
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
|
||||
|
||||
# ============ Registry Options ===============================
|
||||
|
||||
# Address to find the registry server
|
||||
registry_host = 192.168.0.7
|
||||
|
||||
# Port the registry server is listening on
|
||||
registry_port = 9191
|
||||
|
||||
# What protocol to use when connecting to the registry server?
|
||||
# Set to https for secure HTTP communication
|
||||
registry_client_protocol = http
|
||||
|
||||
# The path to the key file to use in SSL connections to the
|
||||
# registry server, if any. Alternately, you may set the
|
||||
# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
|
||||
#registry_client_key_file = /path/to/key/file
|
||||
|
||||
# The path to the cert file to use in SSL connections to the
|
||||
# registry server, if any. Alternately, you may set the
|
||||
# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
|
||||
#registry_client_cert_file = /path/to/cert/file
|
||||
|
||||
# The path to the certifying authority cert file to use in SSL connections
|
||||
# to the registry server, if any. Alternately, you may set the
|
||||
# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
|
||||
#registry_client_ca_file = /path/to/ca/file
|
||||
|
||||
# When using SSL in connections to the registry server, do not require
|
||||
# validation via a certifying authority. This is the registry's equivalent of
|
||||
# specifying --insecure on the command line using glanceclient for the API
|
||||
# Default: False
|
||||
#registry_client_insecure = False
|
||||
|
||||
# The period of time, in seconds, that the API server will wait for a registry
|
||||
# request to complete. A value of '0' implies no timeout.
|
||||
# Default: 600
|
||||
#registry_client_timeout = 600
|
||||
|
||||
# Whether to automatically create the database tables.
|
||||
# Default: False
|
||||
#db_auto_create = False
|
||||
|
||||
# ============ Notification System Options =====================
|
||||
|
||||
# Notifications can be sent when images are create, updated or deleted.
|
||||
# There are three methods of sending notifications, logging (via the
|
||||
# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
|
||||
# message queue), or noop (no notifications sent, the default)
|
||||
notifier_strategy = noop
|
||||
|
||||
# Configuration options if sending notifications via rabbitmq (these are
|
||||
# the defaults)
|
||||
rabbit_host = localhost
|
||||
rabbit_port = 5672
|
||||
rabbit_use_ssl = false
|
||||
rabbit_userid = guest
|
||||
rabbit_password = guest
|
||||
rabbit_virtual_host = /
|
||||
rabbit_notification_exchange = glance
|
||||
rabbit_notification_topic = notifications
|
||||
rabbit_durable_queues = False
|
||||
|
||||
# Configuration options if sending notifications via Qpid (these are
|
||||
# the defaults)
|
||||
qpid_notification_exchange = glance
|
||||
qpid_notification_topic = notifications
|
||||
qpid_host = localhost
|
||||
qpid_port = 5672
|
||||
qpid_username =
|
||||
qpid_password =
|
||||
qpid_sasl_mechanisms =
|
||||
qpid_reconnect_timeout = 0
|
||||
qpid_reconnect_limit = 0
|
||||
qpid_reconnect_interval_min = 0
|
||||
qpid_reconnect_interval_max = 0
|
||||
qpid_reconnect_interval = 0
|
||||
qpid_heartbeat = 5
|
||||
# Set to 'ssl' to enable SSL
|
||||
qpid_protocol = tcp
|
||||
qpid_tcp_nodelay = True
|
||||
|
||||
# ============ Filesystem Store Options ========================
|
||||
|
||||
# Directory that the Filesystem backend store
|
||||
# writes image data to
|
||||
filesystem_store_datadir = /var/lib/glance/images/
|
||||
|
||||
# ============ Swift Store Options =============================
|
||||
|
||||
# Version of the authentication service to use
|
||||
# Valid versions are '2' for keystone and '1' for swauth and rackspace
|
||||
swift_store_auth_version = 2
|
||||
|
||||
# Address where the Swift authentication service lives
|
||||
# Valid schemes are 'http://' and 'https://'
|
||||
# If no scheme specified, default to 'https://'
|
||||
# For swauth, use something like '127.0.0.1:8080/v1.0/'
|
||||
swift_store_auth_address = http://192.168.0.7:5000/v2.0/
|
||||
|
||||
# User to authenticate against the Swift authentication service
|
||||
# If you use Swift authentication service, set it to 'account':'user'
|
||||
# where 'account' is a Swift storage account and 'user'
|
||||
# is a user in that account
|
||||
swift_store_user = services:glance
|
||||
|
||||
# Auth key for the user authenticating against the
|
||||
# Swift authentication service
|
||||
swift_store_key = Bzlunhw0
|
||||
|
||||
# Container within the account that the account should use
|
||||
# for storing images in Swift
|
||||
swift_store_container = glance
|
||||
|
||||
# Do we create the container if it does not exist?
|
||||
swift_store_create_container_on_put = True
|
||||
|
||||
# What size, in MB, should Glance start chunking image files
|
||||
# and do a large object manifest in Swift? By default, this is
|
||||
# the maximum object size in Swift, which is 5GB
|
||||
swift_store_large_object_size = 5120
|
||||
|
||||
# When doing a large object manifest, what size, in MB, should
|
||||
# Glance write chunks to Swift? This amount of data is written
|
||||
# to a temporary disk buffer during the process of chunking
|
||||
# the image file, and the default is 200MB
|
||||
swift_store_large_object_chunk_size = 200
|
||||
|
||||
# Whether to use ServiceNET to communicate with the Swift storage servers.
|
||||
# (If you aren't RACKSPACE, leave this False!)
|
||||
#
|
||||
# To use ServiceNET for authentication, prefix hostname of
|
||||
# `swift_store_auth_address` with 'snet-'.
|
||||
# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
|
||||
swift_enable_snet = False
|
||||
|
||||
# If set to True enables multi-tenant storage mode which causes Glance images
|
||||
# to be stored in tenant specific Swift accounts.
|
||||
#swift_store_multi_tenant = False
|
||||
|
||||
# A list of swift ACL strings that will be applied as both read and
|
||||
# write ACLs to the containers created by Glance in multi-tenant
|
||||
# mode. This grants the specified tenants/users read and write access
|
||||
# to all newly created image objects. The standard swift ACL string
|
||||
# formats are allowed, including:
|
||||
# <tenant_id>:<username>
|
||||
# <tenant_name>:<username>
|
||||
# *:<username>
|
||||
# Multiple ACLs can be combined using a comma separated list, for
|
||||
# example: swift_store_admin_tenants = service:glance,*:admin
|
||||
#swift_store_admin_tenants =
|
||||
|
||||
# The region of the swift endpoint to be used for single tenant. This setting
|
||||
# is only necessary if the tenant has multiple swift endpoints.
|
||||
#swift_store_region =
|
||||
|
||||
# ============ S3 Store Options =============================
|
||||
|
||||
# Address where the S3 authentication service lives
|
||||
# Valid schemes are 'http://' and 'https://'
|
||||
# If no scheme specified, default to 'http://'
|
||||
s3_store_host = 127.0.0.1:8080/v1.0/
|
||||
|
||||
# User to authenticate against the S3 authentication service
|
||||
s3_store_access_key = <20-char AWS access key>
|
||||
|
||||
# Auth key for the user authenticating against the
|
||||
# S3 authentication service
|
||||
s3_store_secret_key = <40-char AWS secret key>
|
||||
|
||||
# Container within the account that the account should use
|
||||
# for storing images in S3. Note that S3 has a flat namespace,
|
||||
# so you need a unique bucket name for your glance images. An
|
||||
# easy way to do this is append your AWS access key to "glance".
|
||||
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
|
||||
# your AWS access key if you use it in your bucket name below!
|
||||
s3_store_bucket = <lowercased 20-char aws access key>glance
|
||||
|
||||
# Do we create the bucket if it does not exist?
|
||||
s3_store_create_bucket_on_put = False
|
||||
|
||||
# When sending images to S3, the data will first be written to a
|
||||
# temporary buffer on disk. By default the platform's temporary directory
|
||||
# will be used. If required, an alternative directory can be specified here.
|
||||
#s3_store_object_buffer_dir = /path/to/dir
|
||||
|
||||
# When forming a bucket url, boto will either set the bucket name as the
|
||||
# subdomain or as the first token of the path. Amazon's S3 service will
|
||||
# accept it as the subdomain, but Swift's S3 middleware requires it be
|
||||
# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
|
||||
#s3_store_bucket_url_format = subdomain
|
||||
|
||||
# ============ RBD Store Options =============================
|
||||
|
||||
# Ceph configuration file path
|
||||
# If using cephx authentication, this file should
|
||||
# include a reference to the right keyring
|
||||
# in a client.<USER> section
|
||||
rbd_store_ceph_conf = /etc/ceph/ceph.conf
|
||||
|
||||
# RADOS user to authenticate as (only applicable if using cephx)
|
||||
rbd_store_user = glance
|
||||
|
||||
# RADOS pool in which images are stored
|
||||
rbd_store_pool = images
|
||||
|
||||
# Images will be chunked into objects of this size (in megabytes).
|
||||
# For best performance, this should be a power of two
|
||||
rbd_store_chunk_size = 8
|
||||
|
||||
# ============ Delayed Delete Options =============================
|
||||
|
||||
# Turn on/off delayed delete
|
||||
delayed_delete = False
|
||||
|
||||
# Delayed delete time in seconds
|
||||
scrub_time = 43200
|
||||
|
||||
# Directory that the scrubber will use to remind itself of what to delete
|
||||
# Make sure this is also set in glance-scrubber.conf
|
||||
scrubber_datadir = /var/lib/glance/scrubber
|
||||
|
||||
# =============== Image Cache Options =============================
|
||||
|
||||
# Base directory that the Image Cache uses
|
||||
image_cache_dir = /var/lib/glance/image-cache/
|
||||
log_config=/etc/glance/logging.conf
|
||||
|
||||
[keystone_authtoken]
|
||||
auth_host = 192.168.0.7
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
admin_tenant_name = services
|
||||
admin_user = glance
|
||||
admin_password = Bzlunhw0
|
||||
signing_dirname=/tmp/keystone-signing-glance
|
||||
auth_uri=http://192.168.0.7:35357
|
||||
signing_dir=/tmp/keystone-signing-glance
|
||||
|
||||
[paste_deploy]
|
||||
# Name of the paste configuration file that defines the available pipelines
|
||||
#config_file = glance-api-paste.ini
|
||||
|
||||
# Partial name of a pipeline in your paste configuration file with the
|
||||
# service name removed. For example, if your paste section name is
|
||||
# [pipeline:glance-api-keystone], you would configure the flavor below
|
||||
# as 'keystone'.
|
||||
#flavor=
|
||||
flavor=keystone+cachemanagement
|
@ -0,0 +1,149 @@
|
||||
[DEFAULT]
|
||||
# Show more verbose log output (sets INFO log level output)
|
||||
#verbose = False
|
||||
verbose = true
|
||||
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
#debug = False
|
||||
debug = true
|
||||
|
||||
log_file = /var/log/glance/image-cache.log
|
||||
|
||||
# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
|
||||
#use_syslog = False
|
||||
use_syslog = False
|
||||
|
||||
# Directory that the Image Cache writes data to
|
||||
image_cache_dir = /var/lib/glance/image-cache/
|
||||
|
||||
# Number of seconds after which we should consider an incomplete image to be
|
||||
# stalled and eligible for reaping
|
||||
image_cache_stall_time = 86400
|
||||
|
||||
# image_cache_invalid_entry_grace_period - seconds
|
||||
#
|
||||
# If an exception is raised as we're writing to the cache, the cache-entry is
|
||||
# deemed invalid and moved to <image_cache_datadir>/invalid so that it can be
|
||||
# inspected for debugging purposes.
|
||||
#
|
||||
# This is number of seconds to leave these invalid images around before they
|
||||
# are eligible to be reaped.
|
||||
image_cache_invalid_entry_grace_period = 3600
|
||||
|
||||
# Max cache size in bytes
|
||||
image_cache_max_size = 10737418240
|
||||
|
||||
# Address to find the registry server
|
||||
registry_host = 192.168.0.7
|
||||
|
||||
# Port the registry server is listening on
|
||||
registry_port = 9191
|
||||
|
||||
# Auth settings if using Keystone
|
||||
# auth_url = http://127.0.0.1:5000/v2.0/
|
||||
auth_url = http://192.168.0.7:35357
|
||||
# admin_tenant_name = %SERVICE_TENANT_NAME%
|
||||
admin_tenant_name = services
|
||||
# admin_user = %SERVICE_USER%
|
||||
admin_user = glance
|
||||
# admin_password = %SERVICE_PASSWORD%
|
||||
admin_password = Bzlunhw0
|
||||
|
||||
# List of which store classes and store class locations are
|
||||
# currently known to glance at startup.
|
||||
# known_stores = glance.store.filesystem.Store,
|
||||
# glance.store.http.Store,
|
||||
# glance.store.rbd.Store,
|
||||
# glance.store.s3.Store,
|
||||
# glance.store.swift.Store,
|
||||
|
||||
# ============ Filesystem Store Options ========================
|
||||
|
||||
# Directory that the Filesystem backend store
|
||||
# writes image data to
|
||||
filesystem_store_datadir = /var/lib/glance/images/
|
||||
|
||||
# ============ Swift Store Options =============================
|
||||
|
||||
# Version of the authentication service to use
|
||||
# Valid versions are '2' for keystone and '1' for swauth and rackspace
|
||||
swift_store_auth_version = 2
|
||||
|
||||
# Address where the Swift authentication service lives
|
||||
# Valid schemes are 'http://' and 'https://'
|
||||
# If no scheme specified, default to 'https://'
|
||||
# For swauth, use something like '127.0.0.1:8080/v1.0/'
|
||||
swift_store_auth_address = 127.0.0.1:5000/v2.0/
|
||||
|
||||
# User to authenticate against the Swift authentication service
|
||||
# If you use Swift authentication service, set it to 'account':'user'
|
||||
# where 'account' is a Swift storage account and 'user'
|
||||
# is a user in that account
|
||||
swift_store_user = jdoe:jdoe
|
||||
|
||||
# Auth key for the user authenticating against the
|
||||
# Swift authentication service
|
||||
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
|
||||
|
||||
# Container within the account that the account should use
|
||||
# for storing images in Swift
|
||||
swift_store_container = glance
|
||||
|
||||
# Do we create the container if it does not exist?
|
||||
swift_store_create_container_on_put = False
|
||||
|
||||
# What size, in MB, should Glance start chunking image files
|
||||
# and do a large object manifest in Swift? By default, this is
|
||||
# the maximum object size in Swift, which is 5GB
|
||||
swift_store_large_object_size = 5120
|
||||
|
||||
# When doing a large object manifest, what size, in MB, should
|
||||
# Glance write chunks to Swift? This amount of data is written
|
||||
# to a temporary disk buffer during the process of chunking
|
||||
# the image file, and the default is 200MB
|
||||
swift_store_large_object_chunk_size = 200
|
||||
|
||||
# Whether to use ServiceNET to communicate with the Swift storage servers.
|
||||
# (If you aren't RACKSPACE, leave this False!)
|
||||
#
|
||||
# To use ServiceNET for authentication, prefix hostname of
|
||||
# `swift_store_auth_address` with 'snet-'.
|
||||
# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
|
||||
swift_enable_snet = False
|
||||
|
||||
# ============ S3 Store Options =============================
|
||||
|
||||
# Address where the S3 authentication service lives
|
||||
# Valid schemes are 'http://' and 'https://'
|
||||
# If no scheme specified, default to 'http://'
|
||||
s3_store_host = 127.0.0.1:8080/v1.0/
|
||||
|
||||
# User to authenticate against the S3 authentication service
|
||||
s3_store_access_key = <20-char AWS access key>
|
||||
|
||||
# Auth key for the user authenticating against the
|
||||
# S3 authentication service
|
||||
s3_store_secret_key = <40-char AWS secret key>
|
||||
|
||||
# Container within the account that the account should use
|
||||
# for storing images in S3. Note that S3 has a flat namespace,
|
||||
# so you need a unique bucket name for your glance images. An
|
||||
# easy way to do this is append your AWS access key to "glance".
|
||||
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
|
||||
# your AWS access key if you use it in your bucket name below!
|
||||
s3_store_bucket = <lowercased 20-char aws access key>glance
|
||||
|
||||
# Do we create the bucket if it does not exist?
|
||||
s3_store_create_bucket_on_put = False
|
||||
|
||||
# When sending images to S3, the data will first be written to a
|
||||
# temporary buffer on disk. By default the platform's temporary directory
|
||||
# will be used. If required, an alternative directory can be specified here.
|
||||
# s3_store_object_buffer_dir = /path/to/dir
|
||||
|
||||
# ================= Security Options ==========================
|
||||
|
||||
# AES key for encrypting store 'location' metadata, including
|
||||
# -- if used -- Swift or S3 credentials
|
||||
# Should be set to a random string of length 16, 24 or 32 bytes
|
||||
# metadata_encryption_key = <16, 24 or 32 char registry metadata key>
|
@ -0,0 +1,19 @@
|
||||
# Use this pipeline for no auth - DEFAULT
|
||||
[pipeline:glance-registry]
|
||||
pipeline = unauthenticated-context registryapp
|
||||
|
||||
# Use this pipeline for keystone auth
|
||||
[pipeline:glance-registry-keystone]
|
||||
pipeline = authtoken context registryapp
|
||||
|
||||
[app:registryapp]
|
||||
paste.app_factory = glance.registry.api.v1:API.factory
|
||||
|
||||
[filter:context]
|
||||
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
|
||||
|
||||
[filter:unauthenticated-context]
|
||||
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
@ -0,0 +1,97 @@
|
||||
[DEFAULT]
|
||||
# Show more verbose log output (sets INFO log level output)
|
||||
#verbose = False
|
||||
verbose = true
|
||||
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
#debug = False
|
||||
debug = true
|
||||
|
||||
# Address to bind the registry server
|
||||
bind_host = 192.168.0.2
|
||||
|
||||
# Port to bind the registry server to
|
||||
bind_port = 9191
|
||||
|
||||
# Log to this file. Make sure you do not set the same log
|
||||
# file for both the API and registry servers!
|
||||
log_file = /var/log/glance/registry.log
|
||||
|
||||
# Backlog requests when creating socket
|
||||
backlog = 4096
|
||||
|
||||
# TCP_KEEPIDLE value in seconds when creating socket.
|
||||
# Not supported on OS X.
|
||||
#tcp_keepidle = 600
|
||||
|
||||
# SQLAlchemy connection string for the reference implementation
|
||||
# registry server. Any valid SQLAlchemy connection string is fine.
|
||||
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
|
||||
sql_connection = mysql://glance:mKJzWqLK@192.168.0.7/glance
|
||||
|
||||
# Period in seconds after which SQLAlchemy should reestablish its connection
|
||||
# to the database.
|
||||
#
|
||||
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
|
||||
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
|
||||
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
|
||||
# before MySQL can drop the connection.
|
||||
sql_idle_timeout = 3600
|
||||
|
||||
# Limit the api to return `param_limit_max` items in a call to a container. If
|
||||
# a larger `limit` query param is provided, it will be reduced to this value.
|
||||
api_limit_max = 1000
|
||||
|
||||
# If a `limit` query param is not provided in an api request, it will
|
||||
# default to `limit_param_default`
|
||||
limit_param_default = 25
|
||||
|
||||
# Role used to identify an authenticated user as administrator
|
||||
#admin_role = admin
|
||||
|
||||
# Whether to automatically create the database tables.
|
||||
# Default: False
|
||||
#db_auto_create = False
|
||||
|
||||
# ================= Syslog Options ============================
|
||||
|
||||
# Send logs to syslog (/dev/log) instead of to file specified
|
||||
# by `log_file`
|
||||
#use_syslog = False
|
||||
use_syslog = False
|
||||
|
||||
# Facility to use. If unset defaults to LOG_USER.
|
||||
#syslog_log_facility = LOG_LOCAL1
|
||||
|
||||
# ================= SSL Options ===============================
|
||||
|
||||
# Certificate file to use when starting registry server securely
|
||||
#cert_file = /path/to/certfile
|
||||
|
||||
# Private key file to use when starting registry server securely
|
||||
#key_file = /path/to/keyfile
|
||||
|
||||
# CA certificate file to use to verify connecting clients
|
||||
#ca_file = /path/to/cafile
|
||||
log_config=/etc/glance/logging.conf
|
||||
|
||||
[keystone_authtoken]
|
||||
auth_host = 192.168.0.7
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
admin_tenant_name = services
|
||||
admin_user = glance
|
||||
admin_password = Bzlunhw0
|
||||
signing_dir=/tmp/keystone-signing-glance
|
||||
signing_dirname=/tmp/keystone-signing-glance
|
||||
|
||||
[paste_deploy]
|
||||
# Name of the paste configuration file that defines the available pipelines
|
||||
#config_file = glance-registry-paste.ini
|
||||
|
||||
# Partial name of a pipeline in your paste configuration file with the
|
||||
# service name removed. For example, if your paste section name is
|
||||
# [pipeline:glance-registry-keystone], you would configure the flavor below
|
||||
# as 'keystone'.
|
||||
#flavor=
|
||||
flavor=keystone
|
@ -0,0 +1,40 @@
|
||||
[DEFAULT]
|
||||
# Show more verbose log output (sets INFO log level output)
|
||||
#verbose = False
|
||||
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
#debug = False
|
||||
|
||||
# Log to this file. Make sure you do not set the same log
|
||||
# file for both the API and registry servers!
|
||||
log_file = /var/log/glance/scrubber.log
|
||||
|
||||
# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
|
||||
#use_syslog = False
|
||||
|
||||
# Should we run our own loop or rely on cron/scheduler to run us
|
||||
daemon = False
|
||||
|
||||
# Loop time between checking for new items to schedule for delete
|
||||
wakeup_time = 300
|
||||
|
||||
# Directory that the scrubber will use to remind itself of what to delete
|
||||
# Make sure this is also set in glance-api.conf
|
||||
scrubber_datadir = /var/lib/glance/scrubber
|
||||
|
||||
# Only one server in your deployment should be designated the cleanup host
|
||||
cleanup_scrubber = False
|
||||
|
||||
# pending_delete items older than this time are candidates for cleanup
|
||||
cleanup_scrubber_time = 86400
|
||||
|
||||
# Address to find the registry server for cleanups
|
||||
registry_host = 0.0.0.0
|
||||
|
||||
# Port the registry server is listening on
|
||||
registry_port = 9191
|
||||
|
||||
# AES key for encrypting store 'location' metadata, including
|
||||
# -- if used -- Swift or S3 credentials
|
||||
# Should be set to a random string of length 16, 24 or 32 bytes
|
||||
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
|
@ -0,0 +1,21 @@
|
||||
[loggers]
|
||||
keys = root
|
||||
|
||||
[handlers]
|
||||
keys = syslog
|
||||
|
||||
[formatters]
|
||||
keys = default
|
||||
|
||||
[logger_root]
|
||||
level = DEBUG
|
||||
handlers = syslog
|
||||
qualname = glance
|
||||
|
||||
[handler_syslog]
|
||||
class = handlers.SysLogHandler
|
||||
args = ('/dev/log', handlers.SysLogHandler.LOG_LOCAL2)
|
||||
formatter = default
|
||||
|
||||
[formatter_default]
|
||||
format = glance %(name)s %(levelname)s %(message)s
|
@ -0,0 +1,4 @@
|
||||
{
|
||||
"default": "",
|
||||
"manage_image_cache": "role:admin"
|
||||
}
|
@ -0,0 +1,28 @@
|
||||
{
|
||||
"kernel_id": {
|
||||
"type": "string",
|
||||
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
|
||||
"description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
|
||||
},
|
||||
"ramdisk_id": {
|
||||
"type": "string",
|
||||
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
|
||||
"description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
|
||||
},
|
||||
"instance_uuid": {
|
||||
"type": "string",
|
||||
"description": "ID of instance used to create this image."
|
||||
},
|
||||
"architecture": {
|
||||
"description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
|
||||
"type": "string"
|
||||
},
|
||||
"os_distro": {
|
||||
"description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
|
||||
"type": "string"
|
||||
},
|
||||
"os_version": {
|
||||
"description": "Operating system version as specified by the distributor",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
@ -0,0 +1,59 @@
|
||||
# This file is managed by Puppet. DO NOT EDIT.
|
||||
global_defs {
|
||||
notification_email {
|
||||
root@domain.tld
|
||||
|
||||
}
|
||||
notification_email_from keepalived@domain.tld
|
||||
smtp_server localhost
|
||||
smtp_connect_timeout 30
|
||||
router_id controller-13
|
||||
}
|
||||
|
||||
vrrp_instance 4 {
|
||||
virtual_router_id 4
|
||||
|
||||
# for electing MASTER, highest priority wins.
|
||||
priority 101
|
||||
state MASTER
|
||||
|
||||
interface eth0.100
|
||||
|
||||
virtual_ipaddress {
|
||||
240.0.1.7 label eth0.100:ka
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
vrrp_instance 5 {
|
||||
virtual_router_id 5
|
||||
|
||||
# for electing MASTER, highest priority wins.
|
||||
priority 101
|
||||
state MASTER
|
||||
|
||||
interface eth0.101
|
||||
|
||||
virtual_ipaddress {
|
||||
192.168.0.7 label eth0.101:ka
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
@ -0,0 +1,27 @@
|
||||
# config for TemplatedCatalog, using camelCase because I don't want to do
|
||||
# translations for keystone compat
|
||||
catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0
|
||||
catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0
|
||||
catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0
|
||||
catalog.RegionOne.identity.name = Identity Service
|
||||
|
||||
# fake compute service for now to help novaclient tests work
|
||||
catalog.RegionOne.compute.publicURL = http://localhost:$(compute_port)s/v1.1/$(tenant_id)s
|
||||
catalog.RegionOne.compute.adminURL = http://localhost:$(compute_port)s/v1.1/$(tenant_id)s
|
||||
catalog.RegionOne.compute.internalURL = http://localhost:$(compute_port)s/v1.1/$(tenant_id)s
|
||||
catalog.RegionOne.compute.name = Compute Service
|
||||
|
||||
catalog.RegionOne.volume.publicURL = http://localhost:8776/v1/$(tenant_id)s
|
||||
catalog.RegionOne.volume.adminURL = http://localhost:8776/v1/$(tenant_id)s
|
||||
catalog.RegionOne.volume.internalURL = http://localhost:8776/v1/$(tenant_id)s
|
||||
catalog.RegionOne.volume.name = Volume Service
|
||||
|
||||
catalog.RegionOne.ec2.publicURL = http://localhost:8773/services/Cloud
|
||||
catalog.RegionOne.ec2.adminURL = http://localhost:8773/services/Admin
|
||||
catalog.RegionOne.ec2.internalURL = http://localhost:8773/services/Cloud
|
||||
catalog.RegionOne.ec2.name = EC2 Service
|
||||
|
||||
catalog.RegionOne.image.publicURL = http://localhost:9292/v1
|
||||
catalog.RegionOne.image.adminURL = http://localhost:9292/v1
|
||||
catalog.RegionOne.image.internalURL = http://localhost:9292/v1
|
||||
catalog.RegionOne.image.name = Image Service
|
@ -0,0 +1,320 @@
|
||||
[DEFAULT]
|
||||
log_file = /var/log/keystone/keystone.log
|
||||
# A "shared secret" between keystone and other openstack services
|
||||
# admin_token = ADMIN
|
||||
admin_token = 6Cx19zRq
|
||||
|
||||
# The IP address of the network interface to listen on
|
||||
# bind_host = 0.0.0.0
|
||||
bind_host = 192.168.0.2
|
||||
|
||||
# The port number which the public service listens on
|
||||
# public_port = 5000
|
||||
public_port = 5000
|
||||
|
||||
# The port number which the public admin listens on
|
||||
# admin_port = 35357
|
||||
admin_port = 35357
|
||||
|
||||
# The base endpoint URLs for keystone that are advertised to clients
|
||||
# (NOTE: this does NOT affect how keystone listens for connections)
|
||||
# public_endpoint = http://localhost:%(public_port)d/
|
||||
# admin_endpoint = http://localhost:%(admin_port)d/
|
||||
|
||||
# The port number which the OpenStack Compute service listens on
|
||||
# compute_port = 8774
|
||||
compute_port = 3000
|
||||
|
||||
# Path to your policy definition containing identity actions
|
||||
# policy_file = policy.json
|
||||
|
||||
# Rule to check if no matching policy definition is found
|
||||
# FIXME(dolph): This should really be defined as [policy] default_rule
|
||||
# policy_default_rule = admin_required
|
||||
|
||||
# Role for migrating membership relationships
|
||||
# During a SQL upgrade, the following values will be used to create a new role
|
||||
# that will replace records in the user_tenant_membership table with explicit
|
||||
# role grants. After migration, the member_role_id will be used in the API
|
||||
# add_user_to_project, and member_role_name will be ignored.
|
||||
# member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab
|
||||
# member_role_name = _member_
|
||||
|
||||
# === Logging Options ===
|
||||
# Print debugging output
|
||||
# (includes plaintext request logging, potentially including passwords)
|
||||
# debug = False
|
||||
debug = true
|
||||
|
||||
# Print more verbose output
|
||||
# verbose = False
|
||||
verbose = true
|
||||
|
||||
# Name of log file to output to. If not set, logging will go to stdout.
|
||||
# log_file = keystone.log
|
||||
|
||||
# The directory to keep log files in (will be prepended to --logfile)
|
||||
# log_dir = /var/log/keystone
|
||||
|
||||
# Use syslog for logging.
|
||||
# use_syslog = False
|
||||
|
||||
# syslog facility to receive log lines
|
||||
# syslog_log_facility = LOG_USER
|
||||
|
||||
# If this option is specified, the logging configuration file specified is
|
||||
# used and overrides any other logging options specified. Please see the
|
||||
# Python logging module documentation for details on logging configuration
|
||||
# files.
|
||||
# log_config = logging.conf
|
||||
log_config = /etc/keystone/logging.conf
|
||||
|
||||
# A logging.Formatter log message format string which may use any of the
|
||||
# available logging.LogRecord attributes.
|
||||
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
|
||||
|
||||
# Format string for %(asctime)s in log records.
|
||||
# log_date_format = %Y-%m-%d %H:%M:%S
|
||||
|
||||
# onready allows you to send a notification when the process is ready to serve
|
||||
# For example, to have it notify using systemd, one could set shell command:
|
||||
# onready = systemd-notify --ready
|
||||
# or a module with notify() method:
|
||||
# onready = keystone.common.systemd
|
||||
|
||||
[sql]
|
||||
connection = mysql://keystone:cg5UvHsO@192.168.0.7/keystone
|
||||
# The SQLAlchemy connection string used to connect to the database
|
||||
# connection = sqlite:///keystone.db
|
||||
|
||||
# the timeout before idle sql connections are reaped
|
||||
# idle_timeout = 200
|
||||
idle_timeout = 200
|
||||
|
||||
[identity]
|
||||
driver = keystone.identity.backends.sql.Identity
|
||||
# driver = keystone.identity.backends.sql.Identity
|
||||
|
||||
# This references the domain to use for all Identity API v2 requests (which are
|
||||
# not aware of domains). A domain with this ID will be created for you by
|
||||
# keystone-manage db_sync in migration 008. The domain referenced by this ID
|
||||
# cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API.
|
||||
# There is nothing special about this domain, other than the fact that it must
|
||||
# exist in order to maintain support for your v2 clients.
|
||||
# default_domain_id = default
|
||||
|
||||
[trust]
|
||||
# driver = keystone.trust.backends.sql.Trust
|
||||
|
||||
# delegation and impersonation features can be optionally disabled
|
||||
# enabled = True
|
||||
|
||||
[catalog]
|
||||
template_file = /etc/keystone/default_catalog.templates
|
||||
driver = keystone.catalog.backends.sql.Catalog
|
||||
# dynamic, sql-based backend (supports API/CLI-based management commands)
|
||||
# driver = keystone.catalog.backends.sql.Catalog
|
||||
|
||||
# static, file-based backend (does *NOT* support any management commands)
|
||||
# driver = keystone.catalog.backends.templated.TemplatedCatalog
|
||||
|
||||
# template_file = default_catalog.templates
|
||||
|
||||
[token]
|
||||
driver = keystone.token.backends.sql.Token
|
||||
# driver = keystone.token.backends.kvs.Token
|
||||
|
||||
# Amount of time a token should remain valid (in seconds)
|
||||
# expiration = 86400
|
||||
|
||||
[policy]
|
||||
# driver = keystone.policy.backends.sql.Policy
|
||||
driver = keystone.policy.backends.rules.Policy
|
||||
|
||||
[ec2]
|
||||
driver = keystone.contrib.ec2.backends.sql.Ec2
|
||||
# driver = keystone.contrib.ec2.backends.kvs.Ec2
|
||||
|
||||
[ssl]
|
||||
#enable = True
|
||||
#certfile = /etc/keystone/ssl/certs/keystone.pem
|
||||
#keyfile = /etc/keystone/ssl/private/keystonekey.pem
|
||||
#ca_certs = /etc/keystone/ssl/certs/ca.pem
|
||||
#cert_required = True
|
||||
|
||||
[signing]
|
||||
#token_format = PKI
|
||||
token_format = UUID
|
||||
#certfile = /etc/keystone/ssl/certs/signing_cert.pem
|
||||
#keyfile = /etc/keystone/ssl/private/signing_key.pem
|
||||
#ca_certs = /etc/keystone/ssl/certs/ca.pem
|
||||
#key_size = 1024
|
||||
#valid_days = 3650
|
||||
#ca_password = None
|
||||
|
||||
[ldap]
|
||||
# url = ldap://localhost
|
||||
# user = dc=Manager,dc=example,dc=com
|
||||
# password = None
|
||||
# suffix = cn=example,cn=com
|
||||
# use_dumb_member = False
|
||||
# allow_subtree_delete = False
|
||||
# dumb_member = cn=dumb,dc=example,dc=com
|
||||
|
||||
# Maximum results per page; a value of zero ('0') disables paging (default)
|
||||
# page_size = 0
|
||||
|
||||
# The LDAP dereferencing option for queries. This can be either 'never',
|
||||
# 'searching', 'always', 'finding' or 'default'. The 'default' option falls
|
||||
# back to using default dereferencing configured by your ldap.conf.
|
||||
# alias_dereferencing = default
|
||||
|
||||
# The LDAP scope for queries, this can be either 'one'
|
||||
# (onelevel/singleLevel) or 'sub' (subtree/wholeSubtree)
|
||||
# query_scope = one
|
||||
|
||||
# user_tree_dn = ou=Users,dc=example,dc=com
|
||||
# user_filter =
|
||||
# user_objectclass = inetOrgPerson
|
||||
# user_domain_id_attribute = businessCategory
|
||||
# user_id_attribute = cn
|
||||
# user_name_attribute = sn
|
||||
# user_mail_attribute = email
|
||||
# user_pass_attribute = userPassword
|
||||
# user_enabled_attribute = enabled
|
||||
# user_enabled_mask = 0
|
||||
# user_enabled_default = True
|
||||
# user_attribute_ignore = tenant_id,tenants
|
||||
# user_allow_create = True
|
||||
# user_allow_update = True
|
||||
# user_allow_delete = True
|
||||
# user_enabled_emulation = False
|
||||
# user_enabled_emulation_dn =
|
||||
|
||||
# tenant_tree_dn = ou=Groups,dc=example,dc=com
|
||||
# tenant_filter =
|
||||
# tenant_objectclass = groupOfNames
|
||||
# tenant_domain_id_attribute = businessCategory
|
||||
# tenant_id_attribute = cn
|
||||
# tenant_member_attribute = member
|
||||
# tenant_name_attribute = ou
|
||||
# tenant_desc_attribute = desc
|
||||
# tenant_enabled_attribute = enabled
|
||||
# tenant_attribute_ignore =
|
||||
# tenant_allow_create = True
|
||||
# tenant_allow_update = True
|
||||
# tenant_allow_delete = True
|
||||
# tenant_enabled_emulation = False
|
||||
# tenant_enabled_emulation_dn =
|
||||
|
||||
# role_tree_dn = ou=Roles,dc=example,dc=com
|
||||
# role_filter =
|
||||
# role_objectclass = organizationalRole
|
||||
# role_id_attribute = cn
|
||||
# role_name_attribute = ou
|
||||
# role_member_attribute = roleOccupant
|
||||
# role_attribute_ignore =
|
||||
# role_allow_create = True
|
||||
# role_allow_update = True
|
||||
# role_allow_delete = True
|
||||
|
||||
# group_tree_dn =
|
||||
# group_filter =
|
||||
# group_objectclass = groupOfNames
|
||||
# group_id_attribute = cn
|
||||
# group_name_attribute = ou
|
||||
# group_member_attribute = member
|
||||
# group_desc_attribute = desc
|
||||
# group_attribute_ignore =
|
||||
# group_allow_create = True
|
||||
# group_allow_update = True
|
||||
# group_allow_delete = True
|
||||
|
||||
[auth]
|
||||
methods = password,token
|
||||
password = keystone.auth.plugins.password.Password
|
||||
token = keystone.auth.plugins.token.Token
|
||||
|
||||
[filter:debug]
|
||||
paste.filter_factory = keystone.common.wsgi:Debug.factory
|
||||
|
||||
[filter:token_auth]
|
||||
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
|
||||
|
||||
[filter:admin_token_auth]
|
||||
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
|
||||
|
||||
[filter:xml_body]
|
||||
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
|
||||
|
||||
[filter:json_body]
|
||||
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
|
||||
|
||||
[filter:user_crud_extension]
|
||||
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
|
||||
|
||||
[filter:crud_extension]
|
||||
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
|
||||
|
||||
[filter:ec2_extension]
|
||||
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
|
||||
|
||||
[filter:s3_extension]
|
||||
paste.filter_factory = keystone.contrib.s3:S3Extension.factory
|
||||
|
||||
[filter:url_normalize]
|
||||
paste.filter_factory = keystone.middleware:NormalizingFilter.factory
|
||||
|
||||
[filter:sizelimit]
|
||||
paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
|
||||
|
||||
[filter:stats_monitoring]
|
||||
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
|
||||
|
||||
[filter:stats_reporting]
|
||||
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
|
||||
|
||||
[filter:access_log]
|
||||
paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
|
||||
|
||||
[app:public_service]
|
||||
paste.app_factory = keystone.service:public_app_factory
|
||||
|
||||
[app:service_v3]
|
||||
paste.app_factory = keystone.service:v3_app_factory
|
||||
|
||||
[app:admin_service]
|
||||
paste.app_factory = keystone.service:admin_app_factory
|
||||
|
||||
[pipeline:public_api]
|
||||
pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service
|
||||
|
||||
[pipeline:admin_api]
|
||||
pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service
|
||||
|
||||
[pipeline:api_v3]
|
||||
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3
|
||||
|
||||
[app:public_version_service]
|
||||
paste.app_factory = keystone.service:public_version_app_factory
|
||||
|
||||
[app:admin_version_service]
|
||||
paste.app_factory = keystone.service:admin_version_app_factory
|
||||
|
||||
[pipeline:public_version_api]
|
||||
pipeline = stats_monitoring url_normalize xml_body public_version_service
|
||||
|
||||
[pipeline:admin_version_api]
|
||||
pipeline = stats_monitoring url_normalize xml_body admin_version_service
|
||||
|
||||
[composite:main]
|
||||
use = egg:Paste#urlmap
|
||||
/v2.0 = public_api
|
||||
/v3 = api_v3
|
||||
/ = public_version_api
|
||||
|
||||
[composite:admin]
|
||||
use = egg:Paste#urlmap
|
||||
/v2.0 = admin_api
|
||||
/v3 = api_v3
|
||||
/ = admin_version_api
|
@ -0,0 +1,21 @@
|
||||
[loggers]
|
||||
keys = root
|
||||
|
||||
[handlers]
|
||||
keys = syslog
|
||||
|
||||
[formatters]
|
||||
keys = default
|
||||
|
||||
[logger_root]
|
||||
level = DEBUG
|
||||
handlers = syslog
|
||||
qualname = keystone
|
||||
|
||||
[handler_syslog]
|
||||
class = handlers.SysLogHandler
|
||||
args = ('/dev/log', handlers.SysLogHandler.LOG_LOCAL1)
|
||||
formatter = default
|
||||
|
||||
[formatter_default]
|
||||
format = keystone %(name)s %(levelname)s %(message)s
|
@ -0,0 +1,85 @@
|
||||
{
|
||||
"admin_required": [["role:admin"], ["is_admin:1"]],
|
||||
"owner" : [["user_id:%(user_id)s"]],
|
||||
"admin_or_owner": [["rule:admin_required"], ["rule:owner"]],
|
||||
|
||||
"default": [["rule:admin_required"]],
|
||||
|
||||
"identity:get_service": [["rule:admin_required"]],
|
||||
"identity:list_services": [["rule:admin_required"]],
|
||||
"identity:create_service": [["rule:admin_required"]],
|
||||
"identity:update_service": [["rule:admin_required"]],
|
||||
"identity:delete_service": [["rule:admin_required"]],
|
||||
|
||||
"identity:get_endpoint": [["rule:admin_required"]],
|
||||
"identity:list_endpoints": [["rule:admin_required"]],
|
||||
"identity:create_endpoint": [["rule:admin_required"]],
|
||||
"identity:update_endpoint": [["rule:admin_required"]],
|
||||
"identity:delete_endpoint": [["rule:admin_required"]],
|
||||
|
||||
"identity:get_domain": [["rule:admin_required"]],
|
||||
"identity:list_domains": [["rule:admin_required"]],
|
||||
"identity:create_domain": [["rule:admin_required"]],
|
||||
"identity:update_domain": [["rule:admin_required"]],
|
||||
"identity:delete_domain": [["rule:admin_required"]],
|
||||
|
||||
"identity:get_project": [["rule:admin_required"]],
|
||||
"identity:list_projects": [["rule:admin_required"]],
|
||||
"identity:list_user_projects": [["rule:admin_or_owner"]],
|
||||
"identity:create_project": [["rule:admin_or_owner"]],
|
||||
"identity:update_project": [["rule:admin_required"]],
|
||||
"identity:delete_project": [["rule:admin_required"]],
|
||||
|
||||
"identity:get_user": [["rule:admin_required"]],
|
||||
"identity:list_users": [["rule:admin_required"]],
|
||||
"identity:create_user": [["rule:admin_required"]],
|
||||
"identity:update_user": [["rule:admin_or_owner"]],
|
||||
"identity:delete_user": [["rule:admin_required"]],
|
||||
|
||||
"identity:get_group": [["rule:admin_required"]],
|
||||
"identity:list_groups": [["rule:admin_required"]],
|
||||
"identity:create_group": [["rule:admin_required"]],
|
||||
"identity:update_group": [["rule:admin_required"]],
|
||||
"identity:delete_group": [["rule:admin_required"]],
|
||||
"identity:list_users_in_group": [["rule:admin_required"]],
|
||||
"identity:remove_user_from_group": [["rule:admin_required"]],
|
||||
"identity:check_user_in_group": [["rule:admin_required"]],
|
||||
"identity:add_user_to_group": [["rule:admin_required"]],
|
||||
|
||||
"identity:get_credential": [["rule:admin_required"]],
|
||||
"identity:list_credentials": [["rule:admin_required"]],
|
||||
"identity:create_credential": [["rule:admin_required"]],
|
||||
"identity:update_credential": [["rule:admin_required"]],
|
||||
"identity:delete_credential": [["rule:admin_required"]],
|
||||
|
||||
"identity:get_role": [["rule:admin_required"]],
|
||||
"identity:list_roles": [["rule:admin_required"]],
|
||||
"identity:create_role": [["rule:admin_required"]],
|
||||
"identity:update_role": [["rule:admin_required"]],
|
||||
"identity:delete_role": [["rule:admin_required"]],
|
||||
|
||||
"identity:check_grant": [["rule:admin_required"]],
|
||||
"identity:list_grants": [["rule:admin_required"]],
|
||||
"identity:create_grant": [["rule:admin_required"]],
|
||||
"identity:revoke_grant": [["rule:admin_required"]],
|
||||
|
||||
"identity:get_policy": [["rule:admin_required"]],
|
||||
"identity:list_policies": [["rule:admin_required"]],
|
||||
"identity:create_policy": [["rule:admin_required"]],
|
||||
"identity:update_policy": [["rule:admin_required"]],
|
||||
"identity:delete_policy": [["rule:admin_required"]],
|
||||
|
||||
"identity:check_token": [["rule:admin_required"]],
|
||||
"identity:validate_token": [["rule:admin_required"]],
|
||||
"identity:revocation_list": [["rule:admin_required"]],
|
||||
"identity:revoke_token": [["rule:admin_required"],
|
||||
["user_id:%(user_id)s"]],
|
||||
|
||||
"identity:create_trust": [["user_id:%(trust.trustor_user_id)s"]],
|
||||
"identity:get_trust": [["rule:admin_or_owner"]],
|
||||
"identity:list_trusts": [["@"]],
|
||||
"identity:list_roles_for_trust": [["@"]],
|
||||
"identity:check_role_for_trust": [["@"]],
|
||||
"identity:get_role_for_trust": [["@"]],
|
||||
"identity:delete_trust": [["@"]]
|
||||
}
|
@ -0,0 +1,18 @@
|
||||
#
|
||||
# This can be used to setup URI aliases for frequently
|
||||
# used connection URIs. Aliases may contain only the
|
||||
# characters a-Z, 0-9, _, -.
|
||||
#
|
||||
# Following the '=' may be any valid libvirt connection
|
||||
# URI, including arbitrary parameters
|
||||
|
||||
#uri_aliases = [
|
||||
# "hail=qemu+ssh://root@hail.cloud.example.com/system",
|
||||
# "sleet=qemu+ssh://root@sleet.cloud.example.com/system",
|
||||
#]
|
||||
|
||||
#
|
||||
# This can be used to prevent probing of the hypervisor
|
||||
# driver when no URI is supplied by the application.
|
||||
|
||||
#uri_default = "qemu:///system"
|
@ -0,0 +1,80 @@
|
||||
# Generated by LVM2 version 2.02.98(2)-RHEL6 (2012-10-15): Mon Sep 16 13:29:54 2013
|
||||
|
||||
contents = "Text Format Volume Group"
|
||||
version = 1
|
||||
|
||||
description = "Created *before* executing '/sbin/vgs --noheadings -o name --config 'log{command_names=0 prefix=\" \"}''"
|
||||
|
||||
creation_host = "controller-13.domain.tld" # Linux controller-13.domain.tld 2.6.32-358.6.2.el6.x86_64 #1 SMP Thu May 16 20:59:36 UTC 2013 x86_64
|
||||
creation_time = 1379338194 # Mon Sep 16 13:29:54 2013
|
||||
|
||||
os {
|
||||
id = "VIjRLX-xF0U-D3ge-eBtO-mRbK-2Ojf-ou2LIt"
|
||||
seqno = 3
|
||||
format = "lvm2" # informational
|
||||
status = ["RESIZEABLE", "READ", "WRITE"]
|
||||
flags = []
|
||||
extent_size = 65536 # 32 Megabytes
|
||||
max_lv = 0
|
||||
max_pv = 0
|
||||
metadata_copies = 0
|
||||
|
||||
physical_volumes {
|
||||
|
||||
pv0 {
|
||||
id = "Odqxed-6TFL-FydT-JHUC-mEDU-pj7G-w6qgHf"
|
||||
device = "/dev/sda2" # Hint only
|
||||
|
||||
status = ["ALLOCATABLE"]
|
||||
flags = []
|
||||
dev_size = 29321216 # 13.9814 Gigabytes
|
||||
pe_start = 2048
|
||||
pe_count = 447 # 13.9688 Gigabytes
|
||||
}
|
||||
}
|
||||
|
||||
logical_volumes {
|
||||
|
||||
root {
|
||||
id = "p5Q4L3-DUZ1-H4Io-eftj-OObE-OFiC-mIpldy"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "controller-13.domain.tld"
|
||||
creation_time = 1379337811 # 2013-09-16 13:23:31 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 320 # 10 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 0
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
swap {
|
||||
id = "1nDiS3-10T0-JpQ5-n4El-JwXJ-b7qD-ZA0ZdX"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "controller-13.domain.tld"
|
||||
creation_time = 1379337815 # 2013-09-16 13:23:35 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 126 # 3.9375 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 320
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
80
config_samples/fuel_web/golden_fuelweb/cnt1/lvm/backup/os
Normal file
80
config_samples/fuel_web/golden_fuelweb/cnt1/lvm/backup/os
Normal file
@ -0,0 +1,80 @@
|
||||
# Generated by LVM2 version 2.02.98(2)-RHEL6 (2012-10-15): Mon Sep 16 13:29:54 2013
|
||||
|
||||
contents = "Text Format Volume Group"
|
||||
version = 1
|
||||
|
||||
description = "Created *after* executing '/sbin/vgs --noheadings -o name --config 'log{command_names=0 prefix=\" \"}''"
|
||||
|
||||
creation_host = "controller-13.domain.tld" # Linux controller-13.domain.tld 2.6.32-358.6.2.el6.x86_64 #1 SMP Thu May 16 20:59:36 UTC 2013 x86_64
|
||||
creation_time = 1379338194 # Mon Sep 16 13:29:54 2013
|
||||
|
||||
os {
|
||||
id = "VIjRLX-xF0U-D3ge-eBtO-mRbK-2Ojf-ou2LIt"
|
||||
seqno = 3
|
||||
format = "lvm2" # informational
|
||||
status = ["RESIZEABLE", "READ", "WRITE"]
|
||||
flags = []
|
||||
extent_size = 65536 # 32 Megabytes
|
||||
max_lv = 0
|
||||
max_pv = 0
|
||||
metadata_copies = 0
|
||||
|
||||
physical_volumes {
|
||||
|
||||
pv0 {
|
||||
id = "Odqxed-6TFL-FydT-JHUC-mEDU-pj7G-w6qgHf"
|
||||
device = "/dev/sda2" # Hint only
|
||||
|
||||
status = ["ALLOCATABLE"]
|
||||
flags = []
|
||||
dev_size = 29321216 # 13.9814 Gigabytes
|
||||
pe_start = 2048
|
||||
pe_count = 447 # 13.9688 Gigabytes
|
||||
}
|
||||
}
|
||||
|
||||
logical_volumes {
|
||||
|
||||
root {
|
||||
id = "p5Q4L3-DUZ1-H4Io-eftj-OObE-OFiC-mIpldy"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "controller-13.domain.tld"
|
||||
creation_time = 1379337811 # 2013-09-16 13:23:31 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 320 # 10 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 0
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
swap {
|
||||
id = "1nDiS3-10T0-JpQ5-n4El-JwXJ-b7qD-ZA0ZdX"
|
||||
status = ["READ", "WRITE", "VISIBLE"]
|
||||
flags = []
|
||||
creation_host = "controller-13.domain.tld"
|
||||
creation_time = 1379337815 # 2013-09-16 13:23:35 +0000
|
||||
segment_count = 1
|
||||
|
||||
segment1 {
|
||||
start_extent = 0
|
||||
extent_count = 126 # 3.9375 Gigabytes
|
||||
|
||||
type = "striped"
|
||||
stripe_count = 1 # linear
|
||||
|
||||
stripes = [
|
||||
"pv0", 320
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
843
config_samples/fuel_web/golden_fuelweb/cnt1/lvm/lvm.conf
Normal file
843
config_samples/fuel_web/golden_fuelweb/cnt1/lvm/lvm.conf
Normal file
@ -0,0 +1,843 @@
|
||||
# This is an example configuration file for the LVM2 system.
|
||||
# It contains the default settings that would be used if there was no
|
||||
# /etc/lvm/lvm.conf file.
|
||||
#
|
||||
# Refer to 'man lvm.conf' for further information including the file layout.
|
||||
#
|
||||
# To put this file in a different directory and override /etc/lvm set
|
||||
# the environment variable LVM_SYSTEM_DIR before running the tools.
|
||||
#
|
||||
# N.B. Take care that each setting only appears once if uncommenting
|
||||
# example settings in this file.
|
||||
|
||||
|
||||
# This section allows you to configure which block devices should
|
||||
# be used by the LVM system.
|
||||
devices {
|
||||
|
||||
# Where do you want your volume groups to appear ?
|
||||
dir = "/dev"
|
||||
|
||||
# An array of directories that contain the device nodes you wish
|
||||
# to use with LVM2.
|
||||
scan = [ "/dev" ]
|
||||
|
||||
# If set, the cache of block device nodes with all associated symlinks
|
||||
# will be constructed out of the existing udev database content.
|
||||
# This avoids using and opening any inapplicable non-block devices or
|
||||
# subdirectories found in the device directory. This setting is applied
|
||||
# to udev-managed device directory only, other directories will be scanned
|
||||
# fully. LVM2 needs to be compiled with udev support for this setting to
|
||||
# take effect. N.B. Any device node or symlink not managed by udev in
|
||||
# udev directory will be ignored with this setting on.
|
||||
obtain_device_list_from_udev = 1
|
||||
|
||||
# If several entries in the scanned directories correspond to the
|
||||
# same block device and the tools need to display a name for device,
|
||||
# all the pathnames are matched against each item in the following
|
||||
# list of regular expressions in turn and the first match is used.
|
||||
# preferred_names = [ ]
|
||||
|
||||
# Try to avoid using undescriptive /dev/dm-N names, if present.
|
||||
preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
|
||||
|
||||
# A filter that tells LVM2 to only use a restricted set of devices.
|
||||
# The filter consists of an array of regular expressions. These
|
||||
# expressions can be delimited by a character of your choice, and
|
||||
# prefixed with either an 'a' (for accept) or 'r' (for reject).
|
||||
# The first expression found to match a device name determines if
|
||||
# the device will be accepted or rejected (ignored). Devices that
|
||||
# don't match any patterns are accepted.
|
||||
|
||||
# Be careful if there there are symbolic links or multiple filesystem
|
||||
# entries for the same device as each name is checked separately against
|
||||
# the list of patterns. The effect is that if the first pattern in the
|
||||
# list to match a name is an 'a' pattern for any of the names, the device
|
||||
# is accepted; otherwise if the first pattern in the list to match a name
|
||||
# is an 'r' pattern for any of the names it is rejected; otherwise it is
|
||||
# accepted.
|
||||
|
||||
# Don't have more than one filter line active at once: only one gets used.
|
||||
|
||||
# Run vgscan after you change this parameter to ensure that
|
||||
# the cache file gets regenerated (see below).
|
||||
# If it doesn't do what you expect, check the output of 'vgscan -vvvv'.
|
||||
|
||||
|
||||
# By default we accept every block device:
|
||||
filter = [ "a/.*/" ]
|
||||
|
||||
# Exclude the cdrom drive
|
||||
# filter = [ "r|/dev/cdrom|" ]
|
||||
|
||||
# When testing I like to work with just loopback devices:
|
||||
# filter = [ "a/loop/", "r/.*/" ]
|
||||
|
||||
# Or maybe all loops and ide drives except hdc:
|
||||
# filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
|
||||
|
||||
# Use anchors if you want to be really specific
|
||||
# filter = [ "a|^/dev/hda8$|", "r/.*/" ]
|
||||
|
||||
# Since "filter" is often overriden from command line, it is not suitable
|
||||
# for system-wide device filtering (udev rules, lvmetad). To hide devices
|
||||
# from LVM-specific udev processing and/or from lvmetad, you need to set
|
||||
# global_filter. The syntax is the same as for normal "filter"
|
||||
# above. Devices that fail the global_filter are not even opened by LVM.
|
||||
|
||||
# global_filter = []
|
||||
|
||||
# The results of the filtering are cached on disk to avoid
|
||||
# rescanning dud devices (which can take a very long time).
|
||||
# By default this cache is stored in the /etc/lvm/cache directory
|
||||
# in a file called '.cache'.
|
||||
# It is safe to delete the contents: the tools regenerate it.
|
||||
# (The old setting 'cache' is still respected if neither of
|
||||
# these new ones is present.)
|
||||
# N.B. If obtain_device_list_from_udev is set to 1 the list of
|
||||
# devices is instead obtained from udev and any existing .cache
|
||||
# file is removed.
|
||||
cache_dir = "/etc/lvm/cache"
|
||||
cache_file_prefix = ""
|
||||
|
||||
# You can turn off writing this cache file by setting this to 0.
|
||||
write_cache_state = 1
|
||||
|
||||
# Advanced settings.
|
||||
|
||||
# List of pairs of additional acceptable block device types found
|
||||
# in /proc/devices with maximum (non-zero) number of partitions.
|
||||
# types = [ "fd", 16 ]
|
||||
|
||||
# If sysfs is mounted (2.6 kernels) restrict device scanning to
|
||||
# the block devices it believes are valid.
|
||||
# 1 enables; 0 disables.
|
||||
sysfs_scan = 1
|
||||
|
||||
# By default, LVM2 will ignore devices used as component paths
|
||||
# of device-mapper multipath devices.
|
||||
# 1 enables; 0 disables.
|
||||
multipath_component_detection = 1
|
||||
|
||||
# By default, LVM2 will ignore devices used as components of
|
||||
# software RAID (md) devices by looking for md superblocks.
|
||||
# 1 enables; 0 disables.
|
||||
md_component_detection = 1
|
||||
|
||||
# By default, if a PV is placed directly upon an md device, LVM2
|
||||
# will align its data blocks with the md device's stripe-width.
|
||||
# 1 enables; 0 disables.
|
||||
md_chunk_alignment = 1
|
||||
|
||||
# Default alignment of the start of a data area in MB. If set to 0,
|
||||
# a value of 64KB will be used. Set to 1 for 1MiB, 2 for 2MiB, etc.
|
||||
# default_data_alignment = 1
|
||||
|
||||
# By default, the start of a PV's data area will be a multiple of
|
||||
# the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs.
|
||||
# - minimum_io_size - the smallest request the device can perform
|
||||
# w/o incurring a read-modify-write penalty (e.g. MD's chunk size)
|
||||
# - optimal_io_size - the device's preferred unit of receiving I/O
|
||||
# (e.g. MD's stripe width)
|
||||
# minimum_io_size is used if optimal_io_size is undefined (0).
|
||||
# If md_chunk_alignment is enabled, that detects the optimal_io_size.
|
||||
# This setting takes precedence over md_chunk_alignment.
|
||||
# 1 enables; 0 disables.
|
||||
data_alignment_detection = 1
|
||||
|
||||
# Alignment (in KB) of start of data area when creating a new PV.
|
||||
# md_chunk_alignment and data_alignment_detection are disabled if set.
|
||||
# Set to 0 for the default alignment (see: data_alignment_default)
|
||||
# or page size, if larger.
|
||||
data_alignment = 0
|
||||
|
||||
# By default, the start of the PV's aligned data area will be shifted by
|
||||
# the 'alignment_offset' exposed in sysfs. This offset is often 0 but
|
||||
# may be non-zero; e.g.: certain 4KB sector drives that compensate for
|
||||
# windows partitioning will have an alignment_offset of 3584 bytes
|
||||
# (sector 7 is the lowest aligned logical block, the 4KB sectors start
|
||||
# at LBA -1, and consequently sector 63 is aligned on a 4KB boundary).
|
||||
# But note that pvcreate --dataalignmentoffset will skip this detection.
|
||||
# 1 enables; 0 disables.
|
||||
data_alignment_offset_detection = 1
|
||||
|
||||
# If, while scanning the system for PVs, LVM2 encounters a device-mapper
|
||||
# device that has its I/O suspended, it waits for it to become accessible.
|
||||
# Set this to 1 to skip such devices. This should only be needed
|
||||
# in recovery situations.
|
||||
ignore_suspended_devices = 0
|
||||
|
||||
# During each LVM operation errors received from each device are counted.
|
||||
# If the counter of a particular device exceeds the limit set here, no
|
||||
# further I/O is sent to that device for the remainder of the respective
|
||||
# operation. Setting the parameter to 0 disables the counters altogether.
|
||||
disable_after_error_count = 0
|
||||
|
||||
# Allow use of pvcreate --uuid without requiring --restorefile.
|
||||
require_restorefile_with_uuid = 1
|
||||
|
||||
# Minimum size (in KB) of block devices which can be used as PVs.
|
||||
# In a clustered environment all nodes must use the same value.
|
||||
# Any value smaller than 512KB is ignored.
|
||||
|
||||
# Ignore devices smaller than 2MB such as floppy drives.
|
||||
pv_min_size = 2048
|
||||
|
||||
# The original built-in setting was 512 up to and including version 2.02.84.
|
||||
# pv_min_size = 512
|
||||
|
||||
# Issue discards to a logical volumes's underlying physical volume(s) when
|
||||
# the logical volume is no longer using the physical volumes' space (e.g.
|
||||
# lvremove, lvreduce, etc). Discards inform the storage that a region is
|
||||
# no longer in use. Storage that supports discards advertise the protocol
|
||||
# specific way discards should be issued by the kernel (TRIM, UNMAP, or
|
||||
# WRITE SAME with UNMAP bit set). Not all storage will support or benefit
|
||||
# from discards but SSDs and thinly provisioned LUNs generally do. If set
|
||||
# to 1, discards will only be issued if both the storage and kernel provide
|
||||
# support.
|
||||
# 1 enables; 0 disables.
|
||||
issue_discards = 0
|
||||
}
|
||||
|
||||
# This section allows you to configure the way in which LVM selects
|
||||
# free space for its Logical Volumes.
|
||||
allocation {
|
||||
|
||||
# When searching for free space to extend an LV, the "cling"
|
||||
# allocation policy will choose space on the same PVs as the last
|
||||
# segment of the existing LV. If there is insufficient space and a
|
||||
# list of tags is defined here, it will check whether any of them are
|
||||
# attached to the PVs concerned and then seek to match those PV tags
|
||||
# between existing extents and new extents.
|
||||
# Use the special tag "@*" as a wildcard to match any PV tag.
|
||||
|
||||
# Example: LVs are mirrored between two sites within a single VG.
|
||||
# PVs are tagged with either @site1 or @site2 to indicate where
|
||||
# they are situated.
|
||||
|
||||
# cling_tag_list = [ "@site1", "@site2" ]
|
||||
# cling_tag_list = [ "@*" ]
|
||||
|
||||
# Changes made in version 2.02.85 extended the reach of the 'cling'
|
||||
# policies to detect more situations where data can be grouped
|
||||
# onto the same disks. Set this to 0 to revert to the previous
|
||||
# algorithm.
|
||||
maximise_cling = 1
|
||||
|
||||
# Set to 1 to guarantee that mirror logs will always be placed on
|
||||
# different PVs from the mirror images. This was the default
|
||||
# until version 2.02.85.
|
||||
mirror_logs_require_separate_pvs = 0
|
||||
|
||||
# Set to 1 to guarantee that thin pool metadata will always
|
||||
# be placed on different PVs from the pool data.
|
||||
thin_pool_metadata_require_separate_pvs = 0
|
||||
|
||||
# Specify the minimal chunk size (in KB) for thin pool volumes.
|
||||
# Use of the larger chunk size may improve perfomance for plain
|
||||
# thin volumes, however using them for snapshot volumes is less efficient,
|
||||
# as it consumes more space and takes extra time for copying.
|
||||
# When unset, lvm tries to estimate chunk size starting from 64KB
|
||||
# Supported values are in range from 64 to 1048576.
|
||||
# thin_pool_chunk_size = 64
|
||||
|
||||
# Specify discards behavior of the thin pool volume.
|
||||
# Select one of "ignore", "nopassdown", "passdown"
|
||||
# thin_pool_discards = "passdown"
|
||||
|
||||
# Set to 0, to disable zeroing of thin pool data chunks before their
|
||||
# first use.
|
||||
# N.B. zeroing larger thin pool chunk size degrades performance.
|
||||
# thin_pool_zero = 1
|
||||
}
|
||||
|
||||
# This section that allows you to configure the nature of the
|
||||
# information that LVM2 reports.
|
||||
log {
|
||||
|
||||
# Controls the messages sent to stdout or stderr.
|
||||
# There are three levels of verbosity, 3 being the most verbose.
|
||||
verbose = 0
|
||||
|
||||
# Set to 1 to suppress all non-essential messages from stdout.
|
||||
# This has the same effect as -qq.
|
||||
# When this is set, the following commands still produce output:
|
||||
# dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay,
|
||||
# pvs, version, vgcfgrestore -l, vgdisplay, vgs.
|
||||
# Non-essential messages are shifted from log level 4 to log level 5
|
||||
# for syslog and lvm2_log_fn purposes.
|
||||
# Any 'yes' or 'no' questions not overridden by other arguments
|
||||
# are suppressed and default to 'no'.
|
||||
silent = 0
|
||||
|
||||
# Should we send log messages through syslog?
|
||||
# 1 is yes; 0 is no.
|
||||
syslog = 1
|
||||
|
||||
# Should we log error and debug messages to a file?
|
||||
# By default there is no log file.
|
||||
#file = "/var/log/lvm2.log"
|
||||
|
||||
# Should we overwrite the log file each time the program is run?
|
||||
# By default we append.
|
||||
overwrite = 0
|
||||
|
||||
# What level of log messages should we send to the log file and/or syslog?
|
||||
# There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
|
||||
# 7 is the most verbose (LOG_DEBUG).
|
||||
level = 0
|
||||
|
||||
# Format of output messages
|
||||
# Whether or not (1 or 0) to indent messages according to their severity
|
||||
indent = 1
|
||||
|
||||
# Whether or not (1 or 0) to display the command name on each line output
|
||||
command_names = 0
|
||||
|
||||
# A prefix to use before the message text (but after the command name,
|
||||
# if selected). Default is two spaces, so you can see/grep the severity
|
||||
# of each message.
|
||||
prefix = " "
|
||||
|
||||
# To make the messages look similar to the original LVM tools use:
|
||||
# indent = 0
|
||||
# command_names = 1
|
||||
# prefix = " -- "
|
||||
|
||||
# Set this if you want log messages during activation.
|
||||
# Don't use this in low memory situations (can deadlock).
|
||||
# activation = 0
|
||||
}
|
||||
|
||||
# Configuration of metadata backups and archiving. In LVM2 when we
|
||||
# talk about a 'backup' we mean making a copy of the metadata for the
|
||||
# *current* system. The 'archive' contains old metadata configurations.
|
||||
# Backups are stored in a human readeable text format.
|
||||
backup {
|
||||
|
||||
# Should we maintain a backup of the current metadata configuration ?
|
||||
# Use 1 for Yes; 0 for No.
|
||||
# Think very hard before turning this off!
|
||||
backup = 1
|
||||
|
||||
# Where shall we keep it ?
|
||||
# Remember to back up this directory regularly!
|
||||
backup_dir = "/etc/lvm/backup"
|
||||
|
||||
# Should we maintain an archive of old metadata configurations.
|
||||
# Use 1 for Yes; 0 for No.
|
||||
# On by default. Think very hard before turning this off.
|
||||
archive = 1
|
||||
|
||||
# Where should archived files go ?
|
||||
# Remember to back up this directory regularly!
|
||||
archive_dir = "/etc/lvm/archive"
|
||||
|
||||
# What is the minimum number of archive files you wish to keep ?
|
||||
retain_min = 10
|
||||
|
||||
# What is the minimum time you wish to keep an archive file for ?
|
||||
retain_days = 30
|
||||
}
|
||||
|
||||
# Settings for the running LVM2 in shell (readline) mode.
|
||||
shell {
|
||||
|
||||
# Number of lines of history to store in ~/.lvm_history
|
||||
history_size = 100
|
||||
}
|
||||
|
||||
|
||||
# Miscellaneous global LVM2 settings
|
||||
global {
|
||||
|
||||
# The file creation mask for any files and directories created.
|
||||
# Interpreted as octal if the first digit is zero.
|
||||
umask = 077
|
||||
|
||||
# Allow other users to read the files
|
||||
#umask = 022
|
||||
|
||||
# Enabling test mode means that no changes to the on disk metadata
|
||||
# will be made. Equivalent to having the -t option on every
|
||||
# command. Defaults to off.
|
||||
test = 0
|
||||
|
||||
# Default value for --units argument
|
||||
units = "h"
|
||||
|
||||
# Since version 2.02.54, the tools distinguish between powers of
|
||||
# 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g.
|
||||
# KB, MB, GB).
|
||||
# If you have scripts that depend on the old behaviour, set this to 0
|
||||
# temporarily until you update them.
|
||||
si_unit_consistency = 1
|
||||
|
||||
# Whether or not to communicate with the kernel device-mapper.
|
||||
# Set to 0 if you want to use the tools to manipulate LVM metadata
|
||||
# without activating any logical volumes.
|
||||
# If the device-mapper kernel driver is not present in your kernel
|
||||
# setting this to 0 should suppress the error messages.
|
||||
activation = 1
|
||||
|
||||
# If we can't communicate with device-mapper, should we try running
|
||||
# the LVM1 tools?
|
||||
# This option only applies to 2.4 kernels and is provided to help you
|
||||
# switch between device-mapper kernels and LVM1 kernels.
|
||||
# The LVM1 tools need to be installed with .lvm1 suffices
|
||||
# e.g. vgscan.lvm1 and they will stop working after you start using
|
||||
# the new lvm2 on-disk metadata format.
|
||||
# The default value is set when the tools are built.
|
||||
# fallback_to_lvm1 = 0
|
||||
|
||||
# The default metadata format that commands should use - "lvm1" or "lvm2".
|
||||
# The command line override is -M1 or -M2.
|
||||
# Defaults to "lvm2".
|
||||
# format = "lvm2"
|
||||
|
||||
# Location of proc filesystem
|
||||
proc = "/proc"
|
||||
|
||||
# Type of locking to use. Defaults to local file-based locking (1).
|
||||
# Turn locking off by setting to 0 (dangerous: risks metadata corruption
|
||||
# if LVM2 commands get run concurrently).
|
||||
# Type 2 uses the external shared library locking_library.
|
||||
# Type 3 uses built-in clustered locking.
|
||||
# Type 4 uses read-only locking which forbids any operations that might
|
||||
# change metadata.
|
||||
locking_type = 1
|
||||
|
||||
# Set to 0 to fail when a lock request cannot be satisfied immediately.
|
||||
wait_for_locks = 1
|
||||
|
||||
# If using external locking (type 2) and initialisation fails,
|
||||
# with this set to 1 an attempt will be made to use the built-in
|
||||
# clustered locking.
|
||||
# If you are using a customised locking_library you should set this to 0.
|
||||
fallback_to_clustered_locking = 1
|
||||
|
||||
# If an attempt to initialise type 2 or type 3 locking failed, perhaps
|
||||
# because cluster components such as clvmd are not running, with this set
|
||||
# to 1 an attempt will be made to use local file-based locking (type 1).
|
||||
# If this succeeds, only commands against local volume groups will proceed.
|
||||
# Volume Groups marked as clustered will be ignored.
|
||||
fallback_to_local_locking = 1
|
||||
|
||||
# Local non-LV directory that holds file-based locks while commands are
|
||||
# in progress. A directory like /tmp that may get wiped on reboot is OK.
|
||||
locking_dir = "/var/lock/lvm"
|
||||
|
||||
# Whenever there are competing read-only and read-write access requests for
|
||||
# a volume group's metadata, instead of always granting the read-only
|
||||
# requests immediately, delay them to allow the read-write requests to be
|
||||
# serviced. Without this setting, write access may be stalled by a high
|
||||
# volume of read-only requests.
|
||||
# NB. This option only affects locking_type = 1 viz. local file-based
|
||||
# locking.
|
||||
prioritise_write_locks = 1
|
||||
|
||||
# Other entries can go here to allow you to load shared libraries
|
||||
# e.g. if support for LVM1 metadata was compiled as a shared library use
|
||||
# format_libraries = "liblvm2format1.so"
|
||||
# Full pathnames can be given.
|
||||
|
||||
# Search this directory first for shared libraries.
|
||||
# library_dir = "/lib"
|
||||
|
||||
# The external locking library to load if locking_type is set to 2.
|
||||
# locking_library = "liblvm2clusterlock.so"
|
||||
|
||||
# Treat any internal errors as fatal errors, aborting the process that
|
||||
# encountered the internal error. Please only enable for debugging.
|
||||
abort_on_internal_errors = 0
|
||||
|
||||
# Check whether CRC is matching when parsed VG is used multiple times.
|
||||
# This is useful to catch unexpected internal cached volume group
|
||||
# structure modification. Please only enable for debugging.
|
||||
detect_internal_vg_cache_corruption = 0
|
||||
|
||||
# If set to 1, no operations that change on-disk metadata will be permitted.
|
||||
# Additionally, read-only commands that encounter metadata in need of repair
|
||||
# will still be allowed to proceed exactly as if the repair had been
|
||||
# performed (except for the unchanged vg_seqno).
|
||||
# Inappropriate use could mess up your system, so seek advice first!
|
||||
metadata_read_only = 0
|
||||
|
||||
# 'mirror_segtype_default' defines which segtype will be used when the
|
||||
# shorthand '-m' option is used for mirroring. The possible options are:
|
||||
#
|
||||
# "mirror" - The original RAID1 implementation provided by LVM2/DM. It is
|
||||
# characterized by a flexible log solution (core, disk, mirrored)
|
||||
# and by the necessity to block I/O while reconfiguring in the
|
||||
# event of a failure.
|
||||
#
|
||||
# There is an inherent race in the dmeventd failure handling
|
||||
# logic with snapshots of devices using this type of RAID1 that
|
||||
# in the worst case could cause a deadlock.
|
||||
# Ref: https://bugzilla.redhat.com/show_bug.cgi?id=817130#c10
|
||||
#
|
||||
# "raid1" - This implementation leverages MD's RAID1 personality through
|
||||
# device-mapper. It is characterized by a lack of log options.
|
||||
# (A log is always allocated for every device and they are placed
|
||||
# on the same device as the image - no separate devices are
|
||||
# required.) This mirror implementation does not require I/O
|
||||
# to be blocked in the kernel in the event of a failure.
|
||||
# This mirror implementation is not cluster-aware and cannot be
|
||||
# used in a shared (active/active) fashion in a cluster.
|
||||
#
|
||||
# Specify the '--type <mirror|raid1>' option to override this default
|
||||
# setting.
|
||||
mirror_segtype_default = "mirror"
|
||||
|
||||
# The default format for displaying LV names in lvdisplay was changed
|
||||
# in version 2.02.89 to show the LV name and path separately.
|
||||
# Previously this was always shown as /dev/vgname/lvname even when that
|
||||
# was never a valid path in the /dev filesystem.
|
||||
# Set to 1 to reinstate the previous format.
|
||||
#
|
||||
# lvdisplay_shows_full_device_path = 0
|
||||
|
||||
# Whether to use (trust) a running instance of lvmetad. If this is set to
|
||||
# 0, all commands fall back to the usual scanning mechanisms. When set to 1
|
||||
# *and* when lvmetad is running (it is not auto-started), the volume group
|
||||
# metadata and PV state flags are obtained from the lvmetad instance and no
|
||||
# scanning is done by the individual commands. In a setup with lvmetad,
|
||||
# lvmetad udev rules *must* be set up for LVM to work correctly. Without
|
||||
# proper udev rules, all changes in block device configuration will be
|
||||
# *ignored* until a manual 'pvscan --cache' is performed.
|
||||
#
|
||||
# If lvmetad has been running while use_lvmetad was 0, it MUST be stopped
|
||||
# before changing use_lvmetad to 1 and started again afterwards.
|
||||
use_lvmetad = 0
|
||||
|
||||
# Full path of the utility called to check that a thin metadata device
|
||||
# is in a state that allows it to be used.
|
||||
# Each time a thin pool needs to be activated or after it is deactivated
|
||||
# this utility is executed. The activation will only proceed if the utility
|
||||
# has an exit status of 0.
|
||||
# Set to "" to skip this check. (Not recommended.)
|
||||
# The thin tools are available as part of the device-mapper-persistent-data
|
||||
# package from https://github.com/jthornber/thin-provisioning-tools.
|
||||
#
|
||||
thin_check_executable = "/usr/sbin/thin_check"
|
||||
|
||||
# String with options passed with thin_check command. By default,
|
||||
# option '-q' is for quiet output.
|
||||
thin_check_options = [ "-q" ]
|
||||
|
||||
# If set, given features are not used by thin driver.
|
||||
# This can be helpful not just for testing, but i.e. allows to avoid
|
||||
# using problematic implementation of some thin feature.
|
||||
# Features:
|
||||
# block_size
|
||||
# discards
|
||||
# discards_non_power_2
|
||||
#
|
||||
# thin_disabled_features = [ "discards", "block_size" ]
|
||||
}
|
||||
|
||||
activation {
|
||||
# Set to 1 to perform internal checks on the operations issued to
|
||||
# libdevmapper. Useful for debugging problems with activation.
|
||||
# Some of the checks may be expensive, so it's best to use this
|
||||
# only when there seems to be a problem.
|
||||
checks = 0
|
||||
|
||||
# Set to 0 to disable udev synchronisation (if compiled into the binaries).
|
||||
# Processes will not wait for notification from udev.
|
||||
# They will continue irrespective of any possible udev processing
|
||||
# in the background. You should only use this if udev is not running
|
||||
# or has rules that ignore the devices LVM2 creates.
|
||||
# The command line argument --nodevsync takes precedence over this setting.
|
||||
# If set to 1 when udev is not running, and there are LVM2 processes
|
||||
# waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up.
|
||||
udev_sync = 1
|
||||
|
||||
# Set to 0 to disable the udev rules installed by LVM2 (if built with
|
||||
# --enable-udev_rules). LVM2 will then manage the /dev nodes and symlinks
|
||||
# for active logical volumes directly itself.
|
||||
# N.B. Manual intervention may be required if this setting is changed
|
||||
# while any logical volumes are active.
|
||||
udev_rules = 1
|
||||
|
||||
# Set to 1 for LVM2 to verify operations performed by udev. This turns on
|
||||
# additional checks (and if necessary, repairs) on entries in the device
|
||||
# directory after udev has completed processing its events.
|
||||
# Useful for diagnosing problems with LVM2/udev interactions.
|
||||
verify_udev_operations = 0
|
||||
|
||||
# If set to 1 and if deactivation of an LV fails, perhaps because
|
||||
# a process run from a quick udev rule temporarily opened the device,
|
||||
# retry the operation for a few seconds before failing.
|
||||
retry_deactivation = 1
|
||||
|
||||
# How to fill in missing stripes if activating an incomplete volume.
|
||||
# Using "error" will make inaccessible parts of the device return
|
||||
# I/O errors on access. You can instead use a device path, in which
|
||||
# case, that device will be used in place of missing stripes.
|
||||
# But note that using anything other than "error" with mirrored
|
||||
# or snapshotted volumes is likely to result in data corruption.
|
||||
missing_stripe_filler = "error"
|
||||
|
||||
# The linear target is an optimised version of the striped target
|
||||
# that only handles a single stripe. Set this to 0 to disable this
|
||||
# optimisation and always use the striped target.
|
||||
use_linear_target = 1
|
||||
|
||||
# How much stack (in KB) to reserve for use while devices suspended
|
||||
# Prior to version 2.02.89 this used to be set to 256KB
|
||||
reserved_stack = 64
|
||||
|
||||
# How much memory (in KB) to reserve for use while devices suspended
|
||||
reserved_memory = 8192
|
||||
|
||||
# Nice value used while devices suspended
|
||||
process_priority = -18
|
||||
|
||||
# If volume_list is defined, each LV is only activated if there is a
|
||||
# match against the list.
|
||||
# "vgname" and "vgname/lvname" are matched exactly.
|
||||
# "@tag" matches any tag set in the LV or VG.
|
||||
# "@*" matches if any tag defined on the host is also set in the LV or VG
|
||||
#
|
||||
# volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
|
||||
|
||||
# If auto_activation_volume_list is defined, each LV that is to be
|
||||
# activated is checked against the list while using the autoactivation
|
||||
# option (--activate ay/-a ay), and if it matches, it is activated.
|
||||
# "vgname" and "vgname/lvname" are matched exactly.
|
||||
# "@tag" matches any tag set in the LV or VG.
|
||||
# "@*" matches if any tag defined on the host is also set in the LV or VG
|
||||
#
|
||||
# auto_activation_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
|
||||
|
||||
# If read_only_volume_list is defined, each LV that is to be activated
|
||||
# is checked against the list, and if it matches, it is activated
|
||||
# in read-only mode. (This overrides '--permission rw' stored in the
|
||||
# metadata.)
|
||||
# "vgname" and "vgname/lvname" are matched exactly.
|
||||
# "@tag" matches any tag set in the LV or VG.
|
||||
# "@*" matches if any tag defined on the host is also set in the LV or VG
|
||||
#
|
||||
# read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
|
||||
|
||||
# Size (in KB) of each copy operation when mirroring
|
||||
mirror_region_size = 512
|
||||
|
||||
# Setting to use when there is no readahead value stored in the metadata.
|
||||
#
|
||||
# "none" - Disable readahead.
|
||||
# "auto" - Use default value chosen by kernel.
|
||||
readahead = "auto"
|
||||
|
||||
# 'raid_fault_policy' defines how a device failure in a RAID logical
|
||||
# volume is handled. This includes logical volumes that have the following
|
||||
# segment types: raid1, raid4, raid5*, and raid6*.
|
||||
#
|
||||
# In the event of a failure, the following policies will determine what
|
||||
# actions are performed during the automated response to failures (when
|
||||
# dmeventd is monitoring the RAID logical volume) and when 'lvconvert' is
|
||||
# called manually with the options '--repair' and '--use-policies'.
|
||||
#
|
||||
# "warn" - Use the system log to warn the user that a device in the RAID
|
||||
# logical volume has failed. It is left to the user to run
|
||||
# 'lvconvert --repair' manually to remove or replace the failed
|
||||
# device. As long as the number of failed devices does not
|
||||
# exceed the redundancy of the logical volume (1 device for
|
||||
# raid4/5, 2 for raid6, etc) the logical volume will remain
|
||||
# usable.
|
||||
#
|
||||
# "allocate" - Attempt to use any extra physical volumes in the volume
|
||||
# group as spares and replace faulty devices.
|
||||
#
|
||||
raid_fault_policy = "warn"
|
||||
|
||||
# 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define
|
||||
# how a device failure affecting a mirror (of "mirror" segment type) is
|
||||
# handled. A mirror is composed of mirror images (copies) and a log.
|
||||
# A disk log ensures that a mirror does not need to be re-synced
|
||||
# (all copies made the same) every time a machine reboots or crashes.
|
||||
#
|
||||
# In the event of a failure, the specified policy will be used to determine
|
||||
# what happens. This applies to automatic repairs (when the mirror is being
|
||||
# monitored by dmeventd) and to manual lvconvert --repair when
|
||||
# --use-policies is given.
|
||||
#
|
||||
# "remove" - Simply remove the faulty device and run without it. If
|
||||
# the log device fails, the mirror would convert to using
|
||||
# an in-memory log. This means the mirror will not
|
||||
# remember its sync status across crashes/reboots and
|
||||
# the entire mirror will be re-synced. If a
|
||||
# mirror image fails, the mirror will convert to a
|
||||
# non-mirrored device if there is only one remaining good
|
||||
# copy.
|
||||
#
|
||||
# "allocate" - Remove the faulty device and try to allocate space on
|
||||
# a new device to be a replacement for the failed device.
|
||||
# Using this policy for the log is fast and maintains the
|
||||
# ability to remember sync state through crashes/reboots.
|
||||
# Using this policy for a mirror device is slow, as it
|
||||
# requires the mirror to resynchronize the devices, but it
|
||||
# will preserve the mirror characteristic of the device.
|
||||
# This policy acts like "remove" if no suitable device and
|
||||
# space can be allocated for the replacement.
|
||||
#
|
||||
# "allocate_anywhere" - Not yet implemented. Useful to place the log device
|
||||
# temporarily on same physical volume as one of the mirror
|
||||
# images. This policy is not recommended for mirror devices
|
||||
# since it would break the redundant nature of the mirror. This
|
||||
# policy acts like "remove" if no suitable device and space can
|
||||
# be allocated for the replacement.
|
||||
|
||||
mirror_log_fault_policy = "allocate"
|
||||
mirror_image_fault_policy = "remove"
|
||||
|
||||
# 'snapshot_autoextend_threshold' and 'snapshot_autoextend_percent' define
|
||||
# how to handle automatic snapshot extension. The former defines when the
|
||||
# snapshot should be extended: when its space usage exceeds this many
|
||||
# percent. The latter defines how much extra space should be allocated for
|
||||
# the snapshot, in percent of its current size.
|
||||
#
|
||||
# For example, if you set snapshot_autoextend_threshold to 70 and
|
||||
# snapshot_autoextend_percent to 20, whenever a snapshot exceeds 70% usage,
|
||||
# it will be extended by another 20%. For a 1G snapshot, using up 700M will
|
||||
# trigger a resize to 1.2G. When the usage exceeds 840M, the snapshot will
|
||||
# be extended to 1.44G, and so on.
|
||||
#
|
||||
# Setting snapshot_autoextend_threshold to 100 disables automatic
|
||||
# extensions. The minimum value is 50 (A setting below 50 will be treated
|
||||
# as 50).
|
||||
|
||||
snapshot_autoextend_threshold = 100
|
||||
snapshot_autoextend_percent = 20
|
||||
|
||||
# 'thin_pool_autoextend_threshold' and 'thin_pool_autoextend_percent' define
|
||||
# how to handle automatic pool extension. The former defines when the
|
||||
# pool should be extended: when its space usage exceeds this many
|
||||
# percent. The latter defines how much extra space should be allocated for
|
||||
# the pool, in percent of its current size.
|
||||
#
|
||||
# For example, if you set thin_pool_autoextend_threshold to 70 and
|
||||
# thin_pool_autoextend_percent to 20, whenever a pool exceeds 70% usage,
|
||||
# it will be extended by another 20%. For a 1G pool, using up 700M will
|
||||
# trigger a resize to 1.2G. When the usage exceeds 840M, the pool will
|
||||
# be extended to 1.44G, and so on.
|
||||
#
|
||||
# Setting thin_pool_autoextend_threshold to 100 disables automatic
|
||||
# extensions. The minimum value is 50 (A setting below 50 will be treated
|
||||
# as 50).
|
||||
|
||||
thin_pool_autoextend_threshold = 100
|
||||
thin_pool_autoextend_percent = 20
|
||||
|
||||
# While activating devices, I/O to devices being (re)configured is
|
||||
# suspended, and as a precaution against deadlocks, LVM2 needs to pin
|
||||
# any memory it is using so it is not paged out. Groups of pages that
|
||||
# are known not to be accessed during activation need not be pinned
|
||||
# into memory. Each string listed in this setting is compared against
|
||||
# each line in /proc/self/maps, and the pages corresponding to any
|
||||
# lines that match are not pinned. On some systems locale-archive was
|
||||
# found to make up over 80% of the memory used by the process.
|
||||
# mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ]
|
||||
|
||||
# Set to 1 to revert to the default behaviour prior to version 2.02.62
|
||||
# which used mlockall() to pin the whole process's memory while activating
|
||||
# devices.
|
||||
use_mlockall = 0
|
||||
|
||||
# Monitoring is enabled by default when activating logical volumes.
|
||||
# Set to 0 to disable monitoring or use the --ignoremonitoring option.
|
||||
monitoring = 1
|
||||
|
||||
# When pvmove or lvconvert must wait for the kernel to finish
|
||||
# synchronising or merging data, they check and report progress
|
||||
# at intervals of this number of seconds. The default is 15 seconds.
|
||||
# If this is set to 0 and there is only one thing to wait for, there
|
||||
# are no progress reports, but the process is awoken immediately the
|
||||
# operation is complete.
|
||||
polling_interval = 15
|
||||
}
|
||||
|
||||
|
||||
####################
|
||||
# Advanced section #
|
||||
####################
|
||||
|
||||
# Metadata settings
|
||||
#
|
||||
# metadata {
|
||||
# Default number of copies of metadata to hold on each PV. 0, 1 or 2.
|
||||
# You might want to override it from the command line with 0
|
||||
# when running pvcreate on new PVs which are to be added to large VGs.
|
||||
|
||||
# pvmetadatacopies = 1
|
||||
|
||||
# Default number of copies of metadata to maintain for each VG.
|
||||
# If set to a non-zero value, LVM automatically chooses which of
|
||||
# the available metadata areas to use to achieve the requested
|
||||
# number of copies of the VG metadata. If you set a value larger
|
||||
# than the total number of metadata areas available then
|
||||
# metadata is stored in them all.
|
||||
# The default value of 0 ("unmanaged") disables this automatic
|
||||
# management and allows you to control which metadata areas
|
||||
# are used at the individual PV level using 'pvchange
|
||||
# --metadataignore y/n'.
|
||||
|
||||
# vgmetadatacopies = 0
|
||||
|
||||
# Approximate default size of on-disk metadata areas in sectors.
|
||||
# You should increase this if you have large volume groups or
|
||||
# you want to retain a large on-disk history of your metadata changes.
|
||||
|
||||
# pvmetadatasize = 255
|
||||
|
||||
# List of directories holding live copies of text format metadata.
|
||||
# These directories must not be on logical volumes!
|
||||
# It's possible to use LVM2 with a couple of directories here,
|
||||
# preferably on different (non-LV) filesystems, and with no other
|
||||
# on-disk metadata (pvmetadatacopies = 0). Or this can be in
|
||||
# addition to on-disk metadata areas.
|
||||
# The feature was originally added to simplify testing and is not
|
||||
# supported under low memory situations - the machine could lock up.
|
||||
#
|
||||
# Never edit any files in these directories by hand unless you
|
||||
# are absolutely sure you know what you are doing! Use
|
||||
# the supplied toolset to make changes (e.g. vgcfgrestore).
|
||||
|
||||
# dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
|
||||
#}
|
||||
|
||||
# Event daemon
|
||||
#
|
||||
dmeventd {
|
||||
# mirror_library is the library used when monitoring a mirror device.
|
||||
#
|
||||
# "libdevmapper-event-lvm2mirror.so" attempts to recover from
|
||||
# failures. It removes failed devices from a volume group and
|
||||
# reconfigures a mirror as necessary. If no mirror library is
|
||||
# provided, mirrors are not monitored through dmeventd.
|
||||
|
||||
mirror_library = "libdevmapper-event-lvm2mirror.so"
|
||||
|
||||
# snapshot_library is the library used when monitoring a snapshot device.
|
||||
#
|
||||
# "libdevmapper-event-lvm2snapshot.so" monitors the filling of
|
||||
# snapshots and emits a warning through syslog when the use of
|
||||
# the snapshot exceeds 80%. The warning is repeated when 85%, 90% and
|
||||
# 95% of the snapshot is filled.
|
||||
|
||||
snapshot_library = "libdevmapper-event-lvm2snapshot.so"
|
||||
|
||||
# thin_library is the library used when monitoring a thin device.
|
||||
#
|
||||
# "libdevmapper-event-lvm2thin.so" monitors the filling of
|
||||
# pool and emits a warning through syslog when the use of
|
||||
# the pool exceeds 80%. The warning is repeated when 85%, 90% and
|
||||
# 95% of the pool is filled.
|
||||
|
||||
thin_library = "libdevmapper-event-lvm2thin.so"
|
||||
|
||||
# Full path of the dmeventd binary.
|
||||
#
|
||||
# executable = "/sbin/dmeventd"
|
||||
}
|
@ -0,0 +1,129 @@
|
||||
# This file contains wsrep-related mysqld options. It should be included
|
||||
# in the main MySQL configuration file.
|
||||
#
|
||||
# Options that need to be customized:
|
||||
# - wsrep_provider
|
||||
# - wsrep_cluster_address
|
||||
# - wsrep_sst_auth
|
||||
# The rest of defaults should work out of the box.
|
||||
|
||||
##
|
||||
## mysqld options _MANDATORY_ for correct operation of the cluster
|
||||
##
|
||||
[mysqld]
|
||||
|
||||
# (This must be substituted by wsrep_format)
|
||||
binlog_format=ROW
|
||||
|
||||
# Currently only InnoDB storage engine is supported
|
||||
default-storage-engine=innodb
|
||||
|
||||
# to avoid issues with 'bulk mode inserts' using autoinc
|
||||
innodb_autoinc_lock_mode=2
|
||||
|
||||
# This is a must for parallel applying
|
||||
innodb_locks_unsafe_for_binlog=1
|
||||
|
||||
# Query Cache is not supported with wsrep
|
||||
query_cache_size=0
|
||||
query_cache_type=0
|
||||
|
||||
# Override bind-address
|
||||
# In some systems bind-address defaults to 127.0.0.1, and with mysqldump SST
|
||||
# it will have (most likely) disastrous consequences on donor node
|
||||
bind-address=192.168.0.2
|
||||
port=3307
|
||||
|
||||
max_connections=2048
|
||||
|
||||
##
|
||||
## WSREP options
|
||||
##
|
||||
# Full path to wsrep provider library or 'none'
|
||||
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
|
||||
|
||||
# Provider specific configuration options
|
||||
# See http://www.codership.com/wiki/doku.php?id=faq
|
||||
|
||||
wsrep_provider_options="pc.ignore_sb = no;ist.recv_addr=192.168.0.2;gmcast.listen_addr=tcp://192.168.0.2:4567"
|
||||
|
||||
# Logical cluster name. Should be the same for all nodes.
|
||||
wsrep_cluster_name="openstack"
|
||||
|
||||
wsrep_cluster_address="gcomm://192.168.0.4:4567,192.168.0.3:4567"
|
||||
|
||||
# Human-readable node name (non-unique). Hostname by default.
|
||||
#wsrep_node_name=
|
||||
|
||||
# Base replication <address|hostname>[:port] of the node.
|
||||
# The values supplied will be used as defaults for state transfer receiving,
|
||||
# listening ports and so on. Default: address of the first network interface.
|
||||
wsrep_node_address=192.168.0.2
|
||||
|
||||
# Address for incoming client connections. Autodetect by default.
|
||||
#wsrep_node_incoming_address=
|
||||
|
||||
# How many threads will process writesets from other nodes
|
||||
wsrep_slave_threads=32
|
||||
|
||||
# DBUG options for wsrep provider
|
||||
#wsrep_dbug_option
|
||||
|
||||
# Generate fake primary keys for non-PK tables (required for multi-master
|
||||
# and parallel applying operation)
|
||||
wsrep_certify_nonPK=1
|
||||
|
||||
# Maximum number of rows in write set
|
||||
wsrep_max_ws_rows=131072
|
||||
|
||||
# Maximum size of write set
|
||||
wsrep_max_ws_size=1073741824
|
||||
|
||||
# to enable debug level logging, set this to 1
|
||||
wsrep_debug=0
|
||||
|
||||
# convert locking sessions into transactions
|
||||
wsrep_convert_LOCK_to_trx=0
|
||||
|
||||
# how many times to retry deadlocked autocommits
|
||||
wsrep_retry_autocommit=1
|
||||
|
||||
# change auto_increment_increment and auto_increment_offset automatically
|
||||
wsrep_auto_increment_control=1
|
||||
|
||||
# retry autoinc insert, which failed for duplicate key error
|
||||
wsrep_drupal_282555_workaround=0
|
||||
|
||||
# enable "strictly synchronous" semantics for read operations
|
||||
wsrep_causal_reads=0
|
||||
|
||||
# Command to call when node status or cluster membership changes.
|
||||
# Will be passed all or some of the following options:
|
||||
# --status - new status of this node
|
||||
# --uuid - UUID of the cluster
|
||||
# --primary - whether the component is primary or not ("yes"/"no")
|
||||
# --members - comma-separated list of members
|
||||
# --index - index of this node in the list
|
||||
wsrep_notify_cmd=
|
||||
|
||||
##
|
||||
## WSREP State Transfer options
|
||||
##
|
||||
|
||||
# State Snapshot Transfer method
|
||||
wsrep_sst_method=mysqldump
|
||||
|
||||
# Address on THIS node to receive SST at. DON'T SET IT TO DONOR ADDRESS!!!
|
||||
# (SST method dependent. Defaults to the first IP of the first interface)
|
||||
wsrep_sst_receive_address=192.168.0.2:3307
|
||||
|
||||
# SST authentication string. This will be used to send SST to joining nodes.
|
||||
# Depends on SST method. For mysqldump method it is root:<root password>
|
||||
wsrep_sst_auth=wsrep_sst:password
|
||||
|
||||
# Desired SST donor name.
|
||||
#wsrep_sst_donor=
|
||||
|
||||
# Protocol version to use
|
||||
# wsrep_protocol_version=
|
||||
skip-name-resolve
|
102
config_samples/fuel_web/golden_fuelweb/cnt1/nova/api-paste.ini
Normal file
102
config_samples/fuel_web/golden_fuelweb/cnt1/nova/api-paste.ini
Normal file
@ -0,0 +1,102 @@
|
||||
############
|
||||
# Metadata #
|
||||
############
|
||||
[composite:metadata]
|
||||
use = egg:Paste#urlmap
|
||||
/: meta
|
||||
|
||||
[pipeline:meta]
|
||||
pipeline = ec2faultwrap logrequest metaapp
|
||||
|
||||
[app:metaapp]
|
||||
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
|
||||
|
||||
#######
|
||||
# EC2 #
|
||||
#######
|
||||
|
||||
[composite:ec2]
|
||||
use = egg:Paste#urlmap
|
||||
/services/Cloud: ec2cloud
|
||||
|
||||
[composite:ec2cloud]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
|
||||
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
|
||||
|
||||
[filter:ec2faultwrap]
|
||||
paste.filter_factory = nova.api.ec2:FaultWrapper.factory
|
||||
|
||||
[filter:logrequest]
|
||||
paste.filter_factory = nova.api.ec2:RequestLogging.factory
|
||||
|
||||
[filter:ec2lockout]
|
||||
paste.filter_factory = nova.api.ec2:Lockout.factory
|
||||
|
||||
[filter:ec2keystoneauth]
|
||||
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
|
||||
|
||||
[filter:ec2noauth]
|
||||
paste.filter_factory = nova.api.ec2:NoAuth.factory
|
||||
|
||||
[filter:cloudrequest]
|
||||
controller = nova.api.ec2.cloud.CloudController
|
||||
paste.filter_factory = nova.api.ec2:Requestify.factory
|
||||
|
||||
[filter:authorizer]
|
||||
paste.filter_factory = nova.api.ec2:Authorizer.factory
|
||||
|
||||
[filter:validator]
|
||||
paste.filter_factory = nova.api.ec2:Validator.factory
|
||||
|
||||
[app:ec2executor]
|
||||
paste.app_factory = nova.api.ec2:Executor.factory
|
||||
|
||||
#############
|
||||
# Openstack #
|
||||
#############
|
||||
|
||||
[composite:osapi_compute]
|
||||
use = call:nova.api.openstack.urlmap:urlmap_factory
|
||||
/: oscomputeversions
|
||||
/v1.1: openstack_compute_api_v2
|
||||
/v2: openstack_compute_api_v2
|
||||
|
||||
[composite:openstack_compute_api_v2]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
|
||||
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
|
||||
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
|
||||
|
||||
[filter:faultwrap]
|
||||
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
|
||||
|
||||
[filter:noauth]
|
||||
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
|
||||
|
||||
[filter:ratelimit]
|
||||
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
|
||||
|
||||
[filter:sizelimit]
|
||||
paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
|
||||
|
||||
[app:osapi_compute_app_v2]
|
||||
paste.app_factory = nova.api.openstack.compute:APIRouter.factory
|
||||
|
||||
[pipeline:oscomputeversions]
|
||||
pipeline = faultwrap oscomputeversionapp
|
||||
|
||||
[app:oscomputeversionapp]
|
||||
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
|
||||
|
||||
##########
|
||||
# Shared #
|
||||
##########
|
||||
|
||||
[filter:keystonecontext]
|
||||
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
||||
# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
|
||||
auth_version = v2.0
|
@ -0,0 +1,21 @@
|
||||
[loggers]
|
||||
keys = root
|
||||
|
||||
[handlers]
|
||||
keys = syslog
|
||||
|
||||
[formatters]
|
||||
keys = default
|
||||
|
||||
[logger_root]
|
||||
level = DEBUG
|
||||
handlers = syslog
|
||||
qualname = nova
|
||||
|
||||
[handler_syslog]
|
||||
class = handlers.SysLogHandler
|
||||
args = ('/dev/log', handlers.SysLogHandler.LOG_LOCAL0)
|
||||
formatter = default
|
||||
|
||||
[formatter_default]
|
||||
format = nova %(name)s %(levelname)s %(message)s
|
76
config_samples/fuel_web/golden_fuelweb/cnt1/nova/nova.conf
Normal file
76
config_samples/fuel_web/golden_fuelweb/cnt1/nova/nova.conf
Normal file
@ -0,0 +1,76 @@
|
||||
[DEFAULT]
|
||||
logdir = /var/log/nova
|
||||
state_path = /var/lib/nova
|
||||
lock_path = /var/lib/nova/tmp
|
||||
volumes_dir = /etc/nova/volumes
|
||||
dhcpbridge = /usr/bin/nova-dhcpbridge
|
||||
dhcpbridge_flagfile = /etc/nova/nova.conf
|
||||
force_dhcp_release = true
|
||||
injected_network_template = /usr/share/nova/interfaces.template
|
||||
libvirt_nonblocking = True
|
||||
libvirt_inject_partition = -1
|
||||
network_manager = nova.network.manager.VlanManager
|
||||
iscsi_helper = tgtadm
|
||||
sql_connection = mysql://nova:jMsyf1wU@192.168.0.7/nova
|
||||
compute_driver = libvirt.LibvirtDriver
|
||||
firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver
|
||||
rpc_backend = nova.rpc.impl_kombu
|
||||
rootwrap_config = /etc/nova/rootwrap.conf
|
||||
debug=true
|
||||
rabbit_hosts=192.168.0.7:5672
|
||||
quota_volumes=100
|
||||
osapi_compute_listen=192.168.0.2
|
||||
ec2_listen=192.168.0.2
|
||||
quota_max_injected_file_content_bytes=102400
|
||||
glance_api_servers=240.0.1.7:9292
|
||||
novncproxy_host=240.0.1.7
|
||||
rabbit_userid=nova
|
||||
rabbit_ha_queues=True
|
||||
rabbit_password=zrk9MfKV
|
||||
verbose=true
|
||||
auto_assign_floating_ip=True
|
||||
logging_default_format_string=%(levelname)s %(name)s [-] %(instance)s %(message)s
|
||||
quota_cores=100
|
||||
logging_context_format_string=%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s %(message)s
|
||||
enabled_apis=ec2,osapi_compute
|
||||
rabbit_virtual_host=/
|
||||
image_service=nova.image.glance.GlanceImageService
|
||||
volume_api_class=nova.volume.cinder.API
|
||||
use_cow_images=true
|
||||
quota_max_injected_files=50
|
||||
novncproxy_port=6080
|
||||
log_config=/etc/nova/logging.conf
|
||||
rabbit_port=5672
|
||||
vlan_start=103
|
||||
compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
|
||||
quota_max_injected_file_path_bytes=4096
|
||||
api_paste_config=/etc/nova/api-paste.ini
|
||||
quota_floating_ips=100
|
||||
multi_host=True
|
||||
public_interface=eth0.100
|
||||
start_guests_on_host_boot=true
|
||||
service_down_time=60
|
||||
syslog_log_facility=LOCAL0
|
||||
quota_gigabytes=1000
|
||||
quota_instances=100
|
||||
osapi_volume_listen=192.168.0.2
|
||||
metadata_listen=192.168.0.2
|
||||
auth_strategy=keystone
|
||||
quota_metadata_items=1024
|
||||
fixed_range=10.0.0.0/24
|
||||
use_syslog=True
|
||||
dhcp_domain=novalocal
|
||||
allow_resize_to_same_host=True
|
||||
vlan_interface=eth0
|
||||
memcached_servers=controller-15:11211,controller-14:11211,controller-13:11211
|
||||
|
||||
[keystone_authtoken]
|
||||
admin_tenant_name = services
|
||||
admin_user = nova
|
||||
admin_password = Zc1VlBC9
|
||||
auth_host = 192.168.0.7
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
signing_dir = /tmp/keystone-signing-nova
|
||||
signing_dirname=/tmp/keystone-signing-nova
|
||||
|
161
config_samples/fuel_web/golden_fuelweb/cnt1/nova/policy.json
Normal file
161
config_samples/fuel_web/golden_fuelweb/cnt1/nova/policy.json
Normal file
@ -0,0 +1,161 @@
|
||||
{
|
||||
"context_is_admin": "role:admin",
|
||||
"admin_or_owner": "is_admin:True or project_id:%(project_id)s",
|
||||
"default": "rule:admin_or_owner",
|
||||
|
||||
|
||||
"compute:create": "",
|
||||
"compute:create:attach_network": "",
|
||||
"compute:create:attach_volume": "",
|
||||
"compute:create:forced_host": "is_admin:True",
|
||||
"compute:get_all": "",
|
||||
"compute:get_all_tenants": "",
|
||||
|
||||
|
||||
"admin_api": "is_admin:True",
|
||||
"compute_extension:accounts": "rule:admin_api",
|
||||
"compute_extension:admin_actions": "rule:admin_api",
|
||||
"compute_extension:admin_actions:pause": "rule:admin_or_owner",
|
||||
"compute_extension:admin_actions:unpause": "rule:admin_or_owner",
|
||||
"compute_extension:admin_actions:suspend": "rule:admin_or_owner",
|
||||
"compute_extension:admin_actions:resume": "rule:admin_or_owner",
|
||||
"compute_extension:admin_actions:lock": "rule:admin_api",
|
||||
"compute_extension:admin_actions:unlock": "rule:admin_api",
|
||||
"compute_extension:admin_actions:resetNetwork": "rule:admin_api",
|
||||
"compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api",
|
||||
"compute_extension:admin_actions:createBackup": "rule:admin_or_owner",
|
||||
"compute_extension:admin_actions:migrateLive": "rule:admin_api",
|
||||
"compute_extension:admin_actions:resetState": "rule:admin_api",
|
||||
"compute_extension:admin_actions:migrate": "rule:admin_api",
|
||||
"compute_extension:aggregates": "rule:admin_api",
|
||||
"compute_extension:agents": "rule:admin_api",
|
||||
"compute_extension:attach_interfaces": "",
|
||||
"compute_extension:baremetal_nodes": "rule:admin_api",
|
||||
"compute_extension:cells": "rule:admin_api",
|
||||
"compute_extension:certificates": "",
|
||||
"compute_extension:cloudpipe": "rule:admin_api",
|
||||
"compute_extension:cloudpipe_update": "rule:admin_api",
|
||||
"compute_extension:console_output": "",
|
||||
"compute_extension:consoles": "",
|
||||
"compute_extension:coverage_ext": "rule:admin_api",
|
||||
"compute_extension:createserverext": "",
|
||||
"compute_extension:deferred_delete": "",
|
||||
"compute_extension:disk_config": "",
|
||||
"compute_extension:evacuate": "rule:admin_api",
|
||||
"compute_extension:extended_server_attributes": "rule:admin_api",
|
||||
"compute_extension:extended_status": "",
|
||||
"compute_extension:extended_availability_zone": "",
|
||||
"compute_extension:extended_ips": "",
|
||||
"compute_extension:fixed_ips": "rule:admin_api",
|
||||
"compute_extension:flavor_access": "",
|
||||
"compute_extension:flavor_disabled": "",
|
||||
"compute_extension:flavor_rxtx": "",
|
||||
"compute_extension:flavor_swap": "",
|
||||
"compute_extension:flavorextradata": "",
|
||||
"compute_extension:flavorextraspecs:index": "",
|
||||
"compute_extension:flavorextraspecs:show": "",
|
||||
"compute_extension:flavorextraspecs:create": "rule:admin_api",
|
||||
"compute_extension:flavorextraspecs:update": "rule:admin_api",
|
||||
"compute_extension:flavorextraspecs:delete": "rule:admin_api",
|
||||
"compute_extension:flavormanage": "rule:admin_api",
|
||||
"compute_extension:floating_ip_dns": "",
|
||||
"compute_extension:floating_ip_pools": "",
|
||||
"compute_extension:floating_ips": "",
|
||||
"compute_extension:floating_ips_bulk": "rule:admin_api",
|
||||
"compute_extension:fping": "",
|
||||
"compute_extension:fping:all_tenants": "rule:admin_api",
|
||||
"compute_extension:hide_server_addresses": "is_admin:False",
|
||||
"compute_extension:hosts": "rule:admin_api",
|
||||
"compute_extension:hypervisors": "rule:admin_api",
|
||||
"compute_extension:image_size": "",
|
||||
"compute_extension:instance_actions": "",
|
||||
"compute_extension:instance_actions:events": "rule:admin_api",
|
||||
"compute_extension:instance_usage_audit_log": "rule:admin_api",
|
||||
"compute_extension:keypairs": "",
|
||||
"compute_extension:multinic": "",
|
||||
"compute_extension:networks": "rule:admin_api",
|
||||
"compute_extension:networks:view": "",
|
||||
"compute_extension:networks_associate": "rule:admin_api",
|
||||
"compute_extension:quotas:show": "",
|
||||
"compute_extension:quotas:update": "rule:admin_api",
|
||||
"compute_extension:quota_classes": "",
|
||||
"compute_extension:rescue": "",
|
||||
"compute_extension:security_group_default_rules": "rule:admin_api",
|
||||
"compute_extension:security_groups": "",
|
||||
"compute_extension:server_diagnostics": "rule:admin_api",
|
||||
"compute_extension:server_password": "",
|
||||
"compute_extension:services": "rule:admin_api",
|
||||
"compute_extension:simple_tenant_usage:show": "rule:admin_or_owner",
|
||||
"compute_extension:simple_tenant_usage:list": "rule:admin_api",
|
||||
"compute_extension:users": "rule:admin_api",
|
||||
"compute_extension:virtual_interfaces": "",
|
||||
"compute_extension:virtual_storage_arrays": "",
|
||||
"compute_extension:volumes": "",
|
||||
"compute_extension:volume_attachments:index": "",
|
||||
"compute_extension:volume_attachments:show": "",
|
||||
"compute_extension:volume_attachments:create": "",
|
||||
"compute_extension:volume_attachments:delete": "",
|
||||
"compute_extension:volumetypes": "",
|
||||
"compute_extension:availability_zone:list": "",
|
||||
"compute_extension:availability_zone:detail": "rule:admin_api",
|
||||
|
||||
|
||||
"volume:create": "",
|
||||
"volume:get_all": "",
|
||||
"volume:get_volume_metadata": "",
|
||||
"volume:get_snapshot": "",
|
||||
"volume:get_all_snapshots": "",
|
||||
|
||||
|
||||
"volume_extension:types_manage": "rule:admin_api",
|
||||
"volume_extension:types_extra_specs": "rule:admin_api",
|
||||
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
|
||||
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
|
||||
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
|
||||
|
||||
|
||||
"network:get_all": "",
|
||||
"network:get": "",
|
||||
"network:create": "",
|
||||
"network:delete": "",
|
||||
"network:associate": "",
|
||||
"network:disassociate": "",
|
||||
"network:get_vifs_by_instance": "",
|
||||
"network:allocate_for_instance": "",
|
||||
"network:deallocate_for_instance": "",
|
||||
"network:validate_networks": "",
|
||||
"network:get_instance_uuids_by_ip_filter": "",
|
||||
"network:get_instance_id_by_floating_address": "",
|
||||
"network:setup_networks_on_host": "",
|
||||
"network:get_backdoor_port": "",
|
||||
|
||||
"network:get_floating_ip": "",
|
||||
"network:get_floating_ip_pools": "",
|
||||
"network:get_floating_ip_by_address": "",
|
||||
"network:get_floating_ips_by_project": "",
|
||||
"network:get_floating_ips_by_fixed_address": "",
|
||||
"network:allocate_floating_ip": "",
|
||||
"network:deallocate_floating_ip": "",
|
||||
"network:associate_floating_ip": "",
|
||||
"network:disassociate_floating_ip": "",
|
||||
"network:release_floating_ip": "",
|
||||
"network:migrate_instance_start": "",
|
||||
"network:migrate_instance_finish": "",
|
||||
|
||||
"network:get_fixed_ip": "",
|
||||
"network:get_fixed_ip_by_address": "",
|
||||
"network:add_fixed_ip_to_instance": "",
|
||||
"network:remove_fixed_ip_from_instance": "",
|
||||
"network:add_network_to_project": "",
|
||||
"network:get_instance_nw_info": "",
|
||||
|
||||
"network:get_dns_domains": "",
|
||||
"network:add_dns_entry": "",
|
||||
"network:modify_dns_entry": "",
|
||||
"network:delete_dns_entry": "",
|
||||
"network:get_dns_entries_by_address": "",
|
||||
"network:get_dns_entries_by_name": "",
|
||||
"network:create_private_dns_domain": "",
|
||||
"network:create_public_dns_domain": "",
|
||||
"network:delete_dns_domain": ""
|
||||
}
|
4
config_samples/fuel_web/golden_fuelweb/cnt1/nova/release
Normal file
4
config_samples/fuel_web/golden_fuelweb/cnt1/nova/release
Normal file
@ -0,0 +1,4 @@
|
||||
[Nova]
|
||||
vendor = Red Hat Inc.
|
||||
product = OpenStack Nova
|
||||
package = mira.2
|
@ -0,0 +1,27 @@
|
||||
# Configuration for nova-rootwrap
|
||||
# This file should be owned by (and only-writeable by) the root user
|
||||
|
||||
[DEFAULT]
|
||||
# List of directories to load filter definitions from (separated by ',').
|
||||
# These directories MUST all be only writeable by root !
|
||||
filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
|
||||
|
||||
# List of directories to search executables in, in case filters do not
|
||||
# explicitely specify a full path (separated by ',')
|
||||
# If not specified, defaults to system PATH environment variable.
|
||||
# These directories MUST all be only writeable by root !
|
||||
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
|
||||
|
||||
# Enable logging to syslog
|
||||
# Default value is False
|
||||
use_syslog=False
|
||||
|
||||
# Which syslog facility to use.
|
||||
# Valid values include auth, authpriv, syslog, user0, user1...
|
||||
# Default value is 'syslog'
|
||||
syslog_log_facility=syslog
|
||||
|
||||
# Which messages to log.
|
||||
# INFO means log all usage
|
||||
# ERROR means only log unsuccessful attempts
|
||||
syslog_log_level=ERROR
|
@ -0,0 +1,146 @@
|
||||
import os
|
||||
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
DEBUG = False
|
||||
TEMPLATE_DEBUG = DEBUG
|
||||
|
||||
|
||||
|
||||
# Specify a regular expression to validate user passwords.
|
||||
# HORIZON_CONFIG = {
|
||||
# "password_validator": {
|
||||
# "regex": '.*',
|
||||
# "help_text": _("Your password does not meet the requirements.")
|
||||
# }
|
||||
# }
|
||||
|
||||
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
# Note: You should change this value
|
||||
SECRET_KEY = 'dummy_secret_key'
|
||||
|
||||
# We recommend you use memcached for development; otherwise after every reload
|
||||
# of the django development server, you will have to login again. To use
|
||||
# memcached set CACHE_BACKED to something like 'memcached://127.0.0.1:11211/'
|
||||
CACHE_BACKEND = 'memcached://controller-15:11211;controller-14:11211;controller-13:11211/'
|
||||
|
||||
# Send email to the console by default
|
||||
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
||||
# Or send them to /dev/null
|
||||
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
|
||||
|
||||
# Configure these for your outgoing email host
|
||||
# EMAIL_HOST = 'smtp.my-company.com'
|
||||
# EMAIL_PORT = 25
|
||||
# EMAIL_HOST_USER = 'djangomail'
|
||||
# EMAIL_HOST_PASSWORD = 'top-secret!'
|
||||
|
||||
# For multiple regions uncomment this configuration, and add (endpoint, title).
|
||||
# AVAILABLE_REGIONS = [
|
||||
# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
|
||||
# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
|
||||
# ]
|
||||
|
||||
OPENSTACK_HOST = "192.168.0.7"
|
||||
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
|
||||
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
|
||||
|
||||
# Disable SSL certificate checks (useful for self-signed certificates):
|
||||
OPENSTACK_SSL_NO_VERIFY = True
|
||||
|
||||
# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
|
||||
# capabilities of the auth backend for Keystone.
|
||||
# If Keystone has been configured to use LDAP as the auth backend then set
|
||||
# can_edit_user to False and name to 'ldap'.
|
||||
#
|
||||
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
|
||||
OPENSTACK_KEYSTONE_BACKEND = {
|
||||
'name': 'native',
|
||||
'can_edit_user': True
|
||||
}
|
||||
|
||||
OPENSTACK_HYPERVISOR_FEATURES = {
|
||||
'can_set_mount_point': True
|
||||
}
|
||||
|
||||
# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
|
||||
# in the Keystone service catalog. Use this setting when Horizon is running
|
||||
# external to the OpenStack environment. The default is 'internalURL'.
|
||||
#OPENSTACK_ENDPOINT_TYPE = "publicURL"
|
||||
|
||||
# Include the SWIFT interface extension in Horizon
|
||||
SWIFT_ENABLED = True
|
||||
SWIFT_PAGINATE_LIMIT = 100
|
||||
|
||||
# The number of Swift containers and objects to display on a single page before
|
||||
# providing a paging element (a "more" link) to paginate results.
|
||||
API_RESULT_LIMIT = 1000
|
||||
API_RESULT_PAGE_SIZE = 20
|
||||
|
||||
|
||||
# If you have external monitoring links, eg:
|
||||
EXTERNAL_MONITORING = [ ]
|
||||
LOGGING = {
|
||||
'version': 1,
|
||||
# When set to True this will disable all logging except
|
||||
# for loggers specified in this configuration dictionary. Note that
|
||||
# if nothing is specified here and disable_existing_loggers is True,
|
||||
# django.db.backends will still log unless it is disabled explicitly.
|
||||
'disable_existing_loggers': False,
|
||||
'handlers': {
|
||||
'null': {
|
||||
'level': 'DEBUG',
|
||||
'class': 'django.utils.log.NullHandler',
|
||||
},
|
||||
'console': {
|
||||
# Set the level to "DEBUG" for verbose output logging.
|
||||
'level': 'INFO',
|
||||
'class': 'logging.StreamHandler',
|
||||
},
|
||||
'file': {
|
||||
'level': 'DEBUG',
|
||||
'class': 'logging.FileHandler',
|
||||
'filename': '/var/log/horizon/horizon.log'
|
||||
},
|
||||
},
|
||||
'loggers': {
|
||||
# Logging from django.db.backends is VERY verbose, send to null
|
||||
# by default.
|
||||
'django.db.backends': {
|
||||
'handlers': ['null'],
|
||||
'propagate': False,
|
||||
},
|
||||
'horizon': {
|
||||
'handlers': ['file'],
|
||||
'propagate': False,
|
||||
},
|
||||
'openstack_dashboard': {
|
||||
'handlers': ['file'],
|
||||
'propagate': False,
|
||||
},
|
||||
'novaclient': {
|
||||
'handlers': ['file'],
|
||||
'propagate': False,
|
||||
},
|
||||
'glanceclient': {
|
||||
'handlers': ['file'],
|
||||
'propagate': False,
|
||||
},
|
||||
'keystoneclient': {
|
||||
'handlers': ['file'],
|
||||
'propagate': False,
|
||||
},
|
||||
'nose.plugins.manager': {
|
||||
'handlers': ['file'],
|
||||
'propagate': False,
|
||||
}
|
||||
}
|
||||
}
|
||||
LOGIN_URL='/dashboard/auth/login/'
|
||||
LOGIN_REDIRECT_URL='/dashboard'
|
||||
|
||||
# The Ubuntu package includes pre-compressed JS and compiled CSS to allow
|
||||
# offline compression by default. To enable online compression, install
|
||||
# the node-less package and enable the following option.
|
||||
COMPRESS_OFFLINE = False
|
@ -0,0 +1,6 @@
|
||||
RABBITMQ_NODE_PORT=5673
|
||||
RABBITMQ_NODE_IP_ADDRESS=192.168.0.2
|
||||
RABBITMQ_SERVER_ERL_ARGS="+K true +A30 +P 1048576 \
|
||||
-kernel inet_default_connect_options [{nodelay,true}] \
|
||||
-kernel inet_dist_listen_min 41055 \
|
||||
-kernel inet_dist_listen_max 41055"
|
@ -0,0 +1,6 @@
|
||||
% This file managed by Puppet 2.7.19
|
||||
% Template Path: rabbitmq/templates/rabbitmq.config
|
||||
[
|
||||
{rabbit, [{cluster_nodes, ['rabbit@controller-15', 'rabbit@controller-14', 'rabbit@controller-13']}]}
|
||||
].
|
||||
% EOF
|
@ -0,0 +1,22 @@
|
||||
[DEFAULT]
|
||||
devices = /srv/node
|
||||
bind_ip = 172.16.0.2
|
||||
bind_port = 6002
|
||||
mount_check = false
|
||||
user = swift
|
||||
log_facility = LOG_LOCAL2
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = account-server
|
||||
|
||||
[app:account-server]
|
||||
use = egg:swift#account
|
||||
|
||||
[account-replicator]
|
||||
concurrency = 1
|
||||
|
||||
[account-auditor]
|
||||
|
||||
[account-reaper]
|
||||
concurrency = 1
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,24 @@
|
||||
[DEFAULT]
|
||||
devices = /srv/node
|
||||
bind_ip = 172.16.0.2
|
||||
bind_port = 6001
|
||||
mount_check = false
|
||||
user = swift
|
||||
log_facility = LOG_LOCAL2
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = container-server
|
||||
|
||||
[app:container-server]
|
||||
use = egg:swift#container
|
||||
|
||||
[container-replicator]
|
||||
concurrency = 1
|
||||
|
||||
[container-updater]
|
||||
concurrency = 1
|
||||
|
||||
[container-auditor]
|
||||
|
||||
[container-sync]
|
@ -0,0 +1,18 @@
|
||||
[DEFAULT]
|
||||
bind_ip = 127.0.0.1
|
||||
bind_port = 6001
|
||||
workers = 2
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = container-server
|
||||
|
||||
[app:container-server]
|
||||
use = egg:swift#container
|
||||
|
||||
[container-replicator]
|
||||
|
||||
[container-updater]
|
||||
|
||||
[container-auditor]
|
||||
|
||||
[container-sync]
|
Binary file not shown.
Binary file not shown.
@ -0,0 +1,17 @@
|
||||
[DEFAULT]
|
||||
|
||||
[object-expirer]
|
||||
# auto_create_account_prefix = .
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors cache proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:swift#proxy
|
||||
|
||||
[filter:cache]
|
||||
use = egg:swift#memcache
|
||||
memcache_servers = 127.0.0.1:11211
|
||||
|
||||
[filter:catch_errors]
|
||||
use = egg:swift#catch_errors
|
@ -0,0 +1,22 @@
|
||||
[DEFAULT]
|
||||
devices = /srv/node
|
||||
bind_ip = 172.16.0.2
|
||||
bind_port = 6000
|
||||
mount_check = false
|
||||
user = swift
|
||||
log_facility = LOG_LOCAL2
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = object-server
|
||||
|
||||
[app:object-server]
|
||||
use = egg:swift#object
|
||||
|
||||
[object-replicator]
|
||||
concurrency = 1
|
||||
|
||||
[object-updater]
|
||||
concurrency = 1
|
||||
|
||||
[object-auditor]
|
BIN
config_samples/fuel_web/golden_fuelweb/cnt1/swift/object.builder
Normal file
BIN
config_samples/fuel_web/golden_fuelweb/cnt1/swift/object.builder
Normal file
Binary file not shown.
BIN
config_samples/fuel_web/golden_fuelweb/cnt1/swift/object.ring.gz
Normal file
BIN
config_samples/fuel_web/golden_fuelweb/cnt1/swift/object.ring.gz
Normal file
Binary file not shown.
@ -0,0 +1,65 @@
|
||||
# This file is managed by puppet. Do not edit
|
||||
#
|
||||
[DEFAULT]
|
||||
bind_ip = 192.168.0.2
|
||||
bind_port = 8080
|
||||
workers = 1
|
||||
user = swift
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors healthcheck cache ratelimit swift3 s3token authtoken keystone proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:swift#proxy
|
||||
allow_account_management = true
|
||||
account_autocreate = true
|
||||
|
||||
[filter:cache]
|
||||
use = egg:swift#memcache
|
||||
memcache_servers = controller-13:11211,controller-14:11211,controller-15:11211
|
||||
[filter:catch_errors]
|
||||
use = egg:swift#catch_errors
|
||||
|
||||
|
||||
[filter:healthcheck]
|
||||
use = egg:swift#healthcheck
|
||||
|
||||
[filter:ratelimit]
|
||||
use = egg:swift#ratelimit
|
||||
clock_accuracy = 1000
|
||||
max_sleep_time_seconds = 60
|
||||
log_sleep_time_seconds = 0
|
||||
rate_buffer_seconds = 5
|
||||
account_ratelimit = 0
|
||||
|
||||
[filter:swift3]
|
||||
use = egg:swift3#swift3
|
||||
|
||||
[filter:s3token]
|
||||
paste.filter_factory = keystone.middleware.s3_token:filter_factory
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
auth_host = 192.168.0.7
|
||||
|
||||
[filter:keystone]
|
||||
use = egg:swift#keystoneauth
|
||||
operator_roles = admin, SwiftOperator
|
||||
is_admin = true
|
||||
cache = swift.cache
|
||||
|
||||
|
||||
#
|
||||
# used to specify connection information to keystone
|
||||
#
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||
auth_host = 192.168.0.7
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
auth_uri = http://192.168.0.7:35357
|
||||
# if its defined
|
||||
admin_tenant_name = services
|
||||
admin_user = swift
|
||||
admin_password = 0moSL8AJ
|
||||
delay_auth_decision = 0
|
||||
signing_dir = /etc/swift
|
@ -0,0 +1,2 @@
|
||||
[swift-hash]
|
||||
swift_hash_path_suffix = swift_secret
|
@ -0,0 +1,52 @@
|
||||
#############
|
||||
# OpenStack #
|
||||
#############
|
||||
|
||||
[composite:osapi_volume]
|
||||
use = call:cinder.api:root_app_factory
|
||||
/: apiversions
|
||||
/v1: openstack_volume_api_v1
|
||||
/v2: openstack_volume_api_v2
|
||||
|
||||
[composite:openstack_volume_api_v1]
|
||||
use = call:cinder.api.middleware.auth:pipeline_factory
|
||||
noauth = faultwrap sizelimit noauth apiv1
|
||||
keystone = faultwrap sizelimit authtoken keystonecontext apiv1
|
||||
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1
|
||||
|
||||
[composite:openstack_volume_api_v2]
|
||||
use = call:cinder.api.middleware.auth:pipeline_factory
|
||||
noauth = faultwrap sizelimit noauth apiv2
|
||||
keystone = faultwrap sizelimit authtoken keystonecontext apiv2
|
||||
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2
|
||||
|
||||
[filter:faultwrap]
|
||||
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
|
||||
|
||||
[filter:noauth]
|
||||
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
|
||||
|
||||
[filter:sizelimit]
|
||||
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
|
||||
|
||||
[app:apiv1]
|
||||
paste.app_factory = cinder.api.v1.router:APIRouter.factory
|
||||
|
||||
[app:apiv2]
|
||||
paste.app_factory = cinder.api.v2.router:APIRouter.factory
|
||||
|
||||
[pipeline:apiversions]
|
||||
pipeline = faultwrap osvolumeversionapp
|
||||
|
||||
[app:osvolumeversionapp]
|
||||
paste.app_factory = cinder.api.versions:Versions.factory
|
||||
|
||||
##########
|
||||
# Shared #
|
||||
##########
|
||||
|
||||
[filter:keystonecontext]
|
||||
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user