Update v1 to 1.17.1
This commit is contained in: commit 4ec728d693
.travis.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
+language: python
+python:
+- '2.7'
+branches:
+  only:
+  - v1
+script: 'true'
+deploy:
+  provider: pypi
+  user: f3flight
+  password:
+    secure: EqBazqty5cIAB5jACoFoI2j4lZmjMkb+4Rp7wJZCU8EuLotW/cMRTgIhuTlvkFvMVHUl2f6kBitovrxTcxRAVPAXtUimlcOL4e/YfE+zoqIGlnU1F3xymNKLhfTxYU8buQS3OhVIKPfPEG9lm+2TDGXnqa7A1tkb8N8r+SbhTuioxjn0cLfCfT9E0resAHJV14TSUdn7AsZC3Jb3OzZlhIURpOmZsPEMaiEXeAGWiMRVCE92PfFvRIgz44JPYNYTXsCC+XyHn3WpP0zFgqUEg9DqHMUEMMbmu0ocwMJ4m5ZTufJGqKK6QBfElsj04RIeQ3g3xsBA03c/6z4j1CyglVNqjcgJCTAs47ySGhdFrFtWgwTvDnLDzmF5M02yC3ZYxeVx3P1I0KkFA+uGaCMoT0Wi6CyyFmjdDEAcPeYTlGNgaztyMN2X6bfk0XD2fkLgDT56viF1+6ycMT1prtg3+s62ZDFqULX78w3u8k3uxRqOIhnvyFRw+19MjeLNdxpMl/NC/mOcDquswD2wwAu/1KvSD3JmQWgP8RqPS50gaNuBFcd0gN0qJjqlJCVdhipoWyfNegKp++nhT5vPszg93W8UB1w+0dUkQ5t1BM+gNYM0xHdJQMZ/ZB8qM47izTNVzgMc7fv8TouAWbg2mfTQIwx5zEYopnYsNpT7F9BmXgs=
+notifications:
+  slack:
+    secure: CIqbLpqxjoWmrK+ApZ01K4pORoOBg4Cg62VZMb1cUICMtbAKYhZuoW5lX3KkBGSKtLt3qd0vxXk7rRy1+GiZBmnaTTiPf501Se/j7jsJ/b2DkTT4thFpbsynEFdxrC9LHBbjnbzfMcoCHIV5Hw8LylZdWO7ees6FQF20/N6bBv0nTEXtzo3iiMejj+E6nrWW5uqAqf60BgLd7M7mOudrjEE9sWS8i+3cZnQED+ljY+2SUAlJW+IP4tc/aYjGzAtpkNniR0uI3TMgNNFyQ/GD8FU8uLu5fBdbVrz6wuLUpJUFItiuZC/ouJpCW9+L1wwM67Xn1++DabmL/7wdL7Jagl8KBs8pP0GZl35JqHSu3B3AcdHSAISLyGTGveHIhB47MSqFBTgOlafy2XNf7FxKKhCnRxU+nt5AsNfxiJwQGWNCf7gjPHq93PT45MSqKdilwzrjyeYwraL8e++NrlDavH5rOcZs76NRNfb53NG0tU0wWo5DTDk8GWUMMPvc8w1YJvs/WuZRdrKWsacf7Zm6TSXfNXOnMYBikxQexZSJ+YK1s5ZSjQ8zezu0O7xZp+CLuS0cO5EuExbXIzWN9Ex6Y/8hdY27dg4CgzQ3BCAW8G5GavZhlMiL/EQNb2jP8HoOOARyLN56LMexA/u6lRtQBEz57P6oIBMRoe/U6GNs9zs=
@@ -2,4 +2,3 @@ recursive-include rq *
 recursive-include doc *
 include README.md
 include config.yaml
-include rq.yaml
config.yaml (12 changed lines)
@@ -8,11 +8,12 @@ ssh_opts:
 env_vars:
 - 'OPENRC=/root/openrc'
 - 'IPTABLES_STR="iptables -nvL"'
+- 'LC_ALL="C"'
+- 'LANG="C"'
 # fuel_ip: '127.0.0.1'
 # fuel_user: 'admin'
 # fuel_pass: 'admin'
 rqdir: './rq'
-rqfile: './rq.yaml'
 soft_filter:
   status: ['ready']
   online: True
@@ -22,16 +23,19 @@ timeout: 15
 compress_timeout: 3600
 logs:
   path: '/var/log'
-  exclude: '[-_]\d{8}$|atop[-_]|\.gz$'
+  exclude:
+  - '[-_]\d{8}$|atop[-_]|\.gz$'
 # by_roles:
 #   compute:
 #     logs:
 #       path: '/var/log'
-#       include: 'compute'
+#       include:
+#       - 'compute'
 #   ceph-osd:
 #     logs:
 #       path: '/var/log'
-#       include: 'ceph'
+#       include:
+#       - 'ceph'
 #       start: '2016-05-05'
 # by_id:
 #   1:
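The include/exclude values are regular expressions matched against file paths with Python's re.search(), as the filter code in timmy/nodes.py later in this commit shows. A quick standalone check of the default exclude pattern (the filenames are made-up examples, not part of the config)::

  import re

  exclude = r'[-_]\d{8}$|atop[-_]|\.gz$'
  for name in ['messages-20160505', 'atop_20160506', 'syslog.1.gz', 'messages']:
      print('%s excluded: %s' % (name, bool(re.search(exclude, name))))
  # the first three match (date-rotated, atop, gzipped); 'messages' is kept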
@@ -13,8 +13,23 @@ Some of the parameters available in configuration file:
 * **fuel_ip** the IP address of the master node in the environment
 * **fuel_user** username to use for accessing Nailgun API
 * **fuel_pass** password to access Nailgun API
-* **rqdir** the path of *rqdir*, the directory containing scripts to execute and filelists to pass to rsync
-* **out_dir** directory to store output data
+* **fuel_tenant** Fuel Keystone tenant to use when accessing Nailgun API
+* **fuel_port** port to use when connecting to Fuel Nailgun API
+* **fuel_keystone_port** port to use when getting a Keystone token to access Nailgun API
+* **fuelclient** True/False - whether to use the fuelclient library to access Nailgun API
+* **fuel_skip_proxy** True/False - ignore ``http(s)_proxy`` environment variables when connecting to Nailgun API
+* **rqdir** the path to the directory containing rqfiles, scripts to execute, and filelists to pass to rsync
+* **rqfile** - list of dicts:
+  * **file** - path to an rqfile containing actions and/or other configuration parameters
+  * **default** - should always be False, except for the included default.yaml. This option is used to make **logs_no_default** work
+* **logs_days** how many past days of logs to collect. This option sets the **start** parameter for each **logs** action if not defined in it
+* **logs_speed_limit** True/False - enable speed limiting of log transfers (total transfer speed limit, not per-node)
+* **logs_speed_default** Mbit/s - used when autodetect fails
+* **logs_speed** Mbit/s - manually specify max bandwidth
+* **logs_size_coefficient** a float value used to check local free space; 'logs size * coefficient' must be lower than the available free space; values lower than 0.3 are not recommended and will likely cause local disk fillup during log collection
+* **do_print_results** print outputs of commands and scripts to stdout
+* **clean** True/False - erase previous results in outdir and archive_dir, if any
+* **outdir** directory to store output data
 * **archive_dir** directory to put resulting archives into
 * **timeout** timeout for SSH commands and scripts in seconds
 
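For illustration, the **rqfile** parameter ends up in the loaded configuration as a list of dicts, a sketch based on the conf.py and cli.py changes in this commit ('./my-custom.yaml' is a hypothetical user-supplied path)::

  conf = {'rqfile': [
      # shipped defaults, eligible for logs_no_default filtering
      {'file': '/usr/share/timmy/rq/default.yaml', 'default': True},
      # user-supplied rqfile appended via --rqfile
      {'file': './my-custom.yaml', 'default': False},
  ]}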
@@ -35,7 +50,7 @@ The following actions are available for definition:
 * **INFO**: Scripts are not copied to the destination system - script code is passed as stdin to `bash -s` executed via ssh or locally. Therefore passing parameters to scripts is not supported (unlike cmds, where you can write any Bash string); you can use variables in your scripts instead. Scripts are executed in the following order: all scripts without variables, sorted by their full filename, then all scripts with variables, also sorted by full filename. Therefore, if the order matters, it is better to put all scripts into the same folder and name them according to the order in which you want them executed on the same node. Mind that scripts with variables are executed after all scripts without variables. If you need to mix scripts with and without variables and maintain order, use a dict structure for all scripts and set `null` as the value for those which do not need variables.
 * **files** - a list of filenames to collect. Passed to ``scp``. Supports wildcards.
 * **filelists** - a list of filelist filenames located on the local system. A filelist is a text file containing files and directories to collect, passed to rsync. Does not support wildcards. If the filename does not contain a path separator, the filelist is expected to be located inside ``rqdir/filelists``; otherwise the provided path is used to read the filelist.
-* **log_files**
+* **logs**
   * **path** - base path to scan for logs
   * **include** - regexp string to match log files against for inclusion (if not set, include all)
   * **exclude** - regexp string to match log files against; excludes matched files from collection
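A single logs action entry, once loaded, is just a small mapping; for instance the rq/neutron.yaml added later in this commit produces (a sketch of the loaded structure)::

  logs_item = {'path': '/var/log',    # base path to scan
               'include': 'neutron'}  # regexp for inclusion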
@@ -82,6 +97,16 @@ It is possible to define special **by_<parameter-name>** dicts in config to (re)
 
 In this example, for any controller node, the cmds setting will be reset to the value above. For nodes without the controller role, default (none) values will be used.
+
+Negative matches are possible via the **no_** prefix:
+
+::
+
+  by_roles:
+    no_fuel:
+      cmds: {'check-uptime': 'uptime'}
+
+In this example the **uptime** command will be executed on all nodes except the Fuel server.
 
 It is also possible to define a special **once_by_<parameter-name>** which works similarly, but will only result in attributes being assigned to a single (first in the list) matching node. Example:
 
 ::
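The matching rule behind this is visible in the r_apply changes to timmy/nodes.py later in this commit: a ``no_<value>`` key matches when ``<value>`` is absent from the node's attribute. A condensed, runnable sketch of that logic (example data only)::

  roles = ['controller']  # example node attribute
  section = {'no_fuel': {'cmds': {'check-uptime': 'uptime'}}}
  matching_keys = []
  # negative matching ("no_"): a no_<value> key matches when <value> is absent
  for nk in [nk for nk in section if nk.startswith('no_')]:
      if nk[4:] not in roles:
          matching_keys.append(nk)
  # positive matching
  for v in roles:
      if v in section:
          matching_keys.append(v)
  print(matching_keys)  # ['no_fuel'] - the uptime command applies to this node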
@@ -129,8 +154,7 @@ Configuration is assembled and applied in a specific order:
 3. **rqfile**, if defined (default - ``rq.yaml``), is converted and injected into the configuration. At this stage the configuration is in its final form.
 4. for every node, configuration is applied, except ``once_by_`` directives:
    1. first the top-level attributes are set
-   2. then ``by_<attribute-name>`` parameters except ``by_id`` are iterated to override or append (accumulate) the attributes
-   3. then ``by_id`` is iterated to override any matching attributes, redefining what was set before
+   2. then ``by_<attribute-name>`` parameters are iterated to override settings and append (accumulate) actions
 5. finally ``once_by_<attribute-name>`` parameters are applied - only for one matching node for any set of matching values. This is useful, for example, if you want a specific file or command from only a single node matching a specific role, like running ``nova list`` only on one controller.
 
 Once you are done with the configuration, you might want to familiarize yourself with :doc:`Usage </usage>`.
requirements.txt (new file, empty)
@@ -1,3 +1,13 @@
+files:
+  __default: ['/etc/resolv.conf', '/etc/mcollective', '/etc/astute.yaml', '/root/anaconda*', '/root/*.log', '/root/*.ks', '/var/lib/puppet/state/last_run_summary.yaml', '/var/run/pcap_dir', '/var/lib/cloud']
+  by_roles:
+    controller: ['/etc/apache2', '/etc/keystone', '/etc/swift']
+    fuel: ['/etc/astute', '/etc/dnsmasq.conf', '/etc/centos-release', '/etc/fuel_build_number', '/etc/fuel_build_id', '/etc/cobbler', '/etc/cobbler.dnsmasq.conf', '/root/*.log']
+    ceph: ['/root/.ceph*']
+    no_fuel: '/etc/hiera'
+  by_os_platform:
+    ubuntu: ['/etc/lsb-release', '/etc/network']
+    centos: ['/etc/redhat-release', '/etc/sysconfig']
 filelists:
   by_roles:
     fuel: [etc-nailgun, etc-fuel]
@@ -22,54 +32,65 @@ scripts:
       controller: [nova-manage-vm-list]
     '5.0':
       by_roles:
+        fuel: [fuel-docker-ps, fuel-dockerctl-check, postgres-dump-docker]
         controller: [nova-manage-vm-list]
     '5.0.1':
       by_roles:
-        fuel: [fuel-docker-ps, fuel-dockerctl-check, fuel-docker-db-archive]
+        fuel: [fuel-docker-ps, fuel-dockerctl-check, postgres-dump-docker]
         controller: [nova-manage-vm-list]
     '5.1':
      by_roles:
+        fuel: [fuel-dockerctl-list, fuel-docker-ps, fuel-dockerctl-check, postgres-dump-docker]
         controller: [nova-manage-vm-list]
     '5.1.1':
       by_roles:
-        fuel: [fuel-dockerctl-list, fuel-docker-ps, fuel-dockerctl-check, fuel-docker-db-archive]
+        fuel: [fuel-dockerctl-list, fuel-docker-ps, fuel-dockerctl-check, postgres-dump-docker]
         controller: [nova-manage-vm-list]
     '6.0':
       by_roles:
+        fuel: [fuel-dockerctl-list, fuel-docker-ps, fuel-dockerctl-check, postgres-dump-docker, docker-images]
         compute: [ipset-save, ipset-list]
         controller: [ipset-save, ipset-list, nova-manage-vm-list]
     '6.1':
       by_roles:
-        fuel: [fuel-notifications]
+        fuel: [fuel-notifications, fuel-dockerctl-list, fuel-docker-ps, fuel-dockerctl-check, postgres-dump-docker, docker-images]
         controller: [nova-manage-vm-list]
     '7.0':
       by_roles:
-        fuel: [fuel-notifications]
+        fuel: [fuel-notifications, fuel-dockerctl-list, fuel-docker-ps, fuel-dockerctl-check, postgres-dump-docker, docker-images]
     '8.0':
       by_roles:
-        fuel: [fuel-notifications]
+        fuel: [fuel-notifications, fuel-dockerctl-list, fuel-docker-ps, fuel-dockerctl-check, postgres-dump-docker, docker-images, fuel-bootstrap-list]
+    '9.0':
+      by_roles:
+        fuel: [fuel-notifications, fuel-postgres-dump, fuel-bootstrap-list, shotgun2-report]
   by_roles:
     fuel: [fuel-release, fuel-task-list, fuel-environment-list]
     cinder: [ovs-vsctl-show, cinder-manage]
     compute: [compute-iptables-nat, ovs-dump-flows, compute-iptables, ovs-ofctl-show-bridges,
       ovs-vsctl-show]
     controller: [rabbitmqctl-list-queues, nova-service-list, iptables-namespaces,
-      rabbitmqctl-cluster-status, crm-resource-status, ovs-dump-flows, neutron-agent-list,
+      rabbitmqctl-cluster-status, crm-resource-status, pcs-status, ovs-dump-flows, neutron-agent-list,
       mysql-status, ceph-mon-status, ovs-ofctl-show-bridges, rabbitmqctl-list-connections,
       ovs-vsctl-show, rabbitmqctl-report, mysql-size, rabbitmqctl-status, crm-resource-list,
       cinder-manage]
     mongo: [mongo-replication-status, ipa, mongo-replica-conf, mongo-status, ovs-vsctl-show]
   once_by_roles:
     ceph-osd: [ceph-df, ceph-osd-status, ceph-osd-tree, ceph-pg-dump, ovs-vsctl-show,
-      ceph-health-detail]
+      ceph-health-detail, ceph-health]
     controller: [neutron-router-list, neutron-net-list, neutron-subnet-list, keystone-endpoint-list,
       cinder-list, nova-list, keystone-tenant-list, nova-usage-list,
       neutron-port-list]
   by_os_platform:
-    ubuntu: [dmesg-t-ubuntu, packages-ubuntu]
-    centos: [dmesg-centos, packages-centos]
+    ubuntu: [dmesg-t-ubuntu, dpkg-l, apt-cache-policy]
+    centos: [dmesg-centos, yum-list-installed, yum-v-repolist]
   __default:
-    [ip-ne, iptables, ipnetns, ss, ipa, iptables-nat, df-m, services-status, cpuinfo, df-i, ipro]
+    [ip-ne, iptables, ipnetns, ss, ipa, iptables-nat, df-m, services-status, cpuinfo, df-i, ipro, mount, sysctl-a, pvdisplay, vgdisplay, lvdisplay, lsmod, dmidecode, cat-proc-interrupts, arp-an, uname-a, ps-auxwwf, uptime, dmsetup-info, brctl-show, blkid-o-list]
+logs:
+  __default:
+    path: '/var/log'
+    exclude:
+    - '\.[^12]\.gz$|\.\d{2,}\.gz$'
 # cmds:
 #   __default:
 #     test:
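The new default log exclude keeps only the two most recent gzipped rotations; the pattern can be checked directly (example filenames only)::

  import re

  exclude = r'\.[^12]\.gz$|\.\d{2,}\.gz$'
  for name in ['syslog.1.gz', 'syslog.2.gz', 'syslog.3.gz', 'syslog.12.gz']:
      print('%s excluded: %s' % (name, bool(re.search(exclude, name))))
  # .1.gz and .2.gz are kept (False); .3.gz and .12.gz are excluded (True)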
rq/neutron.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
+files:
+  __default:
+  - '/etc/neutron'
+logs:
+  __default:
+    path: '/var/log'
+    include: 'neutron'
rq/nova.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
+files:
+  __default:
+  - '/etc/nova'
+  - '/etc/libvirt'
+scripts:
+  controller:
+  - nova-list
+  - nova-service-list
+  - nova-usage-list
+logs:
+  __default:
+    path: '/var/log'
+    include: '(nova|libvirt|qemu)'
rq/scripts/apt-cache-policy (new file, 1 line)
@@ -0,0 +1 @@
+apt-cache policy

rq/scripts/arp-an (new file, 1 line)
@@ -0,0 +1 @@
+arp -an

rq/scripts/blkid-o-list (new file, 2 lines)
@@ -0,0 +1,2 @@
+blkid -o list | perl -pe 's/[^[:print:]\r\n]//g'
+# perl cleanup is necessary to work around corrupt output of blkid with long mount points (docker mount points have garbage in the end) - this at least prevents our Python code from crashing

rq/scripts/cat-proc-interrupts (new file, 1 line)
@@ -0,0 +1 @@
+cat /proc/interrupts

rq/scripts/ceph-health (new file, 1 line)
@@ -0,0 +1 @@
+ceph health

rq/scripts/ceph-s (new file, 1 line)
@@ -0,0 +1 @@
+ceph -s

rq/scripts/dmidecode (new file, 1 line)
@@ -0,0 +1 @@
+dmidecode

rq/scripts/dmsetup-info (new file, 1 line)
@@ -0,0 +1 @@
+dmsetup info -c --nameprefixes --noheadings -o blkdevname,subsystem,blkdevs_used,name,uuid

rq/scripts/docker-images (new file, 1 line)
@@ -0,0 +1 @@
+docker images

rq/scripts/dpkg-l (new file, 1 line)
@@ -0,0 +1 @@
+dpkg -l | cat

rq/scripts/fuel-bootstrap-list (new file, 1 line)
@@ -0,0 +1 @@
+fuel-bootstrap list

rq/scripts/lsmod (new file, 1 line)
@@ -0,0 +1 @@
+lsmod

rq/scripts/lvdisplay (new file, 1 line)
@@ -0,0 +1 @@
+lvdisplay

rq/scripts/mount (new file, 1 line)
@@ -0,0 +1 @@
+mount

(deleted file)
@@ -1 +0,0 @@
-time yum list installed

(deleted file)
@@ -1 +0,0 @@
-time dpkg -l

rq/scripts/pcs-status (new file, 1 line)
@@ -0,0 +1 @@
+pcs status

rq/scripts/ps-auxwwf (new file, 1 line)
@@ -0,0 +1 @@
+ps auxwwf

rq/scripts/pvdisplay (new file, 1 line)
@@ -0,0 +1 @@
+pvdisplay

rq/scripts/shotgun2-report (new file, 1 line)
@@ -0,0 +1 @@
+shotgun2 report

rq/scripts/sysctl-a (new file, 1 line)
@@ -0,0 +1 @@
+sysctl -a

rq/scripts/uname-a (new file, 1 line)
@@ -0,0 +1 @@
+uname -a

rq/scripts/uptime (new file, 1 line)
@@ -0,0 +1 @@
+uptime

rq/scripts/vgdisplay (new file, 1 line)
@@ -0,0 +1 @@
+vgdisplay

rq/scripts/yum-list-installed (new file, 1 line)
@@ -0,0 +1 @@
+yum list installed

rq/scripts/yum-v-repolist (new file, 1 line)
@@ -0,0 +1 @@
+yum -v repolist
setup.py (8 changed lines)
@@ -24,7 +24,7 @@ pname = project_name
 dtm = os.path.join(os.path.abspath(os.sep), 'usr', 'share', pname)
 rqfiles = [(os.path.join(dtm, root), [os.path.join(root, f) for f in files])
            for root, dirs, files in os.walk('rq')]
-rqfiles.append((os.path.join(dtm, 'configs'), ['config.yaml', 'rq.yaml']))
+rqfiles.append((os.path.join(dtm, 'configs'), ['config.yaml']))
 package_data = True
 
 if os.environ.get("READTHEDOCS", False):
@@ -38,9 +38,9 @@ setup(name=pname,
       author_email='dobdin@gmail.com',
       license='Apache2',
       url='https://github.com/adobdin/timmy',
-      description = ('Mirantis OpenStack Ansible-like tool for parallel node '
+      description=('Mirantis OpenStack Ansible-like tool for parallel node '
                    'operations: two-way data transfer, log collection, '
                    'remote command execution'),
       long_description=open('README.md').read(),
       packages=[pname],
       install_requires=['pyyaml'],
timmy/cli.py (162 changed lines)
@@ -22,6 +22,7 @@ import sys
 import os
 from timmy.conf import load_conf
 from timmy.tools import interrupt_wrapper
+from timmy.env import version
 
 
 def pretty_run(quiet, msg, f, args=[], kwargs={}):
@@ -38,12 +39,17 @@ def parse_args():
     parser = argparse.ArgumentParser(description=('Parallel remote command'
                                                   ' execution and file'
                                                   ' manipulation tool'))
+    parser.add_argument('-V', '--version', action='store_true',
+                        help='Print Timmy version and exit.')
     parser.add_argument('-c', '--config',
                         help='Path to a YAML configuration file.')
     parser.add_argument('-j', '--nodes-json',
                         help=('Path to a json file retrieved via'
                               ' "fuel node --json". Useful to speed up'
                               ' initialization, skips "fuel node" call.'))
+    parser.add_argument('--fuel-ip', help='fuel ip address')
+    parser.add_argument('--fuel-user', help='fuel username')
+    parser.add_argument('--fuel-pass', help='fuel password')
     parser.add_argument('-o', '--dest-file',
                         help=('Output filename for the archive in tar.gz'
                               ' format for command outputs and collected'
@@ -57,12 +63,15 @@ def parse_args():
     parser.add_argument('-r', '--role', action='append',
                         help=('Can be specified multiple times.'
                               ' Run only on the specified role.'))
-    parser.add_argument('-d', '--days', type=int,
+    parser.add_argument('-i', '--id', type=int, action='append',
+                        help=('Can be specified multiple times.'
+                              ' Run only on the node(s) with given IDs.'))
+    parser.add_argument('-d', '--days', type=int, metavar='NUMBER',
                         help=('Define log collection period in days.'
                               ' Timmy will collect only logs updated on or'
                               ' more recently than today minus the given'
                               ' number of days. Default - 30.'))
-    parser.add_argument('-G', '--get', action='append',
+    parser.add_argument('-G', '--get', action='append', metavar='PATH',
                         help=('Enables shell mode. Can be specified multiple'
                               ' times. Filemask to collect via "scp -r".'
                               ' Result is placed into a folder specified'
@@ -80,20 +89,48 @@ def parse_args():
                               ' parameter. For help on shell mode, read'
                               ' timmy/conf.py.') % Node.skey)
     parser.add_argument('-P', '--put', nargs=2, action='append',
+                        metavar=('SOURCE', 'DESTINATION'),
                         help=('Enables shell mode. Can be specified multiple'
                               ' times. Upload filemask via "scp -r" to node(s).'
                               ' Each argument must contain two strings -'
                               ' source file/path/mask and dest. file/path.'
                               ' For help on shell mode, read timmy/conf.py.'))
-    parser.add_argument('--rqfile', help='Path to an rqfile in yaml format,'
-                        ' overrides default.')
-    parser.add_argument('-l', '--logs',
-                        help=('Collect logs from nodes. Logs are not collected'
-                              ' by default due to their size.'),
-                        action='store_true', dest='getlogs')
-    parser.add_argument('--fuel-ip', help='fuel ip address')
-    parser.add_argument('--fuel-user', help='fuel username')
-    parser.add_argument('--fuel-pass', help='fuel password')
+    parser.add_argument('-L', '--get-logs', nargs=3, action='append',
+                        metavar=('PATH', 'INCLUDE', 'EXCLUDE'),
+                        help=('Define specific logs to collect. Implies "-l".'
+                              ' Each -L option requires 3 values in the'
+                              ' following order: path, include, exclude.'
+                              ' See configuration doc for details on each of'
+                              ' these parameters. Values except path can be'
+                              ' skipped by passing empty strings. Example: -L'
+                              ' "/var/mylogs/" "" "exclude-string"'))
+    parser.add_argument('--rqfile', metavar='PATH', action='append',
+                        help=('Can be specified multiple times. Path to'
+                              ' rqfile(s) in yaml format, overrides default.'))
+    parser.add_argument('-l', '--logs', action='store_true',
+                        help=('Collect logs from nodes. Logs are not collected'
+                              ' by default due to their size.'))
+    parser.add_argument('--logs-no-default', action='store_true',
+                        help=('Do not use default log collection parameters,'
+                              ' only use what has been provided either via -L'
+                              ' or in rqfile(s). Implies "-l".'))
+    parser.add_argument('--logs-no-fuel-remote', action='store_true',
+                        help='Do not collect remote logs from Fuel.')
+    parser.add_argument('--logs-speed', type=int, metavar='MBIT/S',
+                        help=('Limit log collection bandwidth to 90%% of the'
+                              ' specified speed in Mbit/s.'))
+    parser.add_argument('--logs-speed-auto', action='store_true',
+                        help=('Limit log collection bandwidth to 90%% of local'
+                              ' admin interface speed. If speed detection'
+                              ' fails, a default value will be used. See'
+                              ' "logs_speed_default" in conf.py.'))
+    parser.add_argument('--logs-coeff', type=float, metavar='RATIO',
+                        help=('Estimated logs compression ratio - this value'
+                              ' is used during free space check. Set to a'
+                              ' lower value (default - 1.05) to collect logs'
+                              ' of a total size larger than locally available'
+                              '. Values lower than 0.3 are not recommended'
+                              ' and may result in filling up local disk.'))
     parser.add_argument('--fuel-proxy',
                         help='use os system proxy variables for fuelclient',
                         action='store_true')
@@ -121,23 +158,25 @@ def parse_args():
                               ' This option disables any -v parameters.'),
                         action='store_true')
     parser.add_argument('-m', '--maxthreads', type=int, default=100,
+                        metavar='NUMBER',
                         help=('Maximum simultaneous nodes for command'
                               'execution.'))
-    parser.add_argument('-L', '--logs-maxthreads', type=int, default=100,
+    parser.add_argument('--logs-maxthreads', type=int, default=100,
+                        metavar='NUMBER',
                         help='Maximum simultaneous nodes for log collection.')
     parser.add_argument('-t', '--outputs-timestamp',
-                        help='Add timestamp to outputs - allows accumulating'
+                        help=('Add timestamp to outputs - allows accumulating'
                               ' outputs of identical commands/scripts across'
                               ' runs. Only makes sense with --no-clean for'
-                             ' subsequent runs.',
+                              ' subsequent runs.'),
                         action='store_true')
     parser.add_argument('-T', '--dir-timestamp',
-                        help='Add timestamp to output folders (defined by'
+                        help=('Add timestamp to output folders (defined by'
                               ' "outdir" and "archive_dir" config options).'
                               ' Makes each run store results in new folders.'
                               ' This way Timmy will always preserve previous'
                               ' results. Do not forget to clean up the results'
-                             ' manually when using this option.',
+                              ' manually when using this option.'),
                         action='store_true')
     parser.add_argument('-v', '--verbose', action='count', default=0,
                         help=('This works for -vvvv, -vvv, -vv, -v, -v -v,'
@@ -145,9 +184,6 @@ def parse_args():
                               'selected if more -v are provided it will '
                               'step to INFO and DEBUG unless the option '
                               '-q(--quiet) is specified'))
-    parser.add_argument('--fuel-cli', action='store_true',
-                        help=('Use fuel command line client instead of '
-                              'fuelclient library'))
     return parser
 
 
@@ -157,6 +193,9 @@ def main(argv=None):
         argv = sys.argv
     parser = parse_args()
     args = parser.parse_args(argv[1:])
+    if args.version:
+        print(version)
+        sys.exit(0)
     loglevels = [logging.WARNING, logging.INFO, logging.DEBUG]
     if args.quiet and not args.log_file:
         args.verbose = 0
@@ -182,9 +221,22 @@ def main(argv=None):
     if args.no_clean:
         conf['clean'] = False
     if args.rqfile:
-        conf['rqfile'] = args.rqfile
+        conf['rqfile'] = []
+        for file in args.rqfile:
+            conf['rqfile'].append({'file': file, 'default': False})
     if args.days:
-        conf['logs']['start'] = -args.days
+        conf['logs_days'] = args.days
+    if args.logs_no_default:
+        conf['logs_no_default'] = True
+        args.logs = True
+    if args.logs_no_fuel_remote:
+        conf['logs_no_fuel_remote'] = True
+    if args.logs_speed or args.logs_speed_auto:
+        conf['logs_speed_limit'] = True
+    if args.logs_speed:
+        conf['logs_speed'] = abs(args.logs_speed)
+    if args.logs_coeff:
+        conf['logs_size_coefficient'] = args.logs_coeff
     if conf['shell_mode']:
         filter = conf['hard_filter']
         # config cleanup for shell mode
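Taken together, these branches translate the flat CLI options into the new config structures; a sketch of the outcome (file names are hypothetical examples)::

  conf = {}
  # timmy --rqfile a.yaml --rqfile b.yaml -d 7
  rqfile_args, days_arg = ['a.yaml', 'b.yaml'], 7
  conf['rqfile'] = [{'file': f, 'default': False} for f in rqfile_args]
  conf['logs_days'] = days_arg
  print(conf)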
@@ -209,10 +261,25 @@ def main(argv=None):
             conf[Node.fkey] = args.get
     else:
         filter = conf['soft_filter']
+    if args.get_logs:
+        # this code should be after 'shell_mode' which cleans logs too
+        args.logs = True
+        for logs in args.get_logs:
+            logs_conf = {}
+            logs_conf['path'] = logs[0]
+            if logs[1]:
+                logs_conf['include'] = [logs[1]]
+            if logs[2]:
+                logs_conf['exclude'] = [logs[2]]
+            if args.days:
+                logs_conf['start'] = args.days
+            conf['logs'].append(logs_conf)
     if args.role:
         filter['roles'] = args.role
     if args.env is not None:
         filter['cluster'] = [args.env]
+    if args.id:
+        filter['id'] = args.id
     if args.outputs_timestamp:
         conf['outputs_timestamp'] = True
     if args.dir_timestamp:
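For the -L example given in the option help, the loop above produces one entry in conf['logs'] shaped like this (a sketch; the empty include value is skipped)::

  # timmy -L "/var/mylogs/" "" "exclude-string" -d 7
  logs = ['/var/mylogs/', '', 'exclude-string']  # one -L triple
  logs_conf = {'path': logs[0]}
  if logs[1]:
      logs_conf['include'] = [logs[1]]  # skipped here: empty string
  if logs[2]:
      logs_conf['exclude'] = [logs[2]]
  logs_conf['start'] = 7                # from -d/--days
  print(logs_conf)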
@@ -220,14 +287,27 @@ def main(argv=None):
     if args.dest_file:
         conf['archive_dir'] = os.path.split(args.dest_file)[0]
         conf['archive_name'] = os.path.split(args.dest_file)[1]
-    if args.fuel_cli:
-        conf['fuelclient'] = False
     logger.info('Using rqdir: %s, rqfile: %s' %
                 (conf['rqdir'], conf['rqfile']))
     nm = pretty_run(args.quiet, 'Initializing node data',
                     NodeManager,
                     kwargs={'conf': conf, 'extended': args.extended,
                             'nodes_json': args.nodes_json})
+    if args.only_logs or args.logs:
+        size = pretty_run(args.quiet, 'Calculating logs size',
+                          nm.calculate_log_size, args=(args.maxthreads,))
+        if size == 0:
+            logger.warning('Size zero - no logs to collect.')
+            has_logs = False
+        else:
+            has_logs = True
+            print('Total logs size to collect: %dMB.' % (size / 1000))
+            enough_space = pretty_run(args.quiet, 'Checking free space',
+                                      nm.is_enough_space)
+            if not enough_space:
+                logger.error('Not enough space for logs in "%s", exiting.' %
+                             nm.conf['archive_dir'])
+                return 2
     if not args.only_logs:
         if nm.has(Node.pkey):
             pretty_run(args.quiet, 'Uploading files', nm.put_files)
@@ -240,23 +320,12 @@ def main(argv=None):
     if not args.no_archive and nm.has(*Node.conf_archive_general):
         pretty_run(args.quiet, 'Creating outputs and files archive',
                    nm.create_archive_general, args=(60,))
-    if args.only_logs or args.getlogs:
-        size = pretty_run(args.quiet, 'Calculating logs size',
-                          nm.calculate_log_size, args=(args.maxthreads,))
-        if size == 0:
-            logger.warning('Size zero - no logs to collect.')
-            return
-        enough = pretty_run(args.quiet, 'Checking free space',
-                            nm.is_enough_space)
-        if enough:
-            msg = 'Collecting and packing %dMB of logs' % (nm.alogsize / 1024)
-            pretty_run(args.quiet, msg, nm.get_logs,
-                       args=(conf['compress_timeout'],),
-                       kwargs={'maxthreads': args.logs_maxthreads,
-                               'fake': args.fake_logs})
-        else:
-            logger.warning(('Not enough space for logs in "%s", skipping'
-                            'log collection.') % nm.conf['archive_dir'])
+    if (args.only_logs or args.logs) and has_logs and enough_space:
+        msg = 'Collecting and packing logs'
+        pretty_run(args.quiet, msg, nm.get_logs,
+                   args=(conf['compress_timeout'],),
+                   kwargs={'maxthreads': args.logs_maxthreads,
+                           'fake': args.fake_logs})
     logger.info("Nodes:\n%s" % nm)
     if not args.quiet:
         print('Run complete. Node information:')
@@ -273,7 +342,6 @@ def main(argv=None):
     if all([not args.no_archive, nm.has(*Node.conf_archive_general),
             not args.quiet]):
         print('Archives available in "%s".' % nm.conf['archive_dir'])
-    return 0
 
 
 if __name__ == '__main__':
-    exit(main(sys.argv))
+    sys.exit(main(sys.argv))
@@ -24,30 +24,31 @@ def load_conf(filename):
     """Configuration parameters"""
     conf = {}
     conf['hard_filter'] = {}
-    conf['soft_filter'] = {'status': ['ready', 'discover'], 'online': True}
+    conf['soft_filter'] = {'no_status': ['deploying'], 'online': True}
     conf['ssh_opts'] = ['-oConnectTimeout=2', '-oStrictHostKeyChecking=no',
                         '-oUserKnownHostsFile=/dev/null', '-oLogLevel=error',
                         '-lroot', '-oBatchMode=yes']
-    conf['env_vars'] = ['OPENRC=/root/openrc', 'IPTABLES_STR="iptables -nvL"']
+    conf['env_vars'] = ['OPENRC=/root/openrc', 'IPTABLES_STR="iptables -nvL"',
+                        'LC_ALL="C"', 'LANG="C"']
     conf['fuel_ip'] = '127.0.0.1'
     conf['fuel_user'] = 'admin'
+    conf['fuel_port'] = '8000'
     conf['fuel_pass'] = 'admin'
     conf['fuel_tenant'] = 'admin'
+    conf['fuel_keystone_port'] = '5000'
     conf['fuelclient'] = True  # use fuelclient library by default
     conf['fuel_skip_proxy'] = True
     conf['timeout'] = 15
     conf['prefix'] = 'nice -n 19 ionice -c 3'
     rqdir = 'rq'
-    rqfile = 'rq.yaml'
+    rqfile = 'default.yaml'
     dtm = os.path.join(os.path.abspath(os.sep), 'usr', 'share', 'timmy')
     if os.path.isdir(os.path.join(dtm, rqdir)):
         conf['rqdir'] = os.path.join(dtm, rqdir)
     else:
         conf['rqdir'] = rqdir
-    if os.path.isfile(os.path.join(dtm, 'configs', rqfile)):
-        conf['rqfile'] = os.path.join(dtm, 'configs', rqfile)
-    else:
-        conf['rqfile'] = rqfile
+    conf['rqfile'] = [{'file': os.path.join(conf['rqdir'], rqfile),
+                       'default': True}]
     conf['compress_timeout'] = 3600
     conf['outdir'] = os.path.join(gettempdir(), 'timmy', 'info')
     conf['archive_dir'] = os.path.join(gettempdir(), 'timmy', 'archives')
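Note that the new soft_filter default reuses the no_ negative-match form documented above: a node passes unless its status is 'deploying'. An illustrative check (a sketch, not the actual filter code)::

  soft_filter = {'no_status': ['deploying'], 'online': True}
  node_status, node_online = 'ready', True
  passes = node_status not in soft_filter['no_status'] and \
      node_online == soft_filter['online']
  print(passes)  # True; a node with status 'deploying' would be filtered out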
@@ -59,9 +60,19 @@ def load_conf(filename):
     conf['scripts'] = []
     conf['files'] = []
     conf['filelists'] = []
-    conf['logs'] = {'path': '/var/log',
-                    'exclude': '\.[^12]\.gz$|\.\d{2,}\.gz$',
-                    'start': '30'}
+    conf['logs'] = []
+    conf['logs_no_default'] = False  # skip logs defined in default.yaml
+    conf['logs_fuel_remote_dir'] = ['/var/log/docker-logs/remote',
+                                    '/var/log/remote']
+    conf['logs_no_fuel_remote'] = False  # do not collect /var/log/remote
+    '''Do not collect from /var/log/remote/<node>
+    if node is in the array of nodes filtered out by soft filter'''
+    conf['logs_exclude_filtered'] = True
+    conf['logs_days'] = 30
+    conf['logs_speed_limit'] = False  # enable speed limiting of log transfers
+    conf['logs_speed_default'] = 100  # Mbit/s, used when autodetect fails
+    conf['logs_speed'] = 0  # To manually specify max bandwidth in Mbit/s
+    conf['logs_size_coefficient'] = 1.05  # estimated logs compression ratio
     '''Shell mode - only run what was specified via command line.
     Skip actionable conf fields (see timmy/nodes.py -> Node.conf_actionable);
     Skip rqfile import;
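A rough picture of how logs_size_coefficient feeds the free-space check (an illustrative calculation, not the actual is_enough_space code)::

  logs_size = 10000    # MB of logs selected for collection (example value)
  coefficient = 1.05   # config default: estimated compression ratio
  required_free = logs_size * coefficient
  # collection proceeds only if local free space exceeds required_free;
  # lowering the coefficient below 0.3 risks filling up the local disk
  print(required_free)  # 10500.0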
@@ -16,7 +16,8 @@
 # under the License.
 
 project_name = 'timmy'
-version = '1.9.0'
+version = '1.17.1'
 
 if __name__ == '__main__':
-    exit(0)
+    import sys
+    sys.exit(0)
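With the bump in place, the new version is importable from the package (a quick check, assuming timmy is installed)::

  from timmy.env import project_name, version
  print(project_name + ' ' + version)  # timmy 1.17.1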
timmy/nodes.py (328 changed lines)
@@ -26,12 +26,25 @@ import logging
 import sys
 import re
 from datetime import datetime, date, timedelta
+import urllib2
 import tools
 from tools import w_list, run_with_lock
 from copy import deepcopy
 
 try:
-    from fuelclient.client import Client as FuelClient
+    import fuelclient
+    if hasattr(fuelclient, 'connect'):
+        # fuel > 9.0.1
+        from fuelclient import connect as FuelClient
+        FUEL_10 = True
+    else:
+        import fuelclient.client
+        if type(fuelclient.client.APIClient) is fuelclient.client.Client:
+            # fuel 9.0.1 and below
+            from fuelclient.client import Client as FuelClient
+            FUEL_10 = False
+        else:
+            FuelClient = None
 except:
     FuelClient = None
 
@@ -56,7 +69,6 @@ class Node(object):
     conf_once_prefix = 'once_'
     conf_match_prefix = 'by_'
     conf_default_key = '__default'
-    conf_priority_section = conf_match_prefix + 'id'
     header = ['node-id', 'env', 'ip', 'mac', 'os',
               'roles', 'online', 'status', 'name', 'fqdn']
 
@@ -118,39 +130,38 @@ class Node(object):
             else:
                 setattr(self, k, deepcopy(v))
 
-        def r_apply(el, p, p_s, c_a, k_d, o, d, clean=False):
+        def r_apply(el, p, c_a, k_d, o, d, clean=False):
             # apply normal attributes
-            for k in [k for k in el if k != p_s and not k.startswith(p)]:
+            for k in [k for k in el if not k.startswith(p)]:
                 if el == conf and clean:
                     apply(k, el[k], c_a, k_d, o, default=True)
                 else:
                     apply(k, el[k], c_a, k_d, o)
-            # apply match attributes (by_xxx except by_id)
-            for k in [k for k in el if k != p_s and k.startswith(p)]:
+            # apply match attributes
+            for k in [k for k in el if k.startswith(p)]:
                 attr_name = k[len(p):]
                 if hasattr(self, attr_name):
                     attr = w_list(getattr(self, attr_name))
+                    matching_keys = []
+                    # negative matching ("no_")
+                    for nk in [nk for nk in el[k] if nk.startswith('no_')]:
+                        key = nk[4:]
+                        if key not in attr:
+                            matching_keys.append(nk)
+                    # positive matching
                     for v in attr:
                         if v in el[k]:
-                            subconf = el[k][v]
-                            if d in el:
-                                d_conf = el[d]
-                                for a in d_conf:
-                                    apply(a, d_conf[a], c_a, k_d, o)
-                            r_apply(subconf, p, p_s, c_a, k_d, o, d)
-            # apply priority attributes (by_id)
-            if p_s in el:
-                if self.id in el[p_s]:
-                    p_conf = el[p_s][self.id]
-                    if d in el[p_s]:
-                        d_conf = el[p_s][d]
-                        for k in d_conf:
-                            apply(k, d_conf[k], c_a, k_d, o)
-                    for k in [k for k in p_conf if k != d]:
-                        apply(k, p_conf[k], c_a, k_d, o, default=True)
+                            matching_keys.append(v)
+                    # apply matching keys
+                    for mk in matching_keys:
+                        subconf = el[k][mk]
+                        if d in el:
+                            d_conf = el[d]
+                            for a in d_conf:
+                                apply(a, d_conf[a], c_a, k_d, o)
+                        r_apply(subconf, p, c_a, k_d, o, d)
 
         p = Node.conf_match_prefix
-        p_s = Node.conf_priority_section
         c_a = Node.conf_appendable
         k_d = Node.conf_keep_default
         d = Node.conf_default_key
@@ -160,7 +171,7 @@ class Node(object):
         duplication if this function gets called more than once'''
         for f in set(c_a).intersection(k_d):
             setattr(self, f, [])
-        r_apply(conf, p, p_s, c_a, k_d, overridden, d, clean=clean)
+        r_apply(conf, p, c_a, k_d, overridden, d, clean=clean)
 
     def get_release(self):
         if self.id == 0:
@@ -229,12 +240,12 @@ class Node(object):
             f = scr
         else:
             f = os.path.join(self.rqdir, Node.skey, scr)
-        self.logger.info('node:%s(%s), exec: %s' % (self.id, self.ip, f))
+        self.logger.debug('node:%s(%s), exec: %s' % (self.id, self.ip, f))
         dfile = os.path.join(ddir, 'node-%s-%s-%s' %
                              (self.id, self.ip, os.path.basename(f)))
         if self.outputs_timestamp:
             dfile += self.outputs_timestamp_str
-        self.logger.info('outfile: %s' % dfile)
+        self.logger.debug('outfile: %s' % dfile)
         mapscr[scr] = dfile
         if not fake:
             outs, errs, code = tools.ssh_node(ip=self.ip,
@@ -253,7 +264,7 @@ class Node(object):
         return mapcmds, mapscr
 
     def exec_simple_cmd(self, cmd, timeout=15, infile=None, outfile=None,
-                        fake=False, ok_codes=None, input=None):
+                        fake=False, ok_codes=None, input=None, decode=True):
         self.logger.info('node:%s(%s), exec: %s' % (self.id, self.ip, cmd))
         if not fake:
             outs, errs, code = tools.ssh_node(ip=self.ip,
@@ -263,6 +274,7 @@ class Node(object):
                                               timeout=timeout,
                                               outputfile=outfile,
                                               ok_codes=ok_codes,
+                                              decode=decode,
                                               input=input,
                                               prefix=self.prefix)
         self.check_code(code, 'exec_simple_cmd', cmd, errs, ok_codes)
@@ -314,18 +326,35 @@ class Node(object):
                                           recursive=True)
             self.check_code(code, 'put_files', 'tools.put_file_scp', errs)
 
-    def logs_populate(self, timeout=5):
+    def logs_populate(self, timeout=5, logs_excluded_nodes=[]):
 
         def filter_by_re(item, string):
-            return (('include' not in item or
-                     re.search(item['include'], string)) and
-                    ('exclude' not in item or not
-                     re.search(item['exclude'], string)))
+            return (('include' not in item or not item['include'] or
+                     any([re.search(i, string) for i in item['include']])) and
+                    ('exclude' not in item or not item['exclude'] or not
+                     any([re.search(e, string) for e in item['exclude']])))
 
         for item in self.logs:
-            start_str = ''
-            if 'start' in item:
-                start = item['start']
+            if self.logs_no_fuel_remote and 'fuel' in self.roles:
+                self.logger.debug('adding Fuel remote logs to exclude list')
+                if 'exclude' not in item:
+                    item['exclude'] = []
+                for remote_dir in self.logs_fuel_remote_dir:
+                    item['exclude'].append(remote_dir)
+            if 'fuel' in self.roles:
+                for n in logs_excluded_nodes:
+                    self.logger.debug('removing remote logs for node:%s' % n)
+                    if 'exclude' not in item:
+                        item['exclude'] = []
+                    for remote_dir in self.logs_fuel_remote_dir:
+                        ipd = os.path.join(remote_dir, n)
+                        item['exclude'].append(ipd)
+            start_str = None
+            if 'start' in item or hasattr(self, 'logs_days'):
+                if hasattr(self, 'logs_days') and 'start' not in item:
+                    start = self.logs_days
+                else:
+                    start = item['start']
             if any([type(start) is str and re.match(r'-?\d+', start),
                     type(start) is int]):
                 days = abs(int(str(start)))
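The reworked filter_by_re accepts lists of patterns on both sides and treats a missing or empty list as "match everything"; it can be exercised standalone (function body copied from the new code above; the item and paths are examples)::

  import re

  def filter_by_re(item, string):
      return (('include' not in item or not item['include'] or
               any([re.search(i, string) for i in item['include']])) and
              ('exclude' not in item or not item['exclude'] or not
               any([re.search(e, string) for e in item['exclude']])))

  item = {'include': ['neutron'], 'exclude': [r'\.gz$']}
  print(filter_by_re(item, '/var/log/neutron/server.log'))     # True
  print(filter_by_re(item, '/var/log/neutron/server.log.gz'))  # False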
@@ -354,7 +383,7 @@ class Node(object):
         outs, errs, code = tools.ssh_node(ip=self.ip,
                                           command=cmd,
                                           ssh_opts=self.ssh_opts,
-                                          env_vars='',
+                                          env_vars=self.env_vars,
                                           timeout=timeout,
                                           prefix=self.prefix)
         if code == 124:
@@ -427,9 +456,11 @@ class NodeManager(object):
         if self.conf['rqfile']:
             self.import_rq()
         self.nodes = {}
+        self.token = None
         self.fuel_init()
         # save os environment variables
         environ = os.environ
+        self.logs_excluded_nodes = []
         if FuelClient and conf['fuelclient']:
             try:
@@ -438,11 +469,19 @@ class NodeManager(object):
                     os.environ['https_proxy'] = ''
                     os.environ['http_proxy'] = ''
                 self.logger.info('Setup fuelclient instance')
-                self.fuelclient = FuelClient()
-                self.fuelclient.username = self.conf['fuel_user']
-                self.fuelclient.password = self.conf['fuel_pass']
-                self.fuelclient.tenant_name = self.conf['fuel_tenant']
-                # self.fuelclient.debug_mode(True)
+                if FUEL_10:
+                    self.fuelclient = FuelClient(
+                        host=self.conf['fuel_ip'],
+                        port=self.conf['fuel_port'],
+                        os_username=self.conf['fuel_user'],
+                        os_password=self.conf['fuel_pass'],
+                        os_tenant_name=self.conf['fuel_tenant'])
+                else:
+                    self.fuelclient = FuelClient()
+                    self.fuelclient.username = self.conf['fuel_user']
+                    self.fuelclient.password = self.conf['fuel_pass']
+                    self.fuelclient.tenant_name = self.conf['fuel_tenant']
+                # self.fuelclient.debug_mode(True)
             except Exception as e:
                 self.logger.info('Failed to setup fuelclient instance:%s' % e,
                                  exc_info=True)
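
The branch above exists because the two fuelclient generations are initialized differently. A condensed sketch of just that branching, assuming FuelClient and the FUEL_10 flag are the ones imported and derived elsewhere in this module:

    def make_fuelclient(conf, FuelClient, FUEL_10):
        # Fuel 10 fuelclient accepts connection settings as constructor
        # arguments; older clients get them assigned after instantiation
        if FUEL_10:
            return FuelClient(host=conf['fuel_ip'],
                              port=conf['fuel_port'],
                              os_username=conf['fuel_user'],
                              os_password=conf['fuel_pass'],
                              os_tenant_name=conf['fuel_tenant'])
        client = FuelClient()
        client.username = conf['fuel_user']
        client.password = conf['fuel_pass']
        client.tenant_name = conf['fuel_tenant']
        return client
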
@@ -454,6 +493,7 @@ class NodeManager(object):
             self.nodes_json = tools.load_json_file(nodes_json)
         else:
             if (not self.get_nodes_fuelclient() and
+                    not self.get_nodes_api() and
                     not self.get_nodes_cli()):
                 sys.exit(4)
         self.nodes_init()
@@ -461,18 +501,16 @@ class NodeManager(object):
         for node in self.nodes.values():
             if not self.filter(node, self.conf['soft_filter']):
                 node.filtered_out = True
-        if not conf['shell_mode']:
-            if not self.get_release_fuel_client():
-                self.get_release_cli()
+                if self.conf['logs_exclude_filtered']:
+                    self.logs_excluded_nodes.append(node.fqdn)
+                    self.logs_excluded_nodes.append(node.ip)
+        if (not self.get_release_fuel_client() and
+                not self.get_release_api() and
+                not self.get_release_cli()):
+            self.logger.warning('could not get Fuel and MOS versions')
+        else:
             self.nodes_reapply_conf()
             self.conf_assign_once()
-        if extended:
-            self.logger.info('NodeManager: extended mode enabled')
-            '''TO-DO: load smth like extended.yaml
-            do additional apply_conf(clean=False) with this yaml.
-            Move some stuff from rq.yaml to extended.yaml'''
-            pass
-        # restore os environment variables
         os.environ = environ
 
     def __str__(self):
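
The chained `and` above acts as an ordered fallback: each get_release_* helper returns True on success, so later helpers only run when the earlier ones fail, and the warning fires only when all three fail. The same pattern in isolation:

    def first_success(*providers):
        # run providers in order, stop at the first that succeeds
        return any(provider() for provider in providers)

    ok = first_success(lambda: False,   # e.g. fuelclient not installed
                       lambda: False,   # e.g. API auth failed
                       lambda: True)    # CLI worked
    print(ok)  # True
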
@@ -525,7 +563,11 @@ class NodeManager(object):
                     dst[k] = {}
                 if d in el[k]:
                     if k == attr:
-                        dst[k] = el[k][d]
+                        if k in Node.conf_appendable:
+                            dst[k] = w_list(dst[k])
+                            dst[k] += w_list(el[k][d])
+                        else:
+                            dst[k] = el[k][d]
                     elif k.startswith(p) or k.startswith(once_p):
                         dst[k][d] = {attr: el[k][d]}
                     else:
@@ -544,13 +586,25 @@ class NodeManager(object):
                 else:
                     dst[k][attr] = el[k]
 
+        def merge_rq(rqfile, dst):
+            file = rqfile['file']
+            if os.path.sep in file:
+                src = tools.load_yaml_file(file)
+            else:
+                f = os.path.join(self.rqdir, file)
+                src = tools.load_yaml_file(f)
+            if self.conf['logs_no_default'] and rqfile['default']:
+                if 'logs' in src:
+                    src.pop('logs')
+            p = Node.conf_match_prefix
+            once_p = Node.conf_once_prefix + p
+            d = Node.conf_default_key
+            for attr in src:
+                r_sub(attr, src, attr, d, p, once_p, dst)
+
         dst = self.conf
-        src = tools.load_yaml_file(self.conf['rqfile'])
-        p = Node.conf_match_prefix
-        once_p = Node.conf_once_prefix + p
-        d = Node.conf_default_key
-        for attr in src:
-            r_sub(attr, src, attr, d, p, once_p, dst)
+        for rqfile in self.conf['rqfile']:
+            merge_rq(rqfile, dst)
 
     def fuel_init(self):
         if not self.conf['fuel_ip']:
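
merge_rq reads a 'file' path and a 'default' flag from every entry of self.conf['rqfile'], so the rqfile option has evidently changed from a single path to a list of mappings. An illustrative value (the second filename is hypothetical):

    conf['rqfile'] = [
        {'file': './rq.yaml', 'default': True},         # 'logs' section dropped
                                                        # when logs_no_default is set
        {'file': './custom-rq.yaml', 'default': False}
    ]

Entries without a path separator are resolved against rqdir; paths containing a separator are loaded as given.
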
@@ -576,8 +630,8 @@ class NodeManager(object):
         if not self.fuelclient:
             return False
         try:
+            self.logger.info('using fuelclient to get nodes json')
             self.nodes_json = self.fuelclient.get_request('nodes')
-            self.logger.debug(self.nodes_json)
             return True
         except Exception as e:
             self.logger.warning(("NodeManager: can't "
@@ -585,11 +639,28 @@ class NodeManager(object):
                                 exc_info=True)
             return False
 
+    def get_release_api(self):
+        self.logger.info('getting release via API')
+        version_json = self.get_api_request('version')
+        if version_json:
+            version = json.loads(version_json)
+            fuel = self.nodes[self.conf['fuel_ip']]
+            fuel.release = version['release']
+        else:
+            return False
+        clusters_json = self.get_api_request('clusters')
+        if clusters_json:
+            clusters = json.loads(clusters_json)
+            self.set_nodes_release(clusters)
+            return True
+        else:
+            return False
+
     def get_release_fuel_client(self):
         if not self.fuelclient:
             return False
+        self.logger.info('getting release via fuelclient')
         try:
-            self.logger.info('getting release from fuel')
             v = self.fuelclient.get_request('version')
             fuel_version = v['release']
             self.logger.debug('version response:%s' % v)
@@ -600,6 +671,10 @@ class NodeManager(object):
                                      "clusters information"))
                 return False
             self.nodes[self.conf['fuel_ip']].release = fuel_version
+            self.set_nodes_release(clusters)
+            return True
+
+    def set_nodes_release(self, clusters):
         cldict = {}
         for cluster in clusters:
             cldict[cluster['id']] = cluster
@@ -613,10 +688,81 @@ class NodeManager(object):
                 node.release = 'n/a'
             self.logger.info('node: %s - release: %s' % (node.id,
                                                          node.release))
-        return True
+
+    def auth_token(self):
+        '''Get keystone token to access Nailgun API. Requires Fuel 5+'''
+        if self.token:
+            return True
+        v2_body = ('{"auth": {"tenantName": "%s", "passwordCredentials": {'
+                   '"username": "%s", "password": "%s"}}}')
+        # v3 not fully implemented yet
+        v3_body = ('{ "auth": {'
+                   '  "scope": {'
+                   '    "project": {'
+                   '      "name": "%s",'
+                   '      "domain": { "id": "default" }'
+                   '    }'
+                   '  },'
+                   '  "identity": {'
+                   '    "methods": ["password"],'
+                   '    "password": {'
+                   '      "user": {'
+                   '        "name": "%s",'
+                   '        "domain": { "id": "default" },'
+                   '        "password": "%s"'
+                   '      }'
+                   '    }'
+                   '  }'
+                   '}}')
+        # Sticking to v2 API for now because Fuel 9.1 has a custom
+        # domain_id defined in keystone.conf which we do not know.
+        req_data = v2_body % (self.conf['fuel_tenant'],
+                              self.conf['fuel_user'],
+                              self.conf['fuel_pass'])
+        req = urllib2.Request("http://%s:%s/v2.0/tokens" %
+                              (self.conf['fuel_ip'],
+                               self.conf['fuel_keystone_port']), req_data,
+                              {'Content-Type': 'application/json'})
+        try:
+            ### Disabling v3 token retrieval for now
+            # token = urllib2.urlopen(req).info().getheader('X-Subject-Token')
+            result = urllib2.urlopen(req)
+            resp_body = result.read()
+            resp_json = json.loads(resp_body)
+            token = resp_json['access']['token']['id']
+            self.token = token
+            return True
+        except:
+            return False
+
+    def get_api_request(self, request):
+        if self.auth_token():
+            url = "http://%s:%s/api/%s" % (self.conf['fuel_ip'],
+                                           self.conf['fuel_port'],
+                                           request)
+            req = urllib2.Request(url, None, {'X-Auth-Token': self.token})
+            try:
+                result = urllib2.urlopen(req)
+                code = result.getcode()
+                if code == 200:
+                    return result.read()
+                else:
+                    self.logger.error('NodeManager: cannot get API response'
+                                      ' from %s, code %s' % (url, code))
+            except:
+                pass
+
+    def get_nodes_api(self):
+        self.logger.info('using API to get nodes json')
+        nodes_json = self.get_api_request('nodes')
+        if nodes_json:
+            self.nodes_json = json.loads(nodes_json)
+            return True
+        else:
+            return False
 
     def get_nodes_cli(self):
-        self.logger.info('use CLI for getting node information')
+        self.logger.info('using CLI to get nodes json')
         fuelnode = self.nodes[self.conf['fuel_ip']]
         fuel_node_cmd = ('fuel node list --json --user %s --password %s' %
                          (self.conf['fuel_user'],
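
auth_token builds the keystone v2.0 request by hand with urllib2 rather than depending on a keystone client. A trimmed standalone version of the same call (Python 2; endpoint and credentials below are placeholders):

    import json
    import urllib2

    def get_v2_token(ip, port, tenant, user, password):
        body = ('{"auth": {"tenantName": "%s", "passwordCredentials": {'
                '"username": "%s", "password": "%s"}}}' % (tenant, user, password))
        req = urllib2.Request('http://%s:%s/v2.0/tokens' % (ip, port), body,
                              {'Content-Type': 'application/json'})
        resp = json.loads(urllib2.urlopen(req).read())
        return resp['access']['token']['id']   # v2.0 token id lives here

    # token = get_v2_token('10.20.0.2', 5000, 'admin', 'admin', 'admin')

Subsequent get_api_request calls then pass that token in the X-Auth-Token header.
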
@@ -734,8 +880,10 @@ class NodeManager(object):
         run_items = []
         for key, node in self.nodes.items():
             if not node.filtered_out:
+                args = {'timeout': timeout,
+                        'logs_excluded_nodes': self.logs_excluded_nodes}
                 run_items.append(tools.RunItem(target=node.logs_populate,
-                                               args={'timeout': timeout},
+                                               args=args,
                                                key=key))
         result = tools.run_batch(run_items, maxthreads, dict_result=True)
         for key in result:
@@ -747,7 +895,7 @@ class NodeManager(object):
         self.alogsize = total_size / 1024
         return self.alogsize
 
-    def is_enough_space(self, coefficient=1.2):
+    def is_enough_space(self):
         tools.mdir(self.conf['outdir'])
         outs, errs, code = tools.free_space(self.conf['outdir'], timeout=1)
         if code != 0:
@@ -759,10 +907,16 @@ class NodeManager(object):
             self.logger.error("can't get free space\nouts: %s" %
                               outs)
             return False
-        self.logger.info('logsize: %s Kb, free space: %s Kb' %
-                         (self.alogsize, fs))
-        if (self.alogsize*coefficient > fs):
-            self.logger.error('Not enough space on device')
+        coeff = self.conf['logs_size_coefficient']
+        self.logger.info('logsize: %s Kb * %s, free space: %s Kb' %
+                         (self.alogsize, coeff, fs))
+        if (self.alogsize*coeff > fs):
+            self.logger.error('Not enough space in "%s", logsize: %s Kb * %s, '
+                              'available: %s Kb. Decrease logs_size_coefficien'
+                              't config parameter (--logs-coeff CLI parameter)'
+                              ' or free up space.' % (self.conf['outdir'],
+                                                      self.alogsize, coeff,
+                                                      fs))
             return False
         else:
             return True
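
is_enough_space now multiplies the measured log size by the configurable logs_size_coefficient instead of the old hard-coded 1.2 default. Worked example with illustrative numbers:

    alogsize = 4000000   # Kb of logs estimated on the nodes
    fs = 5000000         # Kb free in outdir
    coeff = 1.3          # logs_size_coefficient (--logs-coeff)

    # 4000000 * 1.3 = 5200000 Kb required > 5000000 Kb available
    if alogsize * coeff > fs:
        print('not enough space: free up space or lower the coefficient')
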
@@ -782,7 +936,7 @@ class NodeManager(object):
         if code != 0:
             self.logger.error("Can't create archive %s" % (errs))
 
-    def find_adm_interface_speed(self, defspeed):
+    def find_adm_interface_speed(self):
         '''Returns interface speed through which logs will be dowloaded'''
         for node in self.nodes.values():
             if not (node.ip == 'localhost' or node.ip.startswith('127.')):
@@ -790,23 +944,27 @@ class NodeManager(object):
                                ('cat /sys/class/net/', node.ip))
                 out, err, code = tools.launch_cmd(cmd, node.timeout)
                 if code != 0:
-                    self.logger.error("can't get iface speed: error: %s" % err)
-                    return defspeed
+                    self.logger.warning("can't get iface speed: err: %s" % err)
+                    return self.conf['logs_speed_default']
                 try:
                     speed = int(out)
                 except:
-                    speed = defspeed
+                    speed = self.conf['logs_speed_default']
                 return speed
 
     @run_with_lock
-    def get_logs(self, timeout, fake=False, maxthreads=10, speed=100):
+    def get_logs(self, timeout, fake=False, maxthreads=10):
         if fake:
             self.logger.info('fake = True, skipping')
             return
-        txtfl = []
-        speed = self.find_adm_interface_speed(speed)
-        speed = int(speed * 0.9 / min(maxthreads, len(self.nodes)))
-        pythonslowpipe = tools.slowpipe % speed
+        if self.conf['logs_speed_limit']:
+            if self.conf['logs_speed'] > 0:
+                speed = self.conf['logs_speed']
+            else:
+                speed = self.find_adm_interface_speed()
+            speed = int(speed * 0.9 / min(maxthreads, len(self.nodes)))
+            py_slowpipe = tools.slowpipe % speed
+            limitcmd = "| python -c '%s'; exit ${PIPESTATUS}" % py_slowpipe
         run_items = []
         for node in [n for n in self.nodes.values() if not n.filtered_out]:
             if not node.logs_dict():
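
When the limit is active, a configured logs_speed wins; otherwise the admin interface speed is detected. The value is then derated to 90% and split across the transfers that run concurrently. The arithmetic in isolation (values are illustrative; interface speeds under /sys/class/net are reported in Mbit/s):

    maxthreads = 10
    node_count = 25          # nodes whose logs will be fetched
    speed = 1000             # detected or configured link speed, Mbit/s

    # at most min(maxthreads, node_count) transfers run at once,
    # so each one gets an equal share of 90% of the link
    per_transfer = int(speed * 0.9 / min(maxthreads, node_count))
    print(per_transfer)      # 90
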
@@ -822,22 +980,18 @@ class NodeManager(object):
                 input += '%s\0' % fn.lstrip(os.path.abspath(os.sep))
             cmd = ("tar --gzip -C %s --create --warning=no-file-changed "
                    " --file - --null --files-from -" % os.path.abspath(os.sep))
-            if not (node.ip == 'localhost' or node.ip.startswith('127.')):
-                cmd = ' '.join([cmd, "| python -c '%s'; exit ${PIPESTATUS}" %
-                                pythonslowpipe])
+            if self.conf['logs_speed_limit']:
+                if not (node.ip == 'localhost' or node.ip.startswith('127.')):
+                    cmd = ' '.join([cmd, limitcmd])
             args = {'cmd': cmd,
                     'timeout': timeout,
                     'outfile': node.archivelogsfile,
                     'input': input,
-                    'ok_codes': [0, 1]}
+                    'ok_codes': [0, 1],
+                    'decode': False}
             run_items.append(tools.RunItem(target=node.exec_simple_cmd,
                                            args=args))
         tools.run_batch(run_items, maxthreads)
-        for tfile in txtfl:
-            try:
-                os.remove(tfile)
-            except:
-                self.logger.error("can't delete file %s" % tfile)
 
     @run_with_lock
     def get_files(self, timeout=15):
@@ -870,4 +1024,4 @@ def main(argv=None):
     return 0
 
 if __name__ == '__main__':
-    exit(main(sys.argv))
+    sys.exit(main(sys.argv))
@@ -196,7 +196,7 @@ def mdir(directory):
         sys.exit(3)
 
 
-def launch_cmd(cmd, timeout, input=None, ok_codes=None):
+def launch_cmd(cmd, timeout, input=None, ok_codes=None, decode=True):
     def _timeout_terminate(pid):
         try:
             os.kill(pid, 15)
@@ -204,30 +204,23 @@ def launch_cmd(cmd, timeout, input=None, ok_codes=None):
         except:
             pass
 
-    logger.info('launching cmd %s' % cmd)
+    logger.debug('cmd: %s' % cmd)
     p = subprocess.Popen(cmd,
                          shell=True,
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
     timeout_killer = None
+    outs = None
+    errs = None
     try:
         timeout_killer = threading.Timer(timeout, _timeout_terminate, [p.pid])
         timeout_killer.start()
         outs, errs = p.communicate(input=input)
-        outs = outs.decode('utf-8')
-        errs = errs.decode('utf-8')
-        errs = errs.rstrip('\n')
-    except:
-        try:
-            p.kill()
-        except:
-            pass
-        p.stdin = None
-        outs, errs = p.communicate()
-        outs = outs.decode('utf-8')
-        errs = errs.decode('utf-8')
         errs = errs.rstrip('\n')
+        if decode:
+            outs = outs.decode('utf-8')
+            errs = errs.decode('utf-8')
     finally:
         if timeout_killer:
             timeout_killer.cancel()
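
Decoding is now optional because callers such as get_logs stream binary data (a gzipped tar) through launch_cmd, and an unconditional .decode('utf-8') would raise on non-UTF-8 bytes; get_logs therefore passes 'decode': False. A minimal illustration, separate from the repository code:

    import subprocess

    # emit the gzip magic bytes 1f 8b 08 via octal escapes
    p = subprocess.Popen('printf "\\037\\213\\010"', shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    outs, errs = p.communicate()

    decode = False           # keep gzip/tar streams as raw bytes
    if decode:
        outs = outs.decode('utf-8')   # would fail on this binary output
    print(repr(outs))
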
@@ -235,15 +228,14 @@ def launch_cmd(cmd, timeout, input=None, ok_codes=None):
     logger.debug(('___command: %s\n'
                   '_exit_code: %s\n'
                   '_____stdin: %s\n'
-                  '____stdout: %s\n'
-                  '____stderr: %s') % (cmd, p.returncode, input, outs,
+                  '____stderr: %s') % (cmd, p.returncode, input,
                                        errs))
     return outs, errs, p.returncode
 
 
 def ssh_node(ip, command='', ssh_opts=None, env_vars=None, timeout=15,
              filename=None, inputfile=None, outputfile=None,
-             ok_codes=None, input=None, prefix=None):
+             ok_codes=None, input=None, prefix=None, decode=True):
     if not ssh_opts:
         ssh_opts = ''
     if not env_vars:
@@ -253,11 +245,10 @@ def ssh_node(ip, command='', ssh_opts=None, env_vars=None, timeout=15,
     if type(env_vars) is list:
         env_vars = ' '.join(env_vars)
     if (ip in ['localhost', '127.0.0.1']) or ip.startswith('127.'):
-        logger.info("skip ssh")
+        logger.debug("skip ssh")
         bstr = "%s timeout '%s' bash -c " % (
             env_vars, timeout)
     else:
-        logger.info("exec ssh")
         bstr = "timeout '%s' ssh -t -T %s '%s' '%s' " % (
             timeout, ssh_opts, ip, env_vars)
     if filename is None:
@@ -269,13 +260,14 @@ def ssh_node(ip, command='', ssh_opts=None, env_vars=None, timeout=15,
             cmd = "%s < '%s'" % (cmd, inputfile)
     else:
         cmd = "%s'%s bash -s' < '%s'" % (bstr, prefix, filename)
-        logger.info("inputfile selected, cmd: %s" % cmd)
     if outputfile is not None:
         cmd = "%s > '%s'" % (cmd, outputfile)
+    logger.info("cmd: %s" % cmd)
     cmd = ("input=\"$(cat | xxd -p)\"; trap 'kill $pid' 15; " +
            "trap 'kill $pid' 2; echo -n \"$input\" | xxd -r -p | " + cmd +
            ' &:; pid=$!; wait $!')
-    return launch_cmd(cmd, timeout, input=input, ok_codes=ok_codes)
+    return launch_cmd(cmd, timeout, input=input,
+                      ok_codes=ok_codes, decode=decode)
 
 
 def get_files_rsync(ip, data, ssh_opts, dpath, timeout=15):
@@ -324,4 +316,4 @@ def w_list(value):
 
 
 if __name__ == '__main__':
-    exit(0)
+    sys.exit(0)