Refactored inventory script

Change-Id: I78dbee41071bbfa8040ee13d662c1ba0fbdc10a5
Jakob Meng 2023-01-13 08:41:23 +01:00
parent d5ab2bf33f
commit 70c029fa50
5 changed files with 770 additions and 320 deletions


@@ -0,0 +1,2 @@
[inventory]
enable_plugins=openstack.cloud.openstack
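
For reference, a minimal sketch of an openstack.yaml inventory config that this setting allows ansible-inventory to load from the same directory; the devstack-admin cloud name is an assumption for illustration, not part of this change:

plugin: openstack.cloud.openstack
only_clouds:
  - devstack-admin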


@@ -0,0 +1,393 @@
---
- module_defaults:
    group/openstack.cloud.openstack:
      cloud: "{{ cloud }}"
    # Listing modules individually is required for
    # backward compatibility with Ansible 2.9 only
    openstack.cloud.resource:
      cloud: "{{ cloud }}"
    openstack.cloud.resources:
      cloud: "{{ cloud }}"
    openstack.cloud.router:
      cloud: "{{ cloud }}"
  block:
    - name: Create external network
      openstack.cloud.resource:
        service: network
        type: network
        attributes:
          name: ansible_network_external
          is_router_external: true
        wait: true
      register: network_external

    - name: Create external subnet
      openstack.cloud.resource:
        service: network
        type: subnet
        attributes:
          cidr: 10.6.6.0/24
          ip_version: 4
          name: ansible_external_subnet
          network_id: "{{ network_external.resource.id }}"
      register: subnet_external

    - name: Create external port
      openstack.cloud.resource:
        service: network
        type: port
        attributes:
          name: ansible_port_external
          network_id: "{{ network_external.resource.id }}"
          fixed_ips:
            - ip_address: 10.6.6.50
        non_updateable_attributes:
          - fixed_ips
      register: port_external

    - name: Create internal network
      openstack.cloud.resource:
        service: network
        type: network
        attributes:
          name: ansible_network_internal
          is_router_external: false
        wait: true
      register: network_internal

    - name: Create internal subnet
      openstack.cloud.resource:
        service: network
        type: subnet
        attributes:
          cidr: 10.7.7.0/24
          ip_version: 4
          name: ansible_internal_subnet
          network_id: "{{ network_internal.resource.id }}"
      register: subnet_internal

    - name: Create internal port 1
      openstack.cloud.resource:
        service: network
        type: port
        attributes:
          name: ansible_port_internal1
          network_id: "{{ network_internal.resource.id }}"
          fixed_ips:
            - ip_address: 10.7.7.100
              subnet_id: "{{ subnet_internal.resource.id }}"
      register: port_internal1

    - name: Create internal port 2
      openstack.cloud.resource:
        service: network
        type: port
        attributes:
          name: ansible_port_internal2
          network_id: "{{ network_internal.resource.id }}"
          fixed_ips:
            - ip_address: 10.7.7.101
              subnet_id: "{{ subnet_internal.resource.id }}"
      register: port_internal2

    - name: Create router
      openstack.cloud.resource:
        service: network
        type: router
        attributes:
          name: ansible_router
          external_gateway_info:
            enable_snat: true
            external_fixed_ips:
              - ip_address: 10.6.6.10
                subnet_id: "{{ subnet_external.resource.id }}"
            network_id: "{{ network_external.resource.id }}"
        wait: true
      register: router

    - name: Attach router to internal subnet
      openstack.cloud.router:
        name: ansible_router
        network: "{{ network_external.resource.id }}"
        external_fixed_ips:
          - ip: 10.6.6.10
            subnet: "{{ subnet_external.resource.id }}"
        interfaces:
          - net: "{{ network_internal.resource.id }}"
            subnet: "{{ subnet_internal.resource.id }}"
            portip: 10.7.7.1

    - name: Create floating ip address 1
      openstack.cloud.resource:
        service: network
        type: ip
        attributes:
          name: 10.6.6.150
          floating_ip_address: 10.6.6.150
          floating_network_id: "{{ network_external.resource.id }}"
          port_id: "{{ port_internal1.resource.id }}"
      register: ip1

    - name: List images
      openstack.cloud.resources:
        service: image
        type: image
      register: images

    - name: Identify CirrOS image id
      set_fact:
        image_id: "{{ images.resources|community.general.json_query(query)|first }}"
      vars:
        query: "[?starts_with(name, 'cirros')].id"

    - name: List compute flavors
      openstack.cloud.resources:
        service: compute
        type: flavor
      register: flavors

    - name: Identify m1.tiny flavor id
      set_fact:
        flavor_id: "{{ flavors.resources|community.general.json_query(query)|first }}"
      vars:
        query: "[?name == 'm1.tiny'].id"

    - name: Create server 1
      openstack.cloud.resource:
        service: compute
        type: server
        attributes:
          name: ansible_server1
          image_id: "{{ image_id }}"
          flavor_id: "{{ flavor_id }}"
          networks:
            - uuid: "{{ network_internal.resource.id }}"
              port: "{{ port_internal1.resource.id }}"
            - uuid: "{{ network_internal.resource.id }}"
              port: "{{ port_internal2.resource.id }}"
        non_updateable_attributes:
          - name
          - image_id
          - flavor_id
          - networks
        wait: true
      register: server1

    - name: Create server 2
      openstack.cloud.resource:
        service: compute
        type: server
        attributes:
          name: ansible_server2
          image_id: "{{ image_id }}"
          flavor_id: "{{ flavor_id }}"
          networks:
            - uuid: "{{ network_internal.resource.id }}"
        non_updateable_attributes:
          - name
          - image_id
          - flavor_id
          - networks
        wait: true
      register: server2

    - name: Run inventory plugin tests
      always:
        - name: Remove temporary inventory directory after block execution
          ansible.builtin.file:
            path: "{{ tmp_dir.path }}"
            state: absent
          when: tmp_dir is defined and 'path' in tmp_dir
      block:
        - name: Ensure clean environment
          ansible.builtin.set_fact:
            tmp_dir: !!null

        - name: Create temporary inventory directory
          ansible.builtin.tempfile:
            state: directory
          register: tmp_dir

        - name: Copy ansible.cfg file
          ansible.builtin.copy:
            src: ansible.cfg
            dest: '{{ tmp_dir.path }}/'
            mode: '0644'

        - name: Create inventory config file
          ansible.builtin.template:
            src: openstack.yaml.j2
            dest: '{{ tmp_dir.path }}/openstack.yaml'
            mode: '0644'

        - name: List servers with inventory plugin
          ansible.builtin.command:
            cmd: ansible-inventory --list --yaml --inventory-file openstack.yaml
            chdir: "{{ tmp_dir.path }}"
          environment:
            ANSIBLE_INVENTORY_CACHE: "True"
            ANSIBLE_INVENTORY_CACHE_PLUGIN: "jsonfile"
            ANSIBLE_CACHE_PLUGIN_CONNECTION: "{{ tmp_dir.path }}/.cache/"
          register: inventory

        - name: Read YAML output from inventory plugin
          ansible.builtin.set_fact:
            inventory: "{{ inventory.stdout | from_yaml }}"

        - name: Check YAML output from inventory plugin
          assert:
            that:
              - inventory.all.children.RegionOne.hosts.keys() | sort == ['ansible_server1', 'ansible_server2'] | sort
              - inventory.all.children.RegionOne.hosts.ansible_server1.ansible_host == '10.6.6.150'
              - "'10.7.7.' in inventory.all.children.RegionOne.hosts.ansible_server2.ansible_host"
              - inventory.all.children.RegionOne.hosts.ansible_server1.ci_compose_id
                == inventory.all.children.RegionOne.hosts.ansible_server1.openstack.id
              - inventory.all.children.RegionOne.hosts.ansible_server1.ci_compose_project_id
                == inventory.all.children.RegionOne.hosts.ansible_server1.openstack.project_id

        - name: Find Ansible's cache file
          ansible.builtin.find:
            paths: "{{ tmp_dir.path }}/.cache/"
            patterns: 'ansible_inventory_*'
          register: files

        - name: Assert a single cache file only
          assert:
            that:
              - files.files | length == 1

        - name: Read Ansible's cache file
          ansible.builtin.slurp:
            src: "{{ files.files.0.path }}"
          register: cache

        - name: Process Ansible cache
          ansible.builtin.set_fact:
            cache: "{{ cache.content | b64decode | from_yaml }}"

        - name: Check Ansible's cache
          assert:
            that:
              - cache | map(attribute='name') | list | sort == ['ansible_server1', 'ansible_server2'] | sort

        - name: List servers with inventory plugin again
          ansible.builtin.command:
            cmd: ansible-inventory --list --yaml --inventory-file openstack.yaml
            chdir: "{{ tmp_dir.path }}"
          environment:
            ANSIBLE_INVENTORY_CACHE: "True"
            ANSIBLE_INVENTORY_CACHE_PLUGIN: "jsonfile"
            ANSIBLE_CACHE_PLUGIN_CONNECTION: "{{ tmp_dir.path }}/.cache/"
          register: inventory

        - name: Read YAML output from inventory plugin again
          ansible.builtin.set_fact:
            inventory: "{{ inventory.stdout | from_yaml }}"

        - name: Check YAML output from inventory plugin again
          assert:
            that:
              - inventory.all.children.RegionOne.hosts.keys() | sort == ['ansible_server1', 'ansible_server2'] | sort

    - name: Delete server 2
      openstack.cloud.resource:
        service: compute
        type: server
        attributes:
          name: ansible_server2
        state: absent
        wait: true

    - name: Delete server 1
      openstack.cloud.resource:
        service: compute
        type: server
        attributes:
          name: ansible_server1
        state: absent
        wait: true

    - name: Delete floating ip address 1
      openstack.cloud.resource:
        service: network
        type: ip
        attributes:
          floating_ip_address: 10.6.6.150
        state: absent

    - name: Detach router from internal subnet
      openstack.cloud.router:
        name: ansible_router
        network: "{{ network_external.resource.id }}"
        external_fixed_ips:
          - ip: 10.6.6.10
            subnet: "{{ subnet_external.resource.id }}"
        interfaces: []

    - name: Delete router
      openstack.cloud.resource:
        service: network
        type: router
        attributes:
          name: ansible_router
        state: absent
        wait: true

    - name: Delete internal port 2
      openstack.cloud.resource:
        service: network
        type: port
        attributes:
          name: ansible_port_internal2
        state: absent

    - name: Delete internal port 1
      openstack.cloud.resource:
        service: network
        type: port
        attributes:
          name: ansible_port_internal1
        state: absent

    - name: Delete internal subnet
      openstack.cloud.resource:
        service: network
        type: subnet
        attributes:
          name: ansible_internal_subnet
        state: absent

    - name: Delete internal network
      openstack.cloud.resource:
        service: network
        type: network
        attributes:
          name: ansible_network_internal
        state: absent
        wait: true

    - name: Delete external port
      openstack.cloud.resource:
        service: network
        type: port
        attributes:
          name: ansible_port_external
        state: absent

    - name: Delete external subnet
      openstack.cloud.resource:
        service: network
        type: subnet
        attributes:
          name: ansible_external_subnet
        state: absent

    - name: Delete external network
      openstack.cloud.resource:
        service: network
        type: network
        attributes:
          name: ansible_network_external
        state: absent
        wait: true


@@ -0,0 +1,11 @@
plugin: openstack.cloud.openstack
all_projects: true
compose:
  ci_compose_id: openstack.id
  ci_compose_project_id: openstack.project_id
expand_hostvars: true
fail_on_errors: true
only_clouds:
  - "{{ cloud }}"
strict: true
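
Because the plugin extends the constructed documentation fragment, the same template also accepts keyed_groups and groups. A minimal sketch of how it could be extended to group servers by flavor; the openstack.flavor.name hostvar path is an assumption about the server facts layout, not part of this change:

keyed_groups:
  - prefix: flavor
    key: openstack.flavor.name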


@@ -24,6 +24,7 @@
    - { role: identity_role, tags: identity_role }
    - { role: identity_user, tags: identity_user }
    - { role: image, tags: image }
    - { role: inventory, tags: inventory }
    - { role: keypair, tags: keypair }
    - { role: keystone_federation_protocol, tags: keystone_federation_protocol }
    - { role: keystone_idp, tags: keystone_idp }


@@ -8,410 +8,453 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

DOCUMENTATION = '''
---
name: openstack
author: OpenStack Ansible SIG
short_description: OpenStack inventory source
description:
  - Gather servers from OpenStack clouds and add them as Ansible hosts to your
    inventory.
  - Use YAML configuration file C(openstack.{yaml,yml}) to configure this
    inventory plugin.
  - Consumes cloud credentials from standard YAML configuration files
    C(clouds{,-public}.yaml).
options:
  all_projects:
    description:
      - Lists servers from all projects.
    type: bool
    default: false
  clouds_yaml_path:
    description:
      - Override path to C(clouds.yaml) file.
      - If this value is given it will be searched first.
      - Search paths for cloud credentials are complemented with files
        C(/etc/ansible/openstack.{yaml,yml}).
      - Default search paths are documented in
        U(https://docs.openstack.org/os-client-config/latest/user/configuration.html#config-files).
    type: list
    elements: str
    env:
      - name: OS_CLIENT_CONFIG_FILE
  expand_hostvars:
    description:
      - Enrich server facts with additional queries to OpenStack services. This
        includes requests to Cinder and Neutron which can be time-consuming
        for clouds with many servers.
      - Default value of I(expand_hostvars) is opposite of the default value
        for option C(expand_hostvars) in legacy openstack.py inventory script.
    type: bool
    default: false
  fail_on_errors:
    description:
      - Whether the inventory script fails, returning no hosts, when connection
        to a cloud failed, for example due to bad credentials or connectivity
        issues.
      - When I(fail_on_errors) is C(false) this inventory script will return
        all hosts it could fetch from clouds on a best effort basis.
      - Default value of I(fail_on_errors) is opposite of the default value
        for option C(fail_on_errors) in legacy openstack.py inventory script.
    type: bool
    default: false
  inventory_hostname:
    description:
      - What to register as inventory hostname.
      - When set to C(uuid) the ID of a server will be used and a group will
        be created for a server name.
      - When set to C(name) the name of a server will be used. When multiple
        servers share the same name, then the servers' IDs will be used.
      - Default value of I(inventory_hostname) is opposite of the default value
        for option C(use_hostnames) in legacy openstack.py inventory script.
    type: string
    choices: ['name', 'uuid']
    default: 'name'
  legacy_groups:
    description:
      - Automatically create groups from host variables.
    type: bool
    default: true
  only_clouds:
    description:
      - List of clouds in C(clouds.yaml) which will be contacted instead
        of using all clouds.
    type: list
    elements: str
    default: []
  plugin:
    description:
      - Token which marks a given YAML configuration file as a valid input file
        for this inventory plugin.
    required: true
    choices: ['openstack', 'openstack.cloud.openstack']
  private:
    description:
      - Use private interfaces of servers, if available, when determining ip
        addresses for Ansible hosts.
      - Using I(private) helps when running Ansible from a server in the cloud
        and one wants to ensure that servers communicate over private networks
        only.
    type: bool
    default: false
  show_all:
    description:
      - Whether all servers should be listed or not.
      - When I(show_all) is C(false) then only servers with a valid ip
        address, regardless of whether it is private or public, will be listed.
    type: bool
    default: false
  use_names:
    description:
      - "When I(use_names) is C(false), its default value, then a server's
        first floating ip address will be used for both facts C(ansible_host)
        and C(ansible_ssh_host). When no floating ip address is attached to a
        server, then its first non-floating ip address is used instead. If
        no addresses are attached to a server, then both facts will not be
        defined."
      - "When I(use_names) is C(true), then the server name will be used for
        both C(ansible_host) and C(ansible_ssh_host) facts. This is useful for
        jump or bastion hosts where each server name is actually a server's
        FQDN."
    type: bool
    default: false
requirements:
  - "python >= 3.6"
  - "openstacksdk >= 0.103.0"
extends_documentation_fragment:
  - inventory_cache
  - constructed
'''

EXAMPLES = r'''
# Create a file called openstack.yaml, add the following content and run
# $> ansible-inventory --list -vvv -i openstack.yaml
plugin: openstack.cloud.openstack

all_projects: false
expand_hostvars: true
fail_on_errors: true
only_clouds:
  - "devstack-admin"
strict: true
'''

import collections
import sys

from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable

from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
    ensure_compatibility
)

try:
    import openstack
    HAS_SDK = True
except ImportError:
    HAS_SDK = False


class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

    NAME = 'openstack.cloud.openstack'

    def parse(self, inventory, loader, path, cache=True):
        super(InventoryModule, self).parse(inventory, loader, path,
                                           cache=cache)

        if not HAS_SDK:
            raise AnsibleParserError(
                'Could not import Python library openstacksdk')

        try:
            ensure_compatibility(openstack.version.__version__)
        except ImportError as e:
            raise AnsibleParserError(
                'Incompatible openstacksdk library found: {0}'.format(e))

        # Redirect logging to stderr so it does not mix with output, in
        # particular JSON output of ansible-inventory.
        # TODO: Integrate openstack's logging with Ansible's logging.
        if self.display.verbosity > 3:
            openstack.enable_logging(debug=True, stream=sys.stderr)
        else:
            openstack.enable_logging(stream=sys.stderr)

        config = self._read_config_data(path)

        if 'plugin' not in config and 'clouds' not in config:
            raise AnsibleParserError(
                "Invalid OpenStack inventory configuration file found,"
                " missing 'plugin' and 'clouds' keys.")

        # TODO: Is it wise to disregard a potential user configuration error?
        if 'clouds' in config:
            self.display.vvvv(
                'Found combined plugin config and clouds config file.')

        servers = self._fetch_servers(path, cache)

        # determine inventory hostnames
        if self.get_option('inventory_hostname') == 'name':
            count = collections.Counter(s['name'] for s in servers)

            inventory = dict(((server['name'], server)
                              if count[server['name']] == 1
                              else (server['id'], server))
                             for server in servers)

        else:  # self.get_option('inventory_hostname') == 'uuid'
            inventory = dict((server['id'], server)
                             for server in servers)

        # drop servers without addresses
        show_all = self.get_option('show_all')
        inventory = dict((k, v)
                         for k, v in inventory.items()
                         if show_all or v['addresses'])

        for hostname, server in inventory.items():
            host_vars = self._generate_host_vars(hostname, server)
            self._add_host(hostname, host_vars)

        if self.get_option('legacy_groups'):
            for hostname, server in inventory.items():
                for group in self._generate_legacy_groups(server):
                    group_name = self.inventory.add_group(group)
                    if group_name == hostname:
                        self.display.vvvv(
                            'Same name for host {0} and group {1}'
                            .format(hostname, group_name))
                        self.inventory.add_host(hostname, group_name)
                    else:
                        self.inventory.add_child(group_name, hostname)

    def _add_host(self, hostname, host_vars):
        # Ref.: https://docs.ansible.com/ansible/latest/dev_guide/
        #       developing_inventory.html#constructed-features

        self.inventory.add_host(hostname, group='all')

        for k, v in host_vars.items():
            self.inventory.set_variable(hostname, k, v)

        strict = self.get_option('strict')

        self._set_composite_vars(
            self.get_option('compose'), host_vars, hostname, strict=True)

        self._add_host_to_composed_groups(
            self.get_option('groups'), host_vars, hostname, strict=strict)

        self._add_host_to_keyed_groups(
            self.get_option('keyed_groups'), host_vars, hostname,
            strict=strict)

    def _fetch_servers(self, path, cache):
        cache_key = self._get_cache_prefix(path)
        user_cache_setting = self.get_option('cache')
        attempt_to_read_cache = user_cache_setting and cache
        cache_needs_update = not cache and user_cache_setting

        servers = None

        if attempt_to_read_cache:
            self.display.vvvv('Reading OpenStack inventory cache key {0}'
                              .format(cache_key))
            try:
                servers = self._cache[cache_key]
            except KeyError:
                self.display.vvvv("OpenStack inventory cache not found")
                cache_needs_update = True

        if not attempt_to_read_cache or cache_needs_update:
            self.display.vvvv('Retrieving servers from Openstack clouds')

            clouds_yaml_path = self.get_option('clouds_yaml_path')
            config_files = (
                openstack.config.loader.CONFIG_FILES
                + ([clouds_yaml_path] if clouds_yaml_path else []))

            config = openstack.config.loader.OpenStackConfig(
                config_files=config_files)

            only_clouds = self.get_option('only_clouds', [])
            if only_clouds:
                if not isinstance(only_clouds, list):
                    raise AnsibleParserError(
                        'Option only_clouds in OpenStack inventory'
                        ' configuration is not a list')

                cloud_regions = [config.get_one(cloud=cloud)
                                 for cloud in only_clouds]
            else:
                cloud_regions = config.get_all()

            clouds = [openstack.connection.Connection(config=cloud_region)
                      for cloud_region in cloud_regions]

            if self.get_option('private'):
                for cloud in clouds:
                    cloud.private = True

            self.display.vvvv(
                'Found {0} OpenStack cloud(s)'
                .format(len(clouds)))

            self.display.vvvv(
                'Using {0} OpenStack cloud(s)'
                .format(len(clouds)))

            expand_hostvars = self.get_option('expand_hostvars')
            all_projects = self.get_option('all_projects')
            servers = []

            def _expand_server(server, cloud, volumes):
                # calling openstacksdk's compute.servers() with
                # details=True already fetched most facts

                # cloud dict is used for legacy_groups option
                server['cloud'] = dict(name=cloud.name)
                region = cloud.config.get_region_name()
                if region:
                    server['cloud']['region'] = region

                if not expand_hostvars:
                    # do not query OpenStack API for additional data
                    return server

                # TODO: Consider expanding 'flavor', 'image' and
                #       'security_groups' when users still require this
                #       functionality.
                # Ref.: https://opendev.org/openstack/openstacksdk/src/commit/\
                #       289e5c2d3cba0eb1c008988ae5dccab5be05d9b6/openstack/cloud/meta.py#L482

                server['volumes'] = [v for v in volumes
                                     if any(a['server_id'] == server['id']
                                            for a in v['attachments'])]

                return server

            for cloud in clouds:
                if expand_hostvars:
                    volumes = [v.to_dict(computed=False)
                               for v in cloud.block_storage.volumes()]
                else:
                    volumes = []

                try:
                    for server in [
                        # convert to dict before expanding servers
                        # to allow us to attach attributes
                        _expand_server(server.to_dict(computed=False),
                                       cloud,
                                       volumes)
                        for server in cloud.compute.servers(
                            all_projects=all_projects,
                            # details are required because 'addresses'
                            # attribute must be populated
                            details=True)
                    ]:
                        servers.append(server)
                except openstack.exceptions.OpenStackCloudException as e:
                    self.display.warning(
                        'Fetching servers for cloud {0} failed with: {1}'
                        .format(cloud.name, str(e)))
                    if self.get_option('fail_on_errors'):
                        raise

        if cache_needs_update:
            self._cache[cache_key] = servers

        return servers

    def _generate_host_vars(self, hostname, server):
        # populate host_vars with 'ansible_host', 'ansible_ssh_host' and
        # 'openstack' facts
        host_vars = dict(openstack=server)

        if self.get_option('use_names'):
            host_vars['ansible_ssh_host'] = server['name']
            host_vars['ansible_host'] = server['name']
        else:
            # flatten addresses dictionary
            addresses = [a
                         for addresses in (server['addresses'] or {}).values()
                         for a in addresses]

            floating_ip = next(
                (address['addr'] for address in addresses
                 if address['OS-EXT-IPS:type'] == 'floating'),
                None)

            fixed_ip = next(
                (address['addr'] for address in addresses
                 if address['OS-EXT-IPS:type'] == 'fixed'),
                None)

            ip = floating_ip if floating_ip is not None else fixed_ip

            if ip is not None:
                host_vars['ansible_ssh_host'] = ip
                host_vars['ansible_host'] = ip

        return host_vars

    def _generate_legacy_groups(self, server):
        groups = []

        # cloud was added by _expand_server()
        cloud = server['cloud']

        cloud_name = cloud['name']
        groups.append(cloud_name)

        region = cloud['region'] if 'region' in cloud else None
        if region is not None:
            groups.append(region)
            groups.append('{cloud}_{region}'.format(cloud=cloud_name,
                                                    region=region))

        metadata = server.get('metadata', {})
        if 'group' in metadata:
            groups.append(metadata['group'])
        for extra_group in metadata.get('groups', '').split(','):
            if extra_group:
                groups.append(extra_group.strip())
        for k, v in metadata.items():
            groups.append('meta-{k}_{v}'.format(k=k, v=v))

        groups.append('instance-{id}'.format(id=server['id']))

        for k in ('flavor', 'image'):
            if 'name' in server[k]:
                groups.append('{k}-{v}'.format(k=k, v=server[k]['name']))

        availability_zone = server['availability_zone']
        if availability_zone:
            groups.append(availability_zone)
            if region:
                groups.append(
                    '{region}_{availability_zone}'
                    .format(region=region,
                            availability_zone=availability_zone))
                groups.append(
                    '{cloud}_{region}_{availability_zone}'
                    .format(cloud=cloud_name,
                            region=region,
                            availability_zone=availability_zone))

        return groups

    def verify_file(self, path):
        if super(InventoryModule, self).verify_file(path):
            for fn in ('openstack', 'clouds'):
                for suffix in ('yaml', 'yml'):
                    maybe = '{fn}.{suffix}'.format(fn=fn, suffix=suffix)
                    if path.endswith(maybe):
                        self.display.vvvv(
                            'OpenStack inventory configuration file found:'
                            ' {0}'.format(maybe))
                        return True
        return False