Refactor loadbalancer module

* Enable check_mode
* Enable allowed_cidrs on listener if the Octavia version is >= 2.12
* Only send flavor_id if it is not None

Change-Id: I4fd36bf3ed347e020151721a6b56d1cac0a8fd23
This commit is contained in:
Jesper Schmitz Mouridsen 2020-10-09 13:29:33 +02:00
parent 38e61994c7
commit ab96eb6a11

View File

@ -83,6 +83,12 @@ options:
description: description:
- The protocol port number for the listener. - The protocol port number for the listener.
default: 80 default: 80
allowed_cidrs:
description:
- A list of IPv4, IPv6 or a mix of both CIDRs to be allowed access to the listener. The default is to allow all.
When a list of CIDRs is provided, the default switches to deny all.
Ignored on unsupported Octavia versions (less than 2.12).
default: []
pool: pool:
description: description:
- The pool attached to the listener. - The pool attached to the listener.
@ -285,29 +291,27 @@ EXAMPLES = '''
''' '''
import time import time
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (openstack_full_argument_spec,
openstack_module_kwargs,
openstack_cloud_from_module)
def _wait_for_lb(module, cloud, lb, status, failures, interval=5): class LoadBalancerModule(OpenStackModule):
def _wait_for_lb(self, lb, status, failures, interval=5):
"""Wait for load balancer to be in a particular provisioning status.""" """Wait for load balancer to be in a particular provisioning status."""
timeout = module.params['timeout'] timeout = self.params['timeout']
total_sleep = 0 total_sleep = 0
if failures is None: if failures is None:
failures = [] failures = []
while total_sleep < timeout: while total_sleep < timeout:
lb = cloud.load_balancer.find_load_balancer(lb.id) lb = self.conn.load_balancer.find_load_balancer(lb.id)
if lb: if lb:
if lb.provisioning_status == status: if lb.provisioning_status == status:
return None return None
if lb.provisioning_status in failures: if lb.provisioning_status in failures:
module.fail_json( self.fail_json(
msg="Load Balancer %s transitioned to failure state %s" % msg="Load Balancer %s transitioned to failure state %s" %
(lb.id, lb.provisioning_status) (lb.id, lb.provisioning_status)
) )
@ -315,21 +319,19 @@ def _wait_for_lb(module, cloud, lb, status, failures, interval=5):
if status == "DELETED": if status == "DELETED":
return None return None
else: else:
module.fail_json( self.fail_json(
msg="Load Balancer %s transitioned to DELETED" % lb.id msg="Load Balancer %s transitioned to DELETED" % lb.id
) )
time.sleep(interval) time.sleep(interval)
total_sleep += interval total_sleep += interval
module.fail_json( self.fail_json(
msg="Timeout waiting for Load Balancer %s to transition to %s" % msg="Timeout waiting for Load Balancer %s to transition to %s" %
(lb.id, status) (lb.id, status)
) )
argument_spec = dict(
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True), name=dict(required=True),
flavor=dict(required=False), flavor=dict(required=False),
state=dict(default='present', choices=['absent', 'present']), state=dict(default='present', choices=['absent', 'present']),
@ -343,19 +345,18 @@ def main():
public_network=dict(required=False), public_network=dict(required=False),
delete_public_ip=dict(required=False, default=False, type='bool'), delete_public_ip=dict(required=False, default=False, type='bool'),
) )
module_kwargs = openstack_module_kwargs() module_kwargs = dict(supports_check_mode=True)
module = AnsibleModule(argument_spec, **module_kwargs)
sdk, cloud = openstack_cloud_from_module(module)
flavor = module.params['flavor'] def run(self):
vip_network = module.params['vip_network'] flavor = self.params['flavor']
vip_subnet = module.params['vip_subnet'] vip_network = self.params['vip_network']
vip_port = module.params['vip_port'] vip_subnet = self.params['vip_subnet']
listeners = module.params['listeners'] vip_port = self.params['vip_port']
public_vip_address = module.params['public_ip_address'] listeners = self.params['listeners']
allocate_fip = module.params['auto_public_ip'] public_vip_address = self.params['public_ip_address']
delete_fip = module.params['delete_public_ip'] allocate_fip = self.params['auto_public_ip']
public_network = module.params['public_network'] delete_fip = self.params['delete_public_ip']
public_network = self.params['public_network']
vip_network_id = None vip_network_id = None
vip_subnet_id = None vip_subnet_id = None
@ -363,90 +364,114 @@ def main():
flavor_id = None flavor_id = None
try: try:
max_microversion = 1
max_majorversion = 2
changed = False changed = False
lb = cloud.load_balancer.find_load_balancer( lb = self.conn.load_balancer.find_load_balancer(
name_or_id=module.params['name']) name_or_id=self.params['name'])
if self.params['state'] == 'present':
if lb and self.ansible.check_mode:
self.exit_json(changed=False)
if lb:
self.exit_json(changed=False)
ver_data = self.conn.load_balancer.get_all_version_data()
region = list(ver_data.keys())[0]
interface_type = list(ver_data[region].keys())[0]
versions = ver_data[region][interface_type]['load-balancer']
for ver in versions:
if ver['status'] == 'CURRENT':
curversion = ver['version'].split(".")
max_majorversion = int(curversion[0])
max_microversion = int(curversion[1])
if module.params['state'] == 'present':
if not lb: if not lb:
if self.ansible.check_mode:
self.exit_json(changed=True)
if not (vip_network or vip_subnet or vip_port): if not (vip_network or vip_subnet or vip_port):
module.fail_json( self.fail_json(
msg="One of vip_network, vip_subnet, or vip_port must " msg="One of vip_network, vip_subnet, or vip_port must "
"be specified for load balancer creation" "be specified for load balancer creation"
) )
if flavor: if flavor:
_flavor = cloud.load_balancer.find_flavor(flavor) _flavor = self.conn.load_balancer.find_flavor(flavor)
if not _flavor: if not _flavor:
module.fail_json( self.fail_json(
msg='flavor %s not found' % flavor msg='flavor %s not found' % flavor
) )
flavor_id = _flavor.id flavor_id = _flavor.id
if vip_network: if vip_network:
network = cloud.get_network(vip_network) network = self.conn.get_network(vip_network)
if not network: if not network:
module.fail_json( self.fail_json(
msg='network %s is not found' % vip_network msg='network %s is not found' % vip_network
) )
vip_network_id = network.id vip_network_id = network.id
if vip_subnet: if vip_subnet:
subnet = cloud.get_subnet(vip_subnet) subnet = self.conn.get_subnet(vip_subnet)
if not subnet: if not subnet:
module.fail_json( self.fail_json(
msg='subnet %s is not found' % vip_subnet msg='subnet %s is not found' % vip_subnet
) )
vip_subnet_id = subnet.id vip_subnet_id = subnet.id
if vip_port: if vip_port:
port = cloud.get_port(vip_port) port = self.conn.get_port(vip_port)
if not port: if not port:
module.fail_json( self.fail_json(
msg='port %s is not found' % vip_port msg='port %s is not found' % vip_port
) )
vip_port_id = port.id vip_port_id = port.id
lbargs = {"name": self.params['name'],
"vip_network_id": vip_network_id,
"vip_subnet_id": vip_subnet_id,
"vip_port_id": vip_port_id,
"vip_address": self.params['vip_address']
}
if flavor_id is not None:
lbargs["flavor_id"] = flavor_id
lb = self.conn.load_balancer.create_load_balancer(**lbargs)
lb = cloud.load_balancer.create_load_balancer(
name=module.params['name'],
flavor_id=flavor_id,
vip_network_id=vip_network_id,
vip_subnet_id=vip_subnet_id,
vip_port_id=vip_port_id,
vip_address=module.params['vip_address'],
)
changed = True changed = True
if not listeners and not module.params['wait']: if not listeners and not self.params['wait']:
module.exit_json( self.exit_json(
changed=changed, changed=changed,
loadbalancer=lb.to_dict(), loadbalancer=lb.to_dict(),
id=lb.id id=lb.id
) )
_wait_for_lb(module, cloud, lb, "ACTIVE", ["ERROR"]) self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
for listener_def in listeners: for listener_def in listeners:
listener_name = listener_def.get("name") listener_name = listener_def.get("name")
pool_def = listener_def.get("pool") pool_def = listener_def.get("pool")
if not listener_name: if not listener_name:
module.fail_json(msg='listener name is required') self.fail_json(msg='listener name is required')
listener = cloud.load_balancer.find_listener( listener = self.conn.load_balancer.find_listener(
name_or_id=listener_name name_or_id=listener_name
) )
if not listener: if not listener:
_wait_for_lb(module, cloud, lb, "ACTIVE", ["ERROR"]) self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
protocol = listener_def.get("protocol", "HTTP") protocol = listener_def.get("protocol", "HTTP")
protocol_port = listener_def.get("protocol_port", 80) protocol_port = listener_def.get("protocol_port", 80)
allowed_cidrs = listener_def.get("allowed_cidrs", [])
listener = cloud.load_balancer.create_listener( listenerargs = {"name": listener_name,
name=listener_name, "loadbalancer_id": lb.id,
loadbalancer_id=lb.id, "protocol": protocol,
protocol=protocol, "protocol_port": protocol_port
protocol_port=protocol_port, }
) if max_microversion >= 12 and max_majorversion >= 2:
listenerargs['allowed_cidrs'] = allowed_cidrs
listener = self.conn.load_balancer.create_listener(**listenerargs)
changed = True changed = True
# Ensure pool in the listener. # Ensure pool in the listener.
@ -455,18 +480,18 @@ def main():
members = pool_def.get('members', []) members = pool_def.get('members', [])
if not pool_name: if not pool_name:
module.fail_json(msg='pool name is required') self.fail_json(msg='pool name is required')
pool = cloud.load_balancer.find_pool(name_or_id=pool_name) pool = self.conn.load_balancer.find_pool(name_or_id=pool_name)
if not pool: if not pool:
_wait_for_lb(module, cloud, lb, "ACTIVE", ["ERROR"]) self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
protocol = pool_def.get("protocol", "HTTP") protocol = pool_def.get("protocol", "HTTP")
lb_algorithm = pool_def.get("lb_algorithm", lb_algorithm = pool_def.get("lb_algorithm",
"ROUND_ROBIN") "ROUND_ROBIN")
pool = cloud.load_balancer.create_pool( pool = self.conn.load_balancer.create_pool(
name=pool_name, name=pool_name,
listener_id=listener.id, listener_id=listener.id,
protocol=protocol, protocol=protocol,
@ -478,27 +503,27 @@ def main():
for member_def in members: for member_def in members:
member_name = member_def.get("name") member_name = member_def.get("name")
if not member_name: if not member_name:
module.fail_json(msg='member name is required') self.fail_json(msg='member name is required')
member = cloud.load_balancer.find_member(member_name, member = self.conn.load_balancer.find_member(member_name,
pool.id) pool.id
)
if not member: if not member:
_wait_for_lb(module, cloud, lb, "ACTIVE", self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
["ERROR"])
address = member_def.get("address") address = member_def.get("address")
if not address: if not address:
module.fail_json( self.fail_json(
msg='member address for member %s is ' msg='member address for member %s is '
'required' % member_name 'required' % member_name
) )
subnet_id = member_def.get("subnet") subnet_id = member_def.get("subnet")
if subnet_id: if subnet_id:
subnet = cloud.get_subnet(subnet_id) subnet = self.conn.get_subnet(subnet_id)
if not subnet: if not subnet:
module.fail_json( self.fail_json(
msg='subnet %s for member %s is not ' msg='subnet %s for member %s is not '
'found' % (subnet_id, member_name) 'found' % (subnet_id, member_name)
) )
@ -506,7 +531,7 @@ def main():
protocol_port = member_def.get("protocol_port", 80) protocol_port = member_def.get("protocol_port", 80)
member = cloud.load_balancer.create_member( member = self.conn.load_balancer.create_member(
pool, pool,
name=member_name, name=member_name,
address=address, address=address,
@ -522,7 +547,7 @@ def main():
orig_public_ip = None orig_public_ip = None
new_public_ip = None new_public_ip = None
if public_vip_address or allocate_fip: if public_vip_address or allocate_fip:
ips = cloud.network.ips( ips = self.conn.network.ips(
port_id=lb.vip_port_id, port_id=lb.vip_port_id,
fixed_ip_address=lb.vip_address fixed_ip_address=lb.vip_address
) )
@ -532,21 +557,22 @@ def main():
new_public_ip = orig_public_ip.floating_ip_address new_public_ip = orig_public_ip.floating_ip_address
if public_vip_address and public_vip_address != orig_public_ip: if public_vip_address and public_vip_address != orig_public_ip:
fip = cloud.network.find_ip(public_vip_address) fip = self.conn.network.find_ip(public_vip_address)
if not fip: if not fip:
module.fail_json( self.fail_json(
msg='Public IP %s is unavailable' % public_vip_address msg='Public IP %s is unavailable' % public_vip_address
) )
# Release origin public ip first # Release origin public ip first
cloud.network.update_ip( self.conn.network.update_ip(
orig_public_ip, orig_public_ip,
fixed_ip_address=None, fixed_ip_address=None,
port_id=None port_id=None
) )
# Associate new public ip # Associate new public ip
cloud.network.update_ip( self.conn.network.update_ip(
fip, fip,
fixed_ip_address=lb.vip_address, fixed_ip_address=lb.vip_address,
port_id=lb.vip_port_id port_id=lb.vip_port_id
@ -555,22 +581,22 @@ def main():
new_public_ip = public_vip_address new_public_ip = public_vip_address
changed = True changed = True
elif allocate_fip and not orig_public_ip: elif allocate_fip and not orig_public_ip:
fip = cloud.network.find_available_ip() fip = self.conn.network.find_available_ip()
if not fip: if not fip:
if not public_network: if not public_network:
module.fail_json(msg="Public network is not provided") self.fail_json(msg="Public network is not provided")
pub_net = cloud.network.find_network(public_network) pub_net = self.conn.network.find_network(public_network)
if not pub_net: if not pub_net:
module.fail_json( self.fail_json(
msg='Public network %s not found' % msg='Public network %s not found' %
public_network public_network
) )
fip = cloud.network.create_ip( fip = self.conn.network.create_ip(
floating_network_id=pub_net.id floating_network_id=pub_net.id
) )
cloud.network.update_ip( self.conn.network.update_ip(
fip, fip,
fixed_ip_address=lb.vip_address, fixed_ip_address=lb.vip_address,
port_id=lb.vip_port_id port_id=lb.vip_port_id
@ -580,22 +606,24 @@ def main():
changed = True changed = True
# Include public_vip_address in the result. # Include public_vip_address in the result.
lb = cloud.load_balancer.find_load_balancer(name_or_id=lb.id) lb = self.conn.load_balancer.find_load_balancer(name_or_id=lb.id)
lb_dict = lb.to_dict() lb_dict = lb.to_dict()
lb_dict.update({"public_vip_address": new_public_ip}) lb_dict.update({"public_vip_address": new_public_ip})
module.exit_json( self.exit_json(
changed=changed, changed=changed,
loadbalancer=lb_dict, loadbalancer=lb_dict,
id=lb.id id=lb.id
) )
elif module.params['state'] == 'absent': elif self.params['state'] == 'absent':
changed = False changed = False
public_vip_address = None public_vip_address = None
if lb: if lb:
if self.ansible.check_mode:
self.exit_json(changed=True)
if delete_fip: if delete_fip:
ips = cloud.network.ips( ips = self.conn.network.ips(
port_id=lb.vip_port_id, port_id=lb.vip_port_id,
fixed_ip_address=lb.vip_address fixed_ip_address=lb.vip_address
) )
@ -606,19 +634,26 @@ def main():
# Deleting load balancer with `cascade=False` does not make # Deleting load balancer with `cascade=False` does not make
# sense because the deletion will always fail if there are # sense because the deletion will always fail if there are
# sub-resources. # sub-resources.
cloud.load_balancer.delete_load_balancer(lb, cascade=True) self.conn.load_balancer.delete_load_balancer(lb, cascade=True)
changed = True changed = True
if module.params['wait']: if self.params['wait']:
_wait_for_lb(module, cloud, lb, "DELETED", ["ERROR"]) self._wait_for_lb(lb, "DELETED", ["ERROR"])
if delete_fip and public_vip_address: if delete_fip and public_vip_address:
cloud.network.delete_ip(public_vip_address) self.conn.network.delete_ip(public_vip_address)
changed = True changed = True
elif self.ansible.check_mode:
self.exit_json(changed=False)
module.exit_json(changed=changed) self.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e: except Exception as e:
module.fail_json(msg=str(e), extra_data=e.extra_data) self.fail_json(msg=str(e))
def main():
module = LoadBalancerModule()
module()
if __name__ == "__main__": if __name__ == "__main__":