diff --git a/scale/base_compute.py b/scale/base_compute.py
index 1f05a76..b39f324 100644
--- a/scale/base_compute.py
+++ b/scale/base_compute.py
@@ -226,3 +226,21 @@ class Flavor(object):
             flavor.delete()
         except Exception:
             pass
+
+class NovaQuota(object):
+
+    def __init__(self, novaclient, tenant_id):
+        self.novaclient = novaclient
+        self.tenant_id = tenant_id
+
+    def update_quota(self, **kwargs):
+        self.novaclient.quotas.update(self.tenant_id, **kwargs)
+
+class CinderQuota(object):
+
+    def __init__(self, cinderclient, tenant_id):
+        self.cinderclient = cinderclient
+        self.tenant_id = tenant_id
+
+    def update_quota(self, **kwargs):
+        self.cinderclient.quotas.update(self.tenant_id, **kwargs)
diff --git a/scale/base_network.py b/scale/base_network.py
index d683385..0cc5164 100644
--- a/scale/base_network.py
+++ b/scale/base_network.py
@@ -367,3 +367,16 @@ class Router(object):
             'subnet_id': network_instance.network['subnets'][0]
         }
         self.neutron_client.remove_interface_router(self.router['router']['id'], body)
+
+
+class NeutronQuota(object):
+
+    def __init__(self, neutronclient, tenant_id):
+        self.neutronclient = neutronclient
+        self.tenant_id = tenant_id
+
+    def update_quota(self, quotas):
+        body = {
+            'quota': quotas
+        }
+        self.neutronclient.update_quota(self.tenant_id, body)
diff --git a/scale/cfg.scale.yaml b/scale/cfg.scale.yaml
index 37ec54c..0bd75c7 100644
--- a/scale/cfg.scale.yaml
+++ b/scale/cfg.scale.yaml
@@ -41,6 +41,7 @@ server:
     number_tenants: 1
 
     # Number of Users to be created inside the tenant
+    # For now support only 1 user per tenant
     users_per_tenant: 1
 
     # Number of routers to be created within the context of each User
diff --git a/scale/kb_server/README b/scale/kb_server/README
new file mode 100644
index 0000000..d0fe8a9
--- /dev/null
+++ b/scale/kb_server/README
@@ -0,0 +1,2 @@
+Run below command to start the server:
+pecan serve config.py
diff --git a/scale/kloudbuster.py b/scale/kloudbuster.py
index 78daa3f..3d9b4a0 100644
--- a/scale/kloudbuster.py
+++ b/scale/kloudbuster.py
@@ -98,10 +98,10 @@ class Kloud(object):
         self.placement_az = scale_cfg['availability_zone']
         LOG.info('%s Availability Zone: %s' % (self.name, self.placement_az))
 
-    def create_resources(self):
+    def create_resources(self, tenant_quota):
         for tenant_count in xrange(self.scale_cfg['number_tenants']):
             tenant_name = self.prefix + "-T" + str(tenant_count)
-            new_tenant = tenant.Tenant(tenant_name, self)
+            new_tenant = tenant.Tenant(tenant_name, self, tenant_quota)
             self.tenant_list.append(new_tenant)
             new_tenant.create_resources()
 
@@ -277,8 +277,9 @@ class KloudBuster(object):
         kbrunner = None
         vm_creation_concurrency = self.client_cfg.vm_creation_concurrency
         try:
-            self.kloud.create_resources()
-            self.testing_kloud.create_resources()
+            tenant_quota = self.calc_tenant_quota()
+            self.kloud.create_resources(tenant_quota['server'])
+            self.testing_kloud.create_resources(tenant_quota['client'])
 
             # Start the runner and ready for the incoming redis messages
             client_list = self.testing_kloud.get_all_instances()
@@ -358,10 +359,98 @@ class KloudBuster(object):
             if kbrunner:
                 kbrunner.dispose()
 
-def get_total_vm_count(config):
-    return (config['number_tenants'] * config['users_per_tenant'] *
-            config['routers_per_user'] * config['networks_per_router'] *
-            config['vms_per_network'])
+    def get_tenant_vm_count(self, config):
+        return (config['users_per_tenant'] * config['routers_per_user'] *
+                config['networks_per_router'] * config['vms_per_network'])
+
+    def calc_neutron_quota(self):
+        total_vm = self.get_tenant_vm_count(self.server_cfg)
+
+        server_quota = {}
+        server_quota['network'] = self.server_cfg['networks_per_router']
+        server_quota['subnet'] = server_quota['network']
+        server_quota['router'] = self.server_cfg['routers_per_user']
+        if (self.server_cfg['use_floatingip']):
+            # (1) Each VM has one floating IP
+            # (2) Each Router has one external IP
+            server_quota['floatingip'] = total_vm + server_quota['router']
+            # (1) Each VM Floating IP takes up 1 port, total of $total_vm port(s)
+            # (2) Each VM Fixed IP takes up 1 port, total of $total_vm port(s)
+            # (3) Each Network has one router_interface (gateway), and one DHCP agent, total of
+            #     server_quota['network'] * 2 port(s)
+            # (4) Each Router has one external IP, takes up 1 port, total of
+            #     server_quota['router'] port(s)
+            server_quota['port'] = 2 * total_vm + 2 * server_quota['network'] +\
+                server_quota['router']
+        else:
+            server_quota['floatingip'] = server_quota['router']
+            server_quota['port'] = total_vm + 2 * server_quota['network'] + server_quota['router']
+        server_quota['security_group'] = server_quota['network'] + 1
+        server_quota['security_group_rule'] = server_quota['security_group'] * 100
+
+        client_quota = {}
+        client_quota['network'] = 1
+        client_quota['subnet'] = 1
+        client_quota['router'] = 1
+        if (self.client_cfg['use_floatingip']):
+            # (1) Each VM has one floating IP
+            # (2) Each Router has one external IP, total of 1 router
+            # (3) KB-Proxy node has one floating IP
+            client_quota['floatingip'] = total_vm + 1 + 1
+            # (1) Each VM Floating IP takes up 1 port, total of $total_vm port(s)
+            # (2) Each VM Fixed IP takes up 1 port, total of $total_vm port(s)
+            # (3) Each Network has one router_interface (gateway), and one DHCP agent, total of
+            #     client_quota['network'] * 2 port(s)
+            # (4) KB-Proxy node takes up 2 ports, one for fixed IP, one for floating IP
+            # (5) Each Router has one external IP, takes up 1 port, total of 1 router/port
+            client_quota['port'] = 2 * total_vm + 2 * client_quota['network'] + 2 + 1
+        else:
+            client_quota['floatingip'] = 1 + 1
+            client_quota['port'] = total_vm + 2 * client_quota['network'] + 2 + 1
+        if self.single_cloud:
+            # Under single-cloud mode, the shared network is attached to every router in server
+            # cloud, and each one takes up 1 port on client side.
+            client_quota['port'] = client_quota['port'] + server_quota['router']
+        client_quota['security_group'] = client_quota['network'] + 1
+        client_quota['security_group_rule'] = client_quota['security_group'] * 100
+
+        return [server_quota, client_quota]
+
+    def calc_nova_quota(self):
+        total_vm = self.get_tenant_vm_count(self.server_cfg)
+        server_quota = {}
+        server_quota['instances'] = total_vm
+        server_quota['cores'] = total_vm * self.server_cfg['flavor']['vcpus']
+        server_quota['ram'] = total_vm * self.server_cfg['flavor']['ram']
+
+        client_quota = {}
+        client_quota['instances'] = total_vm + 1
+        client_quota['cores'] = total_vm * self.client_cfg['flavor']['vcpus'] + 1
+        client_quota['ram'] = total_vm * self.client_cfg['flavor']['ram'] + 2048
+
+        return [server_quota, client_quota]
+
+    def calc_cinder_quota(self):
+        total_vm = self.get_tenant_vm_count(self.server_cfg)
+        server_quota = {}
+        server_quota['gigabytes'] = total_vm * self.server_cfg['flavor']['disk']
+
+        client_quota = {}
+        client_quota['gigabytes'] = total_vm * self.client_cfg['flavor']['disk'] + 20
+
+        return [server_quota, client_quota]
+
+    def calc_tenant_quota(self):
+        quota_dict = {'server': {}, 'client': {}}
+        nova_quota = self.calc_nova_quota()
+        neutron_quota = self.calc_neutron_quota()
+        cinder_quota = self.calc_cinder_quota()
+        for idx, val in enumerate(['server', 'client']):
+            quota_dict[val]['nova'] = nova_quota[idx]
+            quota_dict[val]['neutron'] = neutron_quota[idx]
+            quota_dict[val]['cinder'] = cinder_quota[idx]
+
+        return quota_dict
 
 # Some hardcoded client side options we do not want users to change
 hardcoded_client_cfg = {
diff --git a/scale/tenant.py b/scale/tenant.py
index 115e89a..cea948a 100644
--- a/scale/tenant.py
+++ b/scale/tenant.py
@@ -26,7 +26,7 @@ class Tenant(object):
     2. Uses the User class to perform all user resource creation and deletion
     """
 
-    def __init__(self, tenant_name, kloud):
+    def __init__(self, tenant_name, kloud, tenant_quota):
         """
         Holds the tenant name
         tenant id and keystone client
@@ -38,6 +38,7 @@ class Tenant(object):
         self.kloud = kloud
         self.tenant_object = self._get_tenant()
         self.tenant_id = self.tenant_object.id
+        self.tenant_quota = tenant_quota
         # Contains a list of user instance objects
         self.user_list = []
 
diff --git a/scale/users.py b/scale/users.py
index 77ceb33..fa6c013 100644
--- a/scale/users.py
+++ b/scale/users.py
@@ -14,6 +14,7 @@
 
 import base_compute
 import base_network
+from cinderclient.v2 import client as cinderclient
 import keystoneclient.openstack.common.apiclient.exceptions as keystone_exception
 import log as logging
 from neutronclient.v2_0 import client as neutronclient
@@ -40,9 +41,10 @@ class User(object):
         self.tenant = tenant
         self.user_id = None
         self.router_list = []
-        # Store the neutron and nova client
-        self.neutron_client = None
+        # Store the nova, neutron and cinder client
         self.nova_client = None
+        self.neutron_client = None
+        self.cinder_client = None
         self.admin_user = self._get_user()
         # Each user is associated to 1 key pair at most
         self.key_pair = None
@@ -113,6 +115,16 @@ class User(object):
         # Finally delete the user
         self.tenant.kloud.keystone.users.delete(self.user_id)
 
+    def update_tenant_quota(self, tenant_quota):
+        nova_quota = base_compute.NovaQuota(self.nova_client, self.tenant.tenant_id)
+        nova_quota.update_quota(**tenant_quota['nova'])
+
+        cinder_quota = base_compute.CinderQuota(self.cinder_client, self.tenant.tenant_id)
+        cinder_quota.update_quota(**tenant_quota['cinder'])
+
+        neutron_quota = base_network.NeutronQuota(self.neutron_client, self.tenant.tenant_id)
+        neutron_quota.update_quota(tenant_quota['neutron'])
+
     def create_resources(self):
         """
         Creates all the User elements associated with a User
@@ -129,14 +141,18 @@ class User(object):
 
         # Create the neutron client to be used for all operations
         self.neutron_client = neutronclient.Client(**creden)
 
-        # Create a new nova client for this User with correct credentials
+        # Create a new nova and cinder client for this User with correct credentials
         creden_nova = {}
         creden_nova['username'] = self.user_name
         creden_nova['api_key'] = self.user_name
         creden_nova['auth_url'] = self.tenant.kloud.auth_url
         creden_nova['project_id'] = self.tenant.tenant_name
         creden_nova['version'] = 2
+
         self.nova_client = Client(**creden_nova)
+        self.cinder_client = cinderclient.Client(**creden_nova)
+
+        self.update_tenant_quota(self.tenant.tenant_quota)
 
         config_scale = self.tenant.kloud.scale_cfg
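
Below is a minimal standalone sketch (not part of the patch) of the per-tenant quota arithmetic that get_tenant_vm_count(), calc_nova_quota(), calc_cinder_quota() and the port formula in calc_neutron_quota() perform. The sample configuration values are hypothetical and only illustrate the formulas.

# Standalone sketch of the per-tenant quota arithmetic introduced by this patch.
# All sample config values below are assumptions for illustration only.

def tenant_vm_count(cfg):
    # Mirrors KloudBuster.get_tenant_vm_count(): VMs owned by one tenant
    return (cfg['users_per_tenant'] * cfg['routers_per_user'] *
            cfg['networks_per_router'] * cfg['vms_per_network'])

sample_cfg = {
    'users_per_tenant': 1,
    'routers_per_user': 2,
    'networks_per_router': 2,
    'vms_per_network': 5,
    'flavor': {'vcpus': 1, 'ram': 2048, 'disk': 20},
}

vms = tenant_vm_count(sample_cfg)                  # 1 * 2 * 2 * 5 = 20 VMs per tenant

# Nova quota for the server cloud (see calc_nova_quota)
nova = {
    'instances': vms,                              # 20
    'cores': vms * sample_cfg['flavor']['vcpus'],  # 20
    'ram': vms * sample_cfg['flavor']['ram'],      # 40960 MB
}

# Cinder quota for the server cloud (see calc_cinder_quota)
cinder = {'gigabytes': vms * sample_cfg['flavor']['disk']}  # 400 GB

# Neutron port quota with floating IPs enabled, following the patch's formula:
# 2 ports per VM (fixed + floating) + 2 per network (gateway + DHCP) + 1 per router
net_q = sample_cfg['networks_per_router']   # the patch uses this as the network quota
rtr_q = sample_cfg['routers_per_user']
ports = 2 * vms + 2 * net_q + rtr_q         # 2*20 + 2*2 + 2 = 46

print(nova, cinder, ports)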