Scheduler managers enhanced

- scheduler_manager: added support for trusted tokens and shared quota;
  implemented the backfill strategy
- fairshare_manager: various rewrites and fixes
- nova_manager: added support for user-data (metadata); new
  implementation of getProjectUsage() and getProjectServers()
- quota_manager: added support for shared quota
- queue_manager: a few stylistic changes
- command: shell conforming to the OS style
- setup.cfg: entry points updated
- added functional tests

Sem-Ver: feature
Change-Id: Ib0568d7b3497e3a3534c67130fe31c6527faff68
parent c86966c326
commit 10601679f4
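Note: the FairShareManager changes below weight historical usage by a decay factor per period. A minimal sketch of that aggregation, assuming the periods/period_length/decay_weight options introduced in this diff (get_usage is a hypothetical stand-in for NovaManager's getProjectUsage()):

from datetime import datetime, timedelta

def aggregate_usage(get_usage, periods=3, period_length=7, decay_weight=0.5):
    # Each period of `period_length` days is weighted by
    # decay_weight ** period, so older usage counts for less.
    total = 0.0
    to_date = datetime.utcnow()
    for period in range(periods):
        from_date = to_date - timedelta(days=period_length)
        total += (decay_weight ** period) * get_usage(from_date, to_date)
        to_date = from_date
    return total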
@@ -29,11 +29,9 @@ synergy.managers =
SchedulerManager = synergy_scheduler_manager.scheduler_manager:SchedulerManager
synergy.commands =
get_queue = synergy_scheduler_manager.client.command:GetQueue
get_quota = synergy_scheduler_manager.client.command:GetQuota
get_priority = synergy_scheduler_manager.client.command:GetPriority
get_share = synergy_scheduler_manager.client.command:GetShare
get_usage = synergy_scheduler_manager.client.command:GetUsage
quota = synergy_scheduler_manager.client.command:QuotaCommand
queue = synergy_scheduler_manager.client.command:QueueCommand
usage = synergy_scheduler_manager.client.command:UsageCommand

[build_sphinx]
source-dir = doc/source
@@ -1,4 +1,10 @@
from synergy.client.command import Execute
from synergy.client.command import ExecuteCommand
from synergy.client.tabulate import tabulate
from synergy_scheduler_manager.common.project import Project
from synergy_scheduler_manager.common.queue import Queue
from synergy_scheduler_manager.common.quota import SharedQuota
from synergy_scheduler_manager.common.user import User

__author__ = "Lisa Zangrando"
__email__ = "lisa.zangrando[AT]pd.infn.it"
@@ -19,373 +25,227 @@ See the License for the specific language governing
permissions and limitations under the License."""

class GetQuota(Execute):
class QueueCommand(ExecuteCommand):

def __init__(self):
super(GetQuota, self).__init__("GET_DYNAMIC_QUOTA")
super(QueueCommand, self).__init__("QueueCommand")

def configureParser(self, subparser):
parser = subparser.add_parser("get_quota",
add_help=True,
help="shows the dynamic quota info")
parser.add_argument("--long",
action='store_true',
help="shows more details")
queue_parser = subparser.add_parser('queue')
queue_subparsers = queue_parser.add_subparsers(dest="command")
queue_subparsers.add_parser("show", add_help=True,
help="shows the queue info")

def sendRequest(self, synergy_url, args):
self.long = args.long
def execute(self, synergy_url, args):
if args.command == "show":
command = "GET_QUEUE"
cmd_args = {"name": "DYNAMIC"}

super(GetQuota, self).sendRequest(
synergy_url + "/synergy/execute", "QuotaManager", self.getName())
queue = super(QueueCommand, self).execute(synergy_url,
"QueueManager",
command,
args=cmd_args)

def log(self):
quota = self.getResults()
if not isinstance(queue, Queue):
print("wrong data")

if not self.long:
cores_in_use = "{:d}".format(quota["cores"]["in_use"])
max_cores_in_use = max(len(cores_in_use), len("in use"))
table = []
headers = ["name", "size", "is open"]

cores_limit = "{:.2f}".format(quota["cores"]["limit"])
max_cores_limit = max(len(cores_limit), len("limit"))
row = []
row.append(queue.getName())
row.append(queue.getSize())
row.append(str(queue.isOpen()).lower())

ram_in_use = "{:d}".format(quota["ram"]["in_use"])
max_ram_in_use = max(len(ram_in_use), len("in use"))
table.append(row)

ram_limit = "{:.2f}".format(quota["ram"]["limit"])
max_ram_limit = max(len(ram_limit), len("limit"))

separator = "-" * (max_cores_in_use + max_cores_limit +
max_ram_in_use + max_ram_limit + 7) + "\n"

raw = "| {0:%ss} | {1:%ss} | {2:%ss} |\n" % (
len("ram (MB)"),
max(max_cores_in_use, max_ram_in_use),
max(max_cores_limit, max_ram_limit))

msg = separator
msg += raw.format("type", "in use", "limit")
msg += separator
msg += raw.format("ram (MB)", ram_in_use, ram_limit)
msg += raw.format("cores", cores_in_use, cores_limit)
msg += separator

print(msg)
else:
max_ram = 0
max_ram_in_use = len("{:d}".format(quota["ram"]["in_use"]))
max_ram_limit = len("{:.2f}".format(quota["ram"]["limit"]))
max_cores = 0
max_cores_in_use = len("{:d}".format(quota["cores"]["in_use"]))
max_cores_limit = len("{:.2f}".format(quota["cores"]["limit"]))
max_prj_name = len("project")

for project in quota["projects"].values():
max_prj_name = max(len(project["name"]), max_prj_name)
max_ram = max(len("{:d}".format(project["ram"])), max_ram)
max_cores = max(len("{:d}".format(project["cores"])),
max_cores)

separator = "-" * (max_prj_name + max_cores + max_cores_in_use +
max_cores_limit + max_ram + max_ram_in_use +
max_ram_limit + 48)

title = "| {0:%ss} | {1:%ss} | {2:%ss} |\n" % (
max_prj_name,
max_cores + max_cores_in_use + max_cores_limit + 19,
max_ram + max_ram_in_use + max_ram_limit + 19)

raw = "| {0:%ss} | in use={1:%d} ({2:%d}) | limit={3:%ss} |" \
" in use={4:%d} ({5:%d}) | limit={6:%ss} |\n"
raw = raw % (max_prj_name, max_cores, max_cores_in_use,
max_cores_limit, max_ram, max_ram_in_use,
max_ram_limit)

msg = separator + "\n"
msg += title.format("project", "cores", "ram (MB)")
msg += separator + "\n"

for project in quota["projects"].values():
msg += raw.format(
project["name"], project["cores"],
quota["cores"]["in_use"],
"{:.2f}".format(quota["cores"]["limit"]),
project["ram"],
quota["ram"]["in_use"],
"{:.2f}".format(quota["ram"]["limit"]))
msg += separator + "\n"

print(msg)
print(tabulate(table, headers, tablefmt="fancy_grid"))

class GetPriority(Execute):
class QuotaCommand(ExecuteCommand):

def __init__(self):
super(GetPriority, self).__init__("GET_PRIORITY")
super(QuotaCommand, self).__init__("QuotaCommand")

def configureParser(self, subparser):
subparser.add_parser("get_priority",
add_help=True,
help="shows the users priority")
quota_parser = subparser.add_parser('quota')
quota_subparsers = quota_parser.add_subparsers(dest="command")
show_parser = quota_subparsers.add_parser("show", add_help=True,
help="shows the quota info")
group = show_parser.add_mutually_exclusive_group()
group.add_argument("-i", "--project_id", metavar="<id>")
group.add_argument("-n", "--project_name", metavar="<name>")
group.add_argument("-a", "--all_projects", action="store_true")
group.add_argument("-s", "--shared", action="store_true")

def sendRequest(self, synergy_url, args):
super(GetPriority, self).sendRequest(
synergy_url + "/synergy/execute",
"FairShareManager",
self.getName())
def execute(self, synergy_url, args):
if args.command == "show":
command = "show"
cmd_args = {"shared": args.shared,
"project_id": args.project_id,
"project_name": args.project_name,
"all_projects": args.all_projects}

def log(self):
projects = self.getResults()
result = super(QuotaCommand, self).execute(synergy_url,
"QuotaManager",
command,
args=cmd_args)

max_prj = len("project")
max_user = len("user")
max_priority = len("priority")
if isinstance(result, SharedQuota):
self.printSharedQuota(result)
elif isinstance(result, Project):
self.printProjects([result])
else:
self.printProjects(result)

for prj_name, users in projects.items():
max_prj = max(len(prj_name), max_prj)
def printProjects(self, projects):
table = []
headers = ["project", "private quota", "shared quota", "share", "TTL"]

for user_name, priority in users.items():
max_user = max(len(user_name), max_user)
max_priority = max(len("{:.2f}".format(priority)),
max_priority)
for project in projects:
share = project.getShare()
norm_share = share.getNormalizedValue()
quota = project.getQuota()
vcpus_size = quota.getSize("vcpus", private=False)
vcpus_usage = quota.getUsage("vcpus", private=False)
memory_size = quota.getSize("memory", private=False)
memory_usage = quota.getUsage("memory", private=False)

separator = "-" * (max_prj + max_user + max_priority + 10) + "\n"
row = []
row.append(project.getName())

raw = "| {0:%ss} | {1:%ss} | {2:%ss} |\n" % (
max_prj, max_user, max_priority)
private = "vcpus: {:.2f} of {:.2f} | memory: {:.2f} of "\
"{:.2f}".format(quota.getUsage("vcpus"),
quota.getSize("vcpus"),
quota.getUsage("memory"),
quota.getSize("memory"))

msg = separator
msg += raw.format("project", "user", "priority")
msg += separator
shared = "vcpus: {:.2f} of {:.2f} | memory: {:.2f} of {:.2f} | "\
"share: {:.2f}% | TTL: {:.2f}".format(vcpus_usage,
vcpus_size,
memory_usage,
memory_size,
norm_share,
project.getTTL())

for prj_name in sorted(projects.keys()):
for user_name in sorted(projects[prj_name].keys()):
msg += raw.format(
prj_name,
user_name,
"{:.2f}".format(projects[prj_name][user_name]))
row.append(private)
row.append(shared)

msg += separator
table.append(row)

print(msg)
print(tabulate(table, headers, tablefmt="fancy_grid"))

def printSharedQuota(self, quota):
table = []
headers = ["resource", "used", "size"]
resources = ["vcpus", "memory", "instances"]

for resource in resources:
row = [resource, quota.getUsage(resource), quota.getSize(resource)]
table.append(row)

print(tabulate(table, headers, tablefmt="fancy_grid"))

class GetQueue(Execute):
class UsageCommand(ExecuteCommand):

def __init__(self):
super(GetQueue, self).__init__("GET_QUEUE")
super(UsageCommand, self).__init__("UsageCommand")

def configureParser(self, subparser):
subparser.add_parser("get_queue",
add_help=True,
help="shows the queue info")
usage_parser = subparser.add_parser('usage')
usage_subparsers = usage_parser.add_subparsers(dest="command")
show_parser = usage_subparsers.add_parser("show", add_help=True,
help="shows the usage info")
group = show_parser.add_mutually_exclusive_group()
group.add_argument("-i", "--user_id", metavar="<id>")
group.add_argument("-n", "--user_name", metavar="<name>")
group.add_argument("-a", "--all_users", action="store_true")

def sendRequest(self, synergy_url, args):
super(GetQueue, self).sendRequest(
synergy_url + "/synergy/execute",
"QueueManager",
self.getName(),
{"name": "DYNAMIC"})
group = show_parser.add_mutually_exclusive_group()
group.add_argument("-d", "--project_id", metavar="<id>")
group.add_argument("-m", "--project_name", metavar="<name>")

def log(self):
queue = self.getResults()
def execute(self, synergy_url, args):
if args.command == "show":
command = "show"
cmd_args = {"user_id": args.user_id,
"user_name": args.user_name,
"all_users": args.all_users,
"project_id": args.project_id,
"project_name": args.project_name}

max_status = len("status")
max_queue = max(len(queue["name"]), len("queue"))
max_size = max(len("{:d}".format(queue["size"])), len("size"))
result = super(UsageCommand, self).execute(synergy_url,
"SchedulerManager",
command,
args=cmd_args)

separator = "-" * (max_queue + max_status + max_size + 10) + "\n"
if isinstance(result, Project):
self.printProject(result)
elif isinstance(result, User):
self.printUsers([result])
else:
self.printUsers(result)

raw = "| {0:%ss} | {1:%ss} | {2:%ss} |\n" % (
max_queue, max_status, max_size)
def printProject(self, project):
if not project:
return

msg = separator
msg += raw.format("queue", "status", "size")
msg += separator
data = project.getData()
share = project.getShare()
date_format = "{:%d %b %Y %H:%M:%S}"
from_date = date_format.format(data["time_window_from_date"])
to_date = date_format.format(data["time_window_to_date"])

msg += raw.format(queue["name"],
queue["status"],
"{:d}".format(queue["size"]))
headers = ["project",
"shared quota (%s - %s)" % (from_date, to_date),
"share"]

msg += separator
table = []
row = []
row.append(project.getName())

print(msg)
shared = "vcpus: {:.2f}% | memory: {:.2f}%".format(
data["effective_vcpus"] * 100, data["effective_memory"] * 100)

row.append(shared)
row.append("{:.2f}%".format(share.getNormalizedValue() * 100))

class GetShare(Execute):
table.append(row)

def __init__(self):
super(GetShare, self).__init__("GET_SHARE")
print(tabulate(table, headers, tablefmt="fancy_grid"))

def configureParser(self, subparser):
parser = subparser.add_parser("get_share",
add_help=True,
help="shows the users share")
def printUsers(self, users):
if not users:
return

parser.add_argument("--long",
action='store_true',
help="shows more details")
table = []

def sendRequest(self, synergy_url, args):
self.long = args.long
date_format = "{:%d %b %Y %H:%M:%S}"
data = users[0].getData()
from_date = date_format.format(data["time_window_from_date"])
to_date = date_format.format(data["time_window_to_date"])

super(GetShare, self).sendRequest(
synergy_url + "/synergy/execute",
"FairShareManager",
"GET_PROJECTS")
headers = ["user",
"shared quota (%s - %s)" % (from_date, to_date),
"share",
"priority"]

def log(self):
projects = self.getResults()
for user in users:
share = user.getShare()
data = user.getData()
priority = user.getPriority()

max_prj = len("project")
max_usr = len("user")
max_prj_share = len("share")
max_usr_share = len("share")
row = []
row.append(user.getName())

if self.long:
for project in projects.values():
max_prj = max(len(project["name"]), max_prj)
max_prj_share = max(len("{:.2f}% ({:.2f})".format(
project["norm_share"] * 100, project["share"])),
max_prj_share)
row.append("vcpus: {:.2f}% | memory: {:.2f}%".format(
data["actual_rel_vcpus"] * 100,
data["actual_rel_memory"] * 100))

for user in project["users"].values():
max_usr = max(len(user["name"]), max_usr)
max_usr_share = max(
len("{:.2f}%".format(user["norm_share"] * 100)),
max_usr_share)
row.append("{:.2f}%".format(share.getNormalizedValue() * 100))
row.append("{:.2f}".format(priority.getValue()))

separator = "-" * (max_prj + max_usr + max_prj_share +
max_usr_share + 13) + "\n"
table.append(row)

raw = "| {0:%ss} | {1:%ss} | {2:%ss} | {3:%ss} |\n" % (
max_prj, max_prj_share, max_usr, max_usr_share)

msg = separator
msg += raw.format("project", "share", "user", "share")
msg += separator

for project in projects.values():
for user in project["users"].values():
msg += raw.format(
project["name"],
"{:.2f}% ({:.2f})".format(project["norm_share"] * 100,
project["share"]),
user["name"],
"{:.2f}%".format(user["norm_share"] * 100))

msg += separator
print(msg)
else:
for project in projects.values():
max_prj = max(len(project["name"]), max_prj)
max_prj_share = max(len("{:.2f}% ({:.2f})".format(
project["norm_share"] * 100, project["share"])),
max_prj_share)

separator = "-" * (max_prj + max_prj_share + 7) + "\n"

raw = "| {0:%ss} | {1:%ss} |\n" % (max_prj, max_prj_share)

msg = separator
msg += raw.format("project", "share")
msg += separator

for project in projects.values():
msg += raw.format(
project["name"],
"{:.2f}% ({:.2f})".format(project["norm_share"] * 100,
project["share"]))

msg += separator
print(msg)

class GetUsage(Execute):

def __init__(self):
super(GetUsage, self).__init__("GET_USAGE")

def configureParser(self, subparser):
subparser.add_parser("get_usage",
add_help=True,
help="retrieve the resource usages")

def sendRequest(self, synergy_url, args):
super(GetUsage, self).sendRequest(
synergy_url + "/synergy/execute",
"FairShareManager",
"GET_PROJECTS")

def log(self):
projects = self.getResults()

max_prj = len("project")
max_usr = len("user")
max_prj_cores = len("cores")
max_usr_cores = len("cores")
max_prj_ram = len("ram")
max_usr_ram = len("ram (abs)")

for project in projects.values():
usage = project["usage"]

max_prj = max(len(project["name"]), max_prj)
max_prj_cores = max(len(
"{:.2f}%".format(usage["effective_cores"] * 100)),
max_prj_cores)

max_prj_ram = max(len(
"{:.2f}%".format(usage["effective_ram"] * 100)),
max_prj_ram)

for user in project["users"].values():
usage = user["usage"]

max_usr = max(len(user["name"]), max_usr)
max_usr_cores = max(len("{:.2f}% ({:.2f})%".format(
usage["effective_rel_cores"] * 100,
usage["norm_cores"] * 100)),
max_usr_cores)

max_usr_ram = max(len("{:.2f}% ({:.2f})%".format(
usage["effective_rel_ram"] * 100,
usage["norm_ram"] * 100)),
max_usr_ram)

separator = "-" * (max_prj + max_usr + max_prj_cores +
max_usr_cores + max_prj_ram +
max_usr_ram + 19) + "\n"

raw = "| {0:%ss} | {1:%ss} | {2:%ss} | {3:%ss} | {4:%ss} | " \
"{5:%ss} | \n" % (max_prj, max_prj_cores, max_prj_ram,
max_usr, max_usr_cores, max_usr_ram)

msg = separator
msg += raw.format("project", "cores", "ram",
"user", "cores (abs)", "ram (abs)")
msg += separator

for project in projects.values():
prj_usage = project["usage"]

for user in project["users"].values():
usr_usage = user["usage"]

prj_cores = "{:.2f}%".format(
prj_usage["effective_cores"] * 100)

prj_ram = "{:.2f}%".format(prj_usage["effective_ram"] * 100)

usr_cores = "{:.2f}% ({:.2f}%)".format(
usr_usage["effective_rel_cores"] * 100,
usr_usage["norm_cores"] * 100)

usr_ram = "{:.2f}% ({:.2f}%)".format(
usr_usage["effective_rel_ram"] * 100,
usr_usage["norm_ram"] * 100)

msg += raw.format(
project["name"], prj_cores, prj_ram,
user["name"], usr_cores, usr_ram)
msg += separator
print(msg)
print(tabulate(table, headers, tablefmt="fancy_grid"))
@@ -46,6 +46,12 @@ class Hypervisor(Service):
def setState(self, state):
self.set("state", state)

def getStatus(self):
return self.get("status")

def setStatus(self, status):
self.set("status", status)

def getWorkload(self):
return self.get("workload")
@@ -72,21 +72,3 @@ class Project(SynergyObject):
def setEnabled(self, enabled=True):
self.set("enabled", enabled)

def main():
project = Project()
project.setId("22222222")
project.setName("LISA")

print(project.getName())
ser = project.serialize()
print(ser)

project1 = SynergyObject.deserialize(ser)
print(project1.serialize())
print(project1.getName())

if __name__ == "__main__":
main()
@@ -1,4 +1,4 @@
import synergy.common.utils as utils
import utils

from datetime import datetime
from synergy.common.serializer import SynergyObject
@@ -69,10 +69,11 @@ class Server(SynergyObject):
def setMetadata(self, metadata):
self.set("metadata", metadata)

if "quota" in metadata and metadata["quota"] == "shared":
self.setType("ephemeral")
else:
self.setType("permanent")
if "quota" in metadata:
if metadata["quota"] == "shared":
self.setType("ephemeral")
else:
self.setType("permanent")

def getUserData(self):
return self.get("userdata")
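The setMetadata() change above is behavioral, not just stylistic: the old code forced the type to "permanent" whenever the "quota" key was missing, while the new code leaves the type untouched in that case. A self-contained stand-in illustrating the new logic (class and attribute names are invented):

class ServerSketch(object):
    def __init__(self):
        self.type = "permanent"
        self.metadata = {}

    def set_metadata(self, metadata):
        # Mirrors the new setMetadata(): only react when "quota" is set.
        self.metadata = metadata
        if "quota" in metadata:
            if metadata["quota"] == "shared":
                self.type = "ephemeral"
            else:
                self.type = "permanent"

s = ServerSketch()
s.set_metadata({"quota": "shared"})
assert s.type == "ephemeral"
s.set_metadata({})              # old code would reset this to "permanent"
assert s.type == "ephemeral"    # new code keeps the previous type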
@@ -50,11 +50,8 @@ class Service(SynergyObject):
def setDescription(self, description):
self.set("description", description)

def getStatus(self):
return self.get("status")

def setStatus(self, status):
self.set("status", status)

def isEnabled(self):
return self.get("status") == "enabled"
return self.get("enabled")

def setEnabled(self, enabled=True):
self.set("enabled", enabled)
@@ -59,24 +59,3 @@ class User(SynergyObject):
def setEnabled(self, enabled=True):
self.set("enabled", enabled)

def main():
user = User()
user.setId("22222222")
user.setName("LISA")
user.setProjectId("pippo")
data = user.getData()
data["a"] = "b"
share = user.getShare()
share.setValue("10")

ser = user.serialize()
print(ser)

user1 = SynergyObject.deserialize(ser)
print(user1.serialize())

if __name__ == "__main__":
main()
@@ -1,6 +1,7 @@
import logging
import threading

from common.user import User
from datetime import datetime
from datetime import timedelta

@@ -37,16 +38,17 @@ LOG = logging.getLogger(__name__)
class FairShareManager(Manager):

def __init__(self):
super(FairShareManager, self).__init__(name="FairShareManager")
super(FairShareManager, self).__init__("FairShareManager")

self.config_opts = [
cfg.IntOpt('periods', default=3),
cfg.IntOpt('period_length', default=7),
cfg.FloatOpt('default_share', default=10.0),
cfg.FloatOpt('decay_weight', default=0.5, help="the decay weight"),
cfg.IntOpt('age_weight', default=1000, help="the age weight"),
cfg.IntOpt('vcpus_weight', default=10000, help="the vcpus weight"),
cfg.IntOpt('memory_weight', default=7000, help="the memory weight")
cfg.FloatOpt('decay_weight', default=0.5,
help="the decay weight (float value [0,1])"),
cfg.IntOpt('age_weight', default=10, help="the age weight"),
cfg.IntOpt('vcpus_weight', default=100, help="the vcpus weight"),
cfg.IntOpt('memory_weight', default=70, help="the memory weight")
]

def setup(self):
@@ -78,6 +80,11 @@ class FairShareManager(Manager):
self.keystone_manager = self.getManager("KeystoneManager")
self.condition = threading.Condition()

if self.decay_weight < 0:
self.decay_weight = float(0)
elif self.decay_weight > 1:
self.decay_weight = float(1)

def execute(self, command, *args, **kargs):
if command == "ADD_PROJECT":
return self.addProject(*args, **kargs)
@@ -122,16 +129,14 @@ class FairShareManager(Manager):
if prj_id not in self.projects:
raise Exception("project=%s not found!" % prj_id)

if user_id not in self.projects[prj_id]["users"]:
user = self.projects[prj_id].getUser(id=user_id)
if not user:
raise Exception("user=%s not found!" % user_id)

fair_share_cores = 0
fair_share_ram = 0

with self.condition:
user = self.projects[prj_id]["users"].get(user_id)
fair_share_cores = user["fairshare_cores"]
fair_share_ram = user["fairshare_ram"]
priority = user.getPriority()
fairshare_vcpus = priority.getFairShare("vcpus")
fairshare_memory = priority.getFairShare("memory")

self.condition.notifyAll()

@@ -143,31 +148,27 @@ class FairShareManager(Manager):
diff = (now - timestamp)
minutes = diff.seconds / 60
priority = (float(self.age_weight) * minutes +
float(self.vcpus_weight) * fair_share_cores +
float(self.memory_weight) * fair_share_ram -
float(self.vcpus_weight) * fairshare_vcpus +
float(self.memory_weight) * fairshare_memory -
float(self.age_weight) * retry)

return int(priority)

def addProject(self, prj_id, prj_name, share=float(0)):
if prj_id not in self.projects:
if share == 0:
share = self.default_share
def addProject(self, project):
if self.projects.get(project.getId(), None):
raise Exception("project %r already exists!" % (project.getId()))

with self.condition:
self.projects[prj_id] = {"id": prj_id,
"name": prj_name,
"type": "dynamic",
"users": {},
"usage": {},
"share": share}
self.condition.notifyAll()
prj_share = project.getShare()
if prj_share.getValue() == 0:
prj_share.setValue(self.default_share)

self.projects[project.getId()] = project

def getProject(self, prj_id):
if prj_id not in self.projects:
raise Exception("project name=%r not found!" % prj_id)

return self.projects.get(prj_id)
return self.projects.get(prj_id, None)

def getProjects(self):
return self.projects
@@ -183,187 +184,165 @@ class FairShareManager(Manager):
return

total_prj_share = float(0)
total_usage_ram = float(0)
total_usage_cores = float(0)
total_actual_usage_cores = float(0)
total_actual_usage_ram = float(0)

users = self.keystone_manager.execute("GET_USERS")

if not users:
LOG.error("cannot retrieve the users list from KeystoneManager")
return

for user in users:
user_id = str(user["id"])
user_name = str(user["name"])
user_projects = self.keystone_manager.execute("GET_USER_PROJECTS",
id=user_id)

for project in user_projects:
prj_id = str(project["id"])

if prj_id not in self.projects:
continue

p_users = self.projects[prj_id]["users"]

if user_id not in p_users:
p_users[user_id] = {"name": user_name,
"share": self.default_share,
"usage": {"ram": float(0),
"cores": float(0)}}
else:
p_users[user_id]["usage"]["ram"] = float(0)
p_users[user_id]["usage"]["cores"] = float(0)
total_memory = float(0)
total_vcpus = float(0)

to_date = datetime.utcnow()

for x in xrange(self.periods):
time_window_from_date = to_date
time_window_to_date = to_date

for prj_id, project in self.projects.items():
for user in project.getUsers():
data = user.getData()
data["vcpus"] = float(0)
data["memory"] = float(0)

for period in xrange(self.periods):
default_share = self.default_share
decay = self.decay_weight ** x
decay = self.decay_weight ** period
from_date = to_date - timedelta(days=(self.period_length))
time_window_from_date = from_date

usages = self.nova_manager.execute("GET_RESOURCE_USAGE",
prj_ids=self.projects.keys(),
from_date=from_date,
to_date=to_date)
for prj_id, project in self.projects.items():
usages = self.nova_manager.getProjectUsage(
prj_id, from_date, to_date)

for prj_id, users in usages.items():
project = self.projects[prj_id]
for user_id, usage_rec in usages.items():
user = project.getUser(id=user_id)

for user_id, usage_record in users.items():
if user_id not in project["users"]:
project["users"][user_id] = {"name": user_name,
"share": default_share,
"usage": {}}
if not user:
user = User()
user.setId(user_id)
user.getShare().setValue(default_share)

user_usage = project["users"][user_id]["usage"]
user_usage["ram"] += decay * usage_record["ram"]
user_usage["cores"] += decay * usage_record["cores"]
data = user.getData()
data["vcpus"] = float(0)
data["memory"] = float(0)

total_usage_ram += user_usage["ram"]
total_usage_cores += user_usage["cores"]
project.addUser(user)

decay_vcpus = decay * usage_rec["vcpus"]
decay_memory = decay * usage_rec["memory"]

data = user.getData()
data["vcpus"] += decay_vcpus
data["memory"] += decay_memory

total_vcpus += decay_vcpus
total_memory += decay_memory

to_date = from_date

for project in self.projects.values():
if "share" not in project or project["share"] == 0:
project["share"] = self.default_share
prj_share = project.getShare()

if prj_share.getValue() == 0:
prj_share.setValue(self.default_share)

# check the share for each user and update the usage_record
users = project["users"]
prj_id = project["id"]
prj_share = project["share"]
sibling_share = float(0)

for user_id, user in users.items():
if "share" not in user or user["share"] == 0:
user["share"] = self.default_share
for user in project.getUsers():
user_share = user.getShare()

if len(users) == 1:
user["share"] = prj_share
sibling_share = prj_share
if user_share.getValue() == 0:
user_share.setValue(self.default_share)

if len(project.getUsers()) == 1:
user_share.setValue(prj_share.getValue())
sibling_share = prj_share.getValue()
else:
sibling_share += user["share"]
sibling_share += user_share.getValue()

project["sibling_share"] = sibling_share
total_prj_share += prj_share
for user in project.getUsers():
user_share = user.getShare()
user_share.setSiblingValue(sibling_share)

total_prj_share += prj_share.getValue()

for prj_id, project in self.projects.items():
sibling_share = project["sibling_share"]
prj_share = project["share"]
actual_usage_cores = float(0)
actual_usage_ram = float(0)
prj_data = project.getData()
prj_data["actual_memory"] = float(0)
prj_data["actual_vcpus"] = float(0)
prj_data["effective_memory"] = float(0)
prj_data["effective_vcpus"] = float(0)
prj_data["time_window_from_date"] = time_window_from_date
prj_data["time_window_to_date"] = time_window_to_date

users = project["users"]
prj_share = project.getShare()
prj_share.setSiblingValue(total_prj_share)
prj_share.setNormalizedValue(
prj_share.getValue() / prj_share.getSiblingValue())

for user_id, user in users.items():
for user in project.getUsers():
# for each user the normalized share
# is calculated (0 <= user_norm_share <= 1)
user_share = user["share"]
user_usage = user["usage"]
user_usage["norm_ram"] = user_usage["ram"]
user_usage["norm_cores"] = user_usage["cores"]
usr_share = user.getShare()
usr_share.setNormalizedValue(
usr_share.getValue() / usr_share.getSiblingValue() *
prj_share.getNormalizedValue())

if prj_share > 0 and sibling_share > 0 and total_prj_share > 0:
user["norm_share"] = (user_share / sibling_share) * \
(prj_share / total_prj_share)
project["norm_share"] = prj_share / total_prj_share
else:
user["norm_share"] = user_share
project["norm_share"] = prj_share
usr_data = user.getData()
usr_data["actual_memory"] = usr_data["memory"]
usr_data["actual_vcpus"] = usr_data["vcpus"]
usr_data["time_window_from_date"] = time_window_from_date
usr_data["time_window_to_date"] = time_window_to_date

if total_usage_ram > 0:
user_usage["norm_ram"] /= total_usage_ram
if total_memory > 0:
usr_data["actual_memory"] /= total_memory

if total_usage_cores > 0:
user_usage["norm_cores"] /= total_usage_cores
if total_vcpus > 0:
usr_data["actual_vcpus"] /= total_vcpus

actual_usage_ram += user_usage["norm_ram"]
actual_usage_cores += user_usage["norm_cores"]

project["usage"]["actual_ram"] = actual_usage_ram
project["usage"]["actual_cores"] = actual_usage_cores

total_actual_usage_ram += actual_usage_ram
total_actual_usage_cores += actual_usage_cores
prj_data["actual_memory"] += usr_data["actual_memory"]
prj_data["actual_vcpus"] += usr_data["actual_vcpus"]

for project in self.projects.values():
actual_usage_ram = project["usage"]["actual_ram"]
actual_usage_cores = project["usage"]["actual_cores"]
prj_share = project["share"]
sibling_share = project["sibling_share"]
users = project["users"]
prj_data = project.getData()
prj_data["effective_memory"] = prj_data["actual_memory"]
prj_data["effective_vcpus"] = prj_data["actual_vcpus"]

effect_prj_ram_usage = actual_usage_ram
effect_prj_cores_usage = actual_usage_cores
for user in project.getUsers():
usr_priority = user.getPriority()
usr_share = user.getShare()
share = usr_share.getValue()
sibling_share = usr_share.getSiblingValue()
norm_share = usr_share.getNormalizedValue()
usr_data = user.getData()
usr_data["effective_vcpus"] = float(0)
usr_data["effective_memory"] = float(0)
usr_data["actual_rel_vcpus"] = float(0)
usr_data["actual_rel_memory"] = float(0)

project["usage"]["effective_ram"] = effect_prj_ram_usage
project["usage"]["effective_cores"] = effect_prj_cores_usage
if prj_data["actual_vcpus"] > 0:
usr_data["actual_rel_vcpus"] = usr_data["actual_vcpus"]
usr_data["actual_rel_vcpus"] /= prj_data["actual_vcpus"]

for user in users.values():
user["fairshare_ram"] = float(0)
user["fairshare_cores"] = float(0)
user_share = user["share"]
user_usage = user["usage"]
user_usage["effective_cores"] = float(0)
user_usage["effective_ram"] = float(0)
if prj_data["actual_memory"] > 0:
usr_data["actual_rel_memory"] = usr_data["actual_memory"]
usr_data["actual_rel_memory"] /= prj_data["actual_memory"]

if user_share > 0:
norm_share = user["norm_share"]
norm_usage_ram = user_usage["norm_ram"]
norm_usage_cores = user_usage["norm_cores"]
effective_memory = (usr_data["actual_memory"] + (
(prj_data["effective_memory"] -
usr_data["actual_memory"]) *
share / sibling_share))

effect_usage_ram = (norm_usage_ram + (
(effect_prj_cores_usage -
norm_usage_ram) *
user_share / sibling_share))
effective_vcpus = (usr_data["actual_vcpus"] + (
(prj_data["effective_vcpus"] -
usr_data["actual_vcpus"]) *
share / sibling_share))

effect_usage_cores = (norm_usage_cores + (
(effect_prj_cores_usage -
norm_usage_cores) *
user_share / sibling_share))
usr_data["effective_memory"] = effective_memory
usr_data["effective_cores"] = effective_vcpus

user_usage["effective_ram"] = effect_usage_ram
user_usage["effective_rel_ram"] = float(0)
f_memory = 2 ** (-effective_memory / norm_share)
usr_priority.setFairShare("memory", f_memory)

user_usage["effective_cores"] = effect_usage_cores
user_usage["effective_rel_cores"] = float(0)
f_vcpus = 2 ** (-effective_vcpus / norm_share)
usr_priority.setFairShare("vcpus", f_vcpus)

if actual_usage_cores > 0:
user_usage["effective_rel_cores"] = norm_usage_cores
user_usage["effective_rel_cores"] /= actual_usage_cores
usr_priority.setValue(float(self.vcpus_weight) * f_vcpus +
float(self.memory_weight) * f_memory)

if actual_usage_ram > 0:
user_usage["effective_rel_ram"] = norm_usage_ram
user_usage["effective_rel_ram"] /= actual_usage_ram

if norm_share > 0:
f_ram = 2 ** (-effect_usage_ram / norm_share)
user["fairshare_ram"] = f_ram

f_cores = 2 ** (-effect_usage_cores / norm_share)
user["fairshare_cores"] = f_cores

LOG.debug("fairshare project %s" % project)
LOG.debug("fairshare project %s" % project.serialize())
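For reference, the fair-share math introduced above boils down to a half-decay curve: a user's per-resource factor is 2 ** (-effective_usage / norm_share), and the priority is a weighted sum of the vcpus and memory factors. A hedged sketch with invented usage numbers (the weights are the new defaults from this diff):

def fairshare_factor(effective_usage, norm_share):
    # Halves every time effective usage grows by one normalized share.
    return 2 ** (-effective_usage / norm_share)

vcpus_weight, memory_weight = 100, 70    # new defaults above
f_vcpus = fairshare_factor(0.30, 0.25)   # heavy vcpus consumer
f_memory = fairshare_factor(0.10, 0.25)  # light memory consumer
priority = vcpus_weight * f_vcpus + memory_weight * f_memory
print(round(priority, 2))  # lighter users end up with higher priority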
@@ -1,15 +1,21 @@
import json
import logging
import os.path
import requests

from datetime import datetime

try:
from oslo_config import cfg
except ImportError:
from oslo.config import cfg

from common.endpoint import Endpoint
from common.project import Project
from common.role import Role
from common.service import Service
from common.token import Token
from common.trust import Trust
from common.user import User

from synergy.common.manager import Manager
@@ -32,233 +38,14 @@ See the License for the specific language governing
permissions and limitations under the License."""

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

class Trust(object):

def __init__(self, data):
data = data["trust"]

self.id = data["id"]
self.impersonations = data["impersonation"]
self.roles_links = data["roles_links"]
self.trustor_user_id = data["trustor_user_id"]
self.trustee_user_id = data["trustee_user_id"]
self.links = data["links"]
self.roles = data["roles"]
self.remaining_uses = data["remaining_uses"]
self.expires_at = None

if data["expires_at"] is not None:
self.expires_at = datetime.strptime(data["expires_at"],
"%Y-%m-%dT%H:%M:%S.%fZ")
self.project_id = data["project_id"]

def getId(self):
return self.id

def isImpersonations(self):
return self.impersonations

def getRolesLinks(self):
return self.roles_links

def getTrustorUserId(self):
return self.trustor_user_id

def getTrusteeUserId(self):
return self.trustee_user_id

def getlinks(self):
return self.links

def getProjectId(self):
return self.project_id

def getRoles(self):
return self.roles

def getRemainingUses(self):
return self.remaining_uses

def getExpiration(self):
return self.expires_at

def isExpired(self):
if self.getExpiration() is None:
return False

return self.getExpiration() < datetime.utcnow()

class Token(object):

def __init__(self, token, data):
self.id = token

data = data["token"]
self.roles = data["roles"]
self.catalog = data["catalog"]
self.issued_at = datetime.strptime(data["issued_at"],
"%Y-%m-%dT%H:%M:%S.%fZ")
self.expires_at = datetime.strptime(data["expires_at"],
"%Y-%m-%dT%H:%M:%S.%fZ")
self.project = data["project"]
self.user = data["user"]
self.extras = data["extras"]

def getCatalog(self, service_name=None, interface="public"):
if service_name:
for service in self.catalog:
if service["name"] == service_name:
for endpoint in service["endpoints"]:
if endpoint["interface"] == interface:
return endpoint
return None
else:
return self.catalog

def getExpiration(self):
return self.expires_at

def getId(self):
return self.id

def getExtras(self):
return self.extras

def getProject(self):
return self.project

def getRoles(self):
return self.roles

def getUser(self):
return self.user

def isAdmin(self):
if not self.roles:
return False

for role in self.roles:
if role["name"] == "admin":
return True

return False

def issuedAt(self):
return self.issued_at

def isExpired(self):
return self.getExpiration() < datetime.utcnow()

def save(self, filename):
# save to file
with open(filename, 'w') as f:
token = {}
token["catalog"] = self.catalog
token["extras"] = self.extras
token["user"] = self.user
token["project"] = self.project
token["roles"] = self.roles
token["roles"] = self.roles
token["issued_at"] = self.issued_at.isoformat()
token["expires_at"] = self.expires_at.isoformat()

data = {"id": self.id, "token": token}

json.dump(data, f)

@classmethod
def load(cls, filename):
if not os.path.isfile(".auth_token"):
return None

# load from file:
with open(filename, 'r') as f:
try:
data = json.load(f)
return Token(data["id"], data)
# if the file is empty the ValueError will be thrown
except ValueError as ex:
raise ex

def isotime(self, at=None, subsecond=False):
"""Stringify time in ISO 8601 format."""
if not at:
at = datetime.utcnow()

if not subsecond:
st = at.strftime('%Y-%m-%dT%H:%M:%S')
else:
st = at.strftime('%Y-%m-%dT%H:%M:%S.%f')

if at.tzinfo:
tz = at.tzinfo.tzname(None)
else:
tz = 'UTC'

st += ('Z' if tz == 'UTC' else tz)
return st

"""The trustor or grantor of a trust is the person who creates the trust.
The trustor is the one who contributes property to the trust.
The trustee is the person who manages the trust, and is usually appointed
by the trustor. The trustor is also often the trustee in living trusts.
"""
def trust(self, trustee_user, expires_at=None,
project_id=None, roles=None, impersonation=True):
if self.isExpired():
raise Exception("token expired!")

headers = {"Content-Type": "application/json",
"Accept": "application/json",
"User-Agent": "python-novaclient",
"X-Auth-Token": self.getId()}

if roles is None:
roles = self.getRoles()

if project_id is None:
project_id = self.getProject().get("id")

data = {}
data["trust"] = {"impersonation": impersonation,
"project_id": project_id,
"roles": roles,
"trustee_user_id": trustee_user,
"trustor_user_id": self.getUser().get("id")}

if expires_at is not None:
data["trust"]["expires_at"] = self.isotime(expires_at, True)

endpoint = self.getCatalog(service_name="keystone")

if not endpoint:
raise Exception("keystone endpoint not found!")

if "v2.0" in endpoint["url"]:
endpoint["url"] = endpoint["url"].replace("v2.0", "v3")

response = requests.post(url=endpoint["url"] + "/OS-TRUST/trusts",
headers=headers,
data=json.dumps(data))

if response.status_code != requests.codes.ok:
response.raise_for_status()

if not response.text:
raise Exception("trust token failed!")

return Trust(response.json())
LOG = logging.getLogger(__name__)

class KeystoneManager(Manager):

def __init__(self):
super(KeystoneManager, self).__init__(name="KeystoneManager")
super(KeystoneManager, self).__init__("KeystoneManager")

self.config_opts = [
cfg.StrOpt("auth_url",
@@ -295,9 +82,19 @@ class KeystoneManager(Manager):
self.timeout = CONF.KeystoneManager.timeout
self.trust_expiration = CONF.KeystoneManager.trust_expiration
self.token = None
self.auth_public_url = None

self.authenticate()

service = self.getToken().getService("keystone")
if not service:
raise Exception("keystone service not found!")

endpoint = service.getEndpoint("public")
if not endpoint:
raise Exception("keystone endpoint not found!")
self.auth_public_url = endpoint.getURL()

def task(self):
pass

@@ -306,11 +103,9 @@ class KeystoneManager(Manager):

def execute(self, command, *args, **kargs):
if command == "GET_USERS":
return self.getUsers()
return self.getUsers(*args, **kargs)
elif command == "GET_USER":
return self.getProject(*args, **kargs)
elif command == "GET_USER_PROJECTS":
return self.getUserProjects(*args, **kargs)
elif command == "GET_USER_ROLES":
return self.getUserRoles(*args, **kargs)
elif command == "GET_PROJECTS":
@@ -354,7 +149,7 @@ class KeystoneManager(Manager):

headers = {"Content-Type": "application/json",
"Accept": "application/json",
"User-Agent": "python-novaclient"}
"User-Agent": "synergy"}

identity = {"methods": ["password"],
"password": {"user": {"name": self.username,
@@ -383,12 +178,10 @@ class KeystoneManager(Manager):
if not response.text:
raise Exception("authentication failed!")

# print(response.__dict__)

token_subject = response.headers["X-Subject-Token"]
token_data = response.json()

self.token = Token(token_subject, token_data)
self.token = Token.parse(token_subject, token_data)

def getUser(self, id):
try:
@@ -398,36 +191,53 @@ class KeystoneManager(Manager):
raise Exception("error on retrieving the user info (id=%r): %s"
% (id, response["error"]["message"]))

if response:
response = response["user"]

return response

def getUsers(self):
try:
response = self.getResource("users", "GET")
except requests.exceptions.HTTPError as ex:
response = ex.response.json()
raise Exception("error on retrieving the users list: %s"
% response["error"]["message"])
user = None

if response:
response = response["users"]
info = response["user"]

return response
user = User()
user.setId(info["id"])
user.setName(info["name"])
user.setProjectId(info["tenantId"])
user.setEnabled(info["enabled"])

def getUserProjects(self, id):
try:
response = self.getResource("users/%s/projects" % id, "GET")
except requests.exceptions.HTTPError as ex:
response = ex.response.json()
raise Exception("error on retrieving the users's projects "
"(id=%r): %s" % (id, response["error"]["message"]))
return user

def getUsers(self, prj_id=None):
if prj_id:
try:
response = self.getResource("tenants/%s/users" % prj_id,
"GET",
version="v2.0")
except requests.exceptions.HTTPError as ex:
response = ex.response.json()
raise Exception("error on retrieving the project's users "
"(id=%r): %s" % (prj_id,
response["error"]["message"]))
else:
try:
response = self.getResource("/users", "GET")
except requests.exceptions.HTTPError as ex:
response = ex.response.json()
raise Exception("error on retrieving the users list: %s"
% response["error"]["message"])

users = []

if response:
response = response["projects"]
user_info = response["users"]

return response
for info in user_info:
user = User()
user.setId(info["id"])
user.setName(info["name"])
user.setProjectId(info["tenantId"])
user.setEnabled(info["enabled"])

users.append(user)

return users
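The getUsers() rewrite above returns User objects instead of raw JSON and accepts an optional prj_id. A hypothetical call site (the configured manager instance and the project id are assumptions, not part of this diff):

# keystone_manager stands for an initialized KeystoneManager.
users = keystone_manager.execute("GET_USERS", prj_id="8b1a9953c4611296")
for user in users:
    print(user.getId(), user.getName())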
def getUserRoles(self, user_id, project_id):
try:
@@ -439,11 +249,19 @@ class KeystoneManager(Manager):
"prjId=%r): %s" % (user_id,
project_id,
response["error"]["message"]))
roles = []

if response:
response = response["roles"]
roles_info = response["roles"]

return response
for info in roles_info:
role = Role()
role.setId(info["id"])
role.setName(info["name"])

roles.append(role)

return roles

def getProject(self, id):
try:
@@ -454,23 +272,50 @@ class KeystoneManager(Manager):
"error on retrieving the project (id=%r, msg=%s)." %
(id, response["error"]["message"]))

if response:
response = response["project"]

return response

def getProjects(self):
try:
response = self.getResource("/projects", "GET")
except requests.exceptions.HTTPError as ex:
response = ex.response.json()
raise Exception("error on retrieving the projects list: %s"
% response["error"]["message"])
project = None

if response:
response = response["projects"]
info = response["project"]

return response
project = Project()
project.setId(info["id"])
project.setName(info["name"])
project.setEnabled(info["enabled"])

return project

def getProjects(self, usr_id=None):
if usr_id:
try:
response = self.getResource(
"users/%s/projects" % usr_id, "GET")
except requests.exceptions.HTTPError as ex:
response = ex.response.json()
raise Exception("error on retrieving the users's projects (id="
"%r): %s" % (usr_id,
response["error"]["message"]))
else:
try:
response = self.getResource("/projects", "GET")
except requests.exceptions.HTTPError as ex:
response = ex.response.json()
raise Exception("error on retrieving the projects list: %s"
% response["error"]["message"])

projects = []

if response:
projects_info = response["projects"]

for info in projects_info:
project = Project()
project.setId(info["id"])
project.setName(info["name"])
project.setEnabled(info["enabled"])

projects.append(project)

return projects

def getRole(self, id):
try:
@@ -480,10 +325,15 @@ class KeystoneManager(Manager):
raise Exception("error on retrieving the role info (id=%r): %s"
% (id, response["error"]["message"]))

if response:
response = response["role"]
role = None

return response
if response:
info = response["role"]
role = Role()
role.setId(info["id"])
role.setName(info["name"])

return role

def getRoles(self):
try:
@@ -493,10 +343,108 @@ class KeystoneManager(Manager):
raise Exception("error on retrieving the roles list: %s"
% response["error"]["message"])

if response:
response = response["roles"]
roles = []

return response
if response:
roles = response["roles"]

for info in roles:
role = Role()
role.setId(info["id"])
role.setName(info["name"])

roles.append(role)

return roles

def makeTrust(self, trustee_user_id, token=None,
expires_at=None, impersonation=True):
project_id = token.getProject().getId()
roles = token.getRoles()
roles_data = []

for role in roles:
roles_data.append({"id": role.getId(), "name": role.getName()})

data = {}
data["trust"] = {"impersonation": impersonation,
"project_id": project_id,
"roles": roles_data,
"trustee_user_id": trustee_user_id,
"trustor_user_id": token.getUser().getId()}

if expires_at is not None:
data["trust"]["expires_at"] = token.isotime(expires_at, True)

try:
response = self.getResource("/OS-TRUST/trusts",
"POST", data=data, token=token)
except requests.exceptions.HTTPError as ex:
response = ex.response.json()
raise Exception("error on retrieving the trust info (id=%r): %s"
% (id, response["error"]["message"]))

trust = Trust(response["trust"])
trust.keystone_url = self.auth_public_url

return trust

def getTrust(self, id):
try:
response = self.getResource("/OS-TRUST/trusts/%s" % id, "GET")
except requests.exceptions.HTTPError as ex:
response = ex.response.json()
raise Exception("error on retrieving the trust info (id=%r): %s"
% (id, response["error"]["message"]))

trust = None

if response:
trust = Trust(response["trust"])
trust.keystone_url = self.auth_public_url

return trust

def deleteTrust(self, id, token=None):
if not token:
token = self.getToken()

if token.isExpired():
raise Exception("token expired!")

try:
self.getResource("/OS-TRUST/trusts/%s" % id, "DELETE", token=token)
except requests.exceptions.HTTPError as ex:
response = ex.response.json()
raise Exception("error on deleting the trust (id=%r): %s"
% (id, response["error"]["message"]))

def getTrusts(self, user_id=None, isTrustor=True, token=None):
url = "/OS-TRUST/trusts"

if user_id:
if isTrustor:
url += "?trustor_user_id=%s" % user_id
else:
url += "?trustee_user_id=%s" % user_id

try:
response = self.getResource(url, "GET", token=token)
except requests.exceptions.HTTPError as ex:
response = ex.response.json()
raise Exception("error on retrieving the trust list (id=%r): %s"
% (id, response["error"]["message"]))

trusts = []

if response:
for data in response["trusts"]:
trust = Trust(data)
trust.keystone_url = self.auth_public_url

trusts.append(trust)

return trusts
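A hypothetical round trip through the trust helpers above (km stands for an authenticated KeystoneManager; the trustee user id is invented):

token = km.getToken()
trust = km.makeTrust("0c5a3d2e", token=token)  # trustee user id (made up)
print(trust.getId())
km.deleteTrust(trust.getId(), token=token)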
    def getToken(self):
        self.authenticate()
@@ -508,8 +456,8 @@ class KeystoneManager(Manager):

        headers = {"Content-Type": "application/json",
                   "Accept": "application/json",
                   "User-Agent": "python-novaclient",
                   "X-Auth-Project-Id": self.token.getProject()["name"],
                   "User-Agent": "synergy",
                   "X-Auth-Project-Id": self.token.getProject().getName(),
                   "X-Auth-Token": self.token.getId(),
                   "X-Subject-Token": id}

@@ -527,8 +475,8 @@ class KeystoneManager(Manager):

        headers = {"Content-Type": "application/json",
                   "Accept": "application/json",
                   "User-Agent": "python-novaclient",
                   "X-Auth-Project-Id": self.token.getProject()["name"],
                   "User-Agent": "synergy",
                   "X-Auth-Project-Id": self.token.getProject().getName(),
                   "X-Auth-Token": self.token.getId(),
                   "X-Subject-Token": id}

@@ -545,7 +493,7 @@ class KeystoneManager(Manager):
        token_subject = response.headers["X-Subject-Token"]
        token_data = response.json()

        return Token(token_subject, token_data)
        return Token.parse(token_subject, token_data)

    def getEndpoint(self, id=None, service_id=None):
        if id:
@@ -556,9 +504,19 @@ class KeystoneManager(Manager):
                raise Exception("error on retrieving the endpoint (id=%r): %s"
                                % (id, response["error"]["message"]))
            if response:
                response = response["endpoint"]
            info = response["endpoint"]

            return response
            endpoint = Endpoint()
            endpoint.setId(info["id"])
            endpoint.setName(info["name"])
            endpoint.setInterface(info["interface"])
            endpoint.setRegion(info["region"])
            endpoint.setRegionId(info["region_id"])
            endpoint.setServiceId(info["service_id"])
            endpoint.setURL(info["url"])
            endpoint.setEnabled(info["enabled"])

            return endpoint
        elif service_id:
            try:
                endpoints = self.getEndpoints()
@@ -570,7 +528,7 @@ class KeystoneManager(Manager):

            if endpoints:
                for endpoint in endpoints:
                    if endpoint["service_id"] == service_id:
                    if endpoint.getServiceId() == service_id:
                        return endpoint

            return None
@@ -583,10 +541,25 @@ class KeystoneManager(Manager):
            raise Exception("error on retrieving the endpoints list: %s"
                            % response["error"]["message"])

        if response:
            response = response["endpoints"]
        endpoints = []

            return response
        if response:
            endpoints_info = response["endpoints"]

            for info in endpoints_info:
                endpoint = Endpoint()
                endpoint.setId(info["id"])
                endpoint.setName(info["name"])
                endpoint.setInterface(info["interface"])
                endpoint.setRegion(info["region"])
                endpoint.setRegionId(info["region_id"])
                endpoint.setServiceId(info["service_id"])
                endpoint.setURL(info["url"])
                endpoint.setEnabled(info["enabled"])

                endpoints.append(endpoint)

        return endpoints

    def getService(self, id=None, name=None):
        if id:
@@ -598,14 +571,32 @@ class KeystoneManager(Manager):
                                ": %s" % (id, response["error"]["message"]))

            if response:
                response = response["service"]
                return response
            info = response["service"]

            service = Service()
            service.setId(info["id"])
            service.setName(info["name"])
            service.setType(info["type"])
            service.setDescription(info["description"])
            service.setEnabled(info["enabled"])

            for endpoint_info in info.get("endpoints", []):
                endpoint = Endpoint()
                endpoint.setId(endpoint_info["id"])
                endpoint.setInterface(endpoint_info["interface"])
                endpoint.setRegion(endpoint_info["region"])
                endpoint.setRegionId(endpoint_info["region_id"])
                endpoint.setURL(endpoint_info["url"])

                service.getEndpoints().append(endpoint)

            return service
        elif name:
            services = self.getServices()

            if services:
                for service in services:
                    if service["name"] == name:
                    if service.getName() == name:
                        return service

            return None
@@ -618,21 +609,55 @@ class KeystoneManager(Manager):
            raise Exception("error on retrieving the services list: %s"
                            % response["error"]["message"])

        services = []

        if response:
            response = response["services"]
            services_info = response["services"]

            return response
            for info in services_info:
                service = Service()
                service.setId(info["id"])
                service.setName(info["name"])
                service.setType(info["type"])
                service.setDescription(info["description"])
                service.setEnabled(info["enabled"])

    def getResource(self, resource, method, data=None):
        self.authenticate()
                for endpoint_info in service.get("endpoints"):
                    endpoint = Endpoint()
                    endpoint.setId(endpoint_info["id"])
                    endpoint.setInterface(endpoint_info["interface"])
                    endpoint.setRegion(endpoint_info["region"])
                    endpoint.setRegionId(endpoint_info["region_id"])
                    endpoint.setURL(endpoint_info["url"])

        url = self.auth_url + "/" + resource
                    service.getEndpoints().append(endpoint)

                services.append(service)

        return services

    def getResource(
            self, resource, method, version=None, data=None, token=None):
        if token:
            if token.isExpired():
                raise Exception("token expired!")

            url = self.auth_public_url
        else:
            self.authenticate()
            token = self.getToken()
            url = self.auth_url

        if version:
            url = url[:url.rfind("/") + 1] + version

        url = url + "/" + resource

        headers = {"Content-Type": "application/json",
                   "Accept": "application/json",
                   "User-Agent": "python-novaclient",
                   "X-Auth-Project-Id": self.token.getProject()["name"],
                   "X-Auth-Token": self.token.getId()}
                   "User-Agent": "synergy",
                   "X-Auth-Project-Id": token.getProject().getName(),
                   "X-Auth-Token": token.getId()}

        if method == "GET":
            response = requests.get(url,

File diff suppressed because it is too large
@@ -1,18 +1,12 @@
import heapq
import json
import logging
import threading

from datetime import datetime

try:
    from oslo_config import cfg
except ImportError:
    from oslo.config import cfg

from common.queue import QueueDB
from sqlalchemy import create_engine
from sqlalchemy.exc import SQLAlchemyError

from synergy.common.manager import Manager


@@ -38,365 +32,6 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class QueueItem(object):

    def __init__(self, id, user_id, prj_id, priority,
                 retry_count, creation_time, last_update, data=None):
        self.id = id
        self.user_id = user_id
        self.prj_id = prj_id
        self.priority = priority
        self.retry_count = retry_count
        self.creation_time = creation_time
        self.last_update = last_update
        self.data = data

    def getId(self):
        return self.id

    def setId(self, id):
        self.id = id

    def getUserId(self):
        return self.user_id

    def setUserId(self, user_id):
        self.user_id = user_id

    def getProjectId(self):
        return self.prj_id

    def setProjectId(self, prj_id):
        self.prj_id = prj_id

    def getPriority(self):
        return self.priority

    def setPriority(self, priority):
        self.priority = priority

    def getRetryCount(self):
        return self.retry_count

    def setRetryCount(self, retry_count):
        self.retry_count = retry_count

    def incRetryCount(self):
        self.retry_count += 1

    def getCreationTime(self):
        return self.creation_time

    def setCreationTime(self, creation_time):
        self.creation_time = creation_time

    def getLastUpdate(self):
        return self.last_update

    def setLastUpdate(self, last_update):
        self.last_update = last_update

    def getData(self):
        return self.data

    def setData(self, data):
        self.data = data


class PriorityQueue(object):

    def __init__(self):
        self.queue = []
        self._index = 0

    def put(self, priority, item):
        heapq.heappush(self.queue, (-priority, self._index, item))
        self._index += 1

    def get(self):
        return heapq.heappop(self.queue)[-1]

    def size(self):
        return len(self.queue)

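PriorityQueue stores each entry as a (-priority, index, item) tuple, so Python's min-heap heapq pops the highest priority first, and the monotonically increasing index breaks ties in FIFO order. A quick illustration with hypothetical values:

    pq = PriorityQueue()
    pq.put(10, "low")
    pq.put(99, "high")
    pq.put(99, "high-but-later")

    pq.get()   # -> "high"            (largest priority wins)
    pq.get()   # -> "high-but-later"  (equal priority: insertion order)
    pq.get()   # -> "low"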
class Queue(object):

    def __init__(self, name, db_engine, fairshare_manager=None):
        self.name = name
        self.db_engine = db_engine
        self.fairshare_manager = fairshare_manager
        self.is_closed = False
        self.priority_updater = None
        self.condition = threading.Condition()
        self.pqueue = PriorityQueue()
        self.createTable()
        self.buildFromDB()

    def getName(self):
        return self.name

    def getSize(self):
        connection = self.db_engine.connect()

        try:
            QUERY = "select count(*) from `%s`" % self.name
            result = connection.execute(QUERY)

            row = result.fetchone()

            return row[0]
        except SQLAlchemyError as ex:
            raise Exception(ex.message)
        finally:
            connection.close()

    def createTable(self):
        TABLE = """CREATE TABLE IF NOT EXISTS `%s` (`id` BIGINT NOT NULL \
AUTO_INCREMENT PRIMARY KEY, `priority` INT DEFAULT 0, user_id CHAR(40) \
NOT NULL, prj_id CHAR(40) NOT NULL, `retry_count` INT DEFAULT 0, \
`creation_time` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, `last_update` \
TIMESTAMP NULL, `data` TEXT NOT NULL ) ENGINE=InnoDB""" % self.name

        connection = self.db_engine.connect()

        try:
            connection.execute(TABLE)
        except SQLAlchemyError as ex:
            raise Exception(ex.message)
        except Exception as ex:
            raise Exception(ex.message)
        finally:
            connection.close()

    def close(self):
        if not self.is_closed:
            self.is_closed = True

            with self.condition:
                self.condition.notifyAll()

    def isClosed(self):
        return self.is_closed

    def buildFromDB(self):
        connection = self.db_engine.connect()

        try:
            QUERY = "select id, user_id, prj_id, priority, retry_count, " \
                    "creation_time, last_update from `%s`" % self.name
            result = connection.execute(QUERY)

            for row in result:
                queue_item = QueueItem(row[0], row[1], row[2],
                                       row[3], row[4], row[5], row[6])

                self.pqueue.put(row[3], queue_item)
        except SQLAlchemyError as ex:
            raise Exception(ex.message)
        finally:
            connection.close()

        with self.condition:
            self.condition.notifyAll()

    def insertItem(self, user_id, prj_id, priority, data):
        with self.condition:
            idRecord = -1
            QUERY = "insert into `%s` (user_id, prj_id, priority, " \
                    "data) values" % self.name
            QUERY += "(%s, %s, %s, %s)"

            connection = self.db_engine.connect()
            trans = connection.begin()

            try:
                result = connection.execute(QUERY,
                                            [user_id, prj_id, priority,
                                             json.dumps(data)])

                idRecord = result.lastrowid

                trans.commit()
            except SQLAlchemyError as ex:
                trans.rollback()
                raise Exception(ex.message)
            finally:
                connection.close()

            now = datetime.now()
            queue_item = QueueItem(idRecord, user_id, prj_id,
                                   priority, 0, now, now)

            self.pqueue.put(priority, queue_item)

            self.condition.notifyAll()

    def reinsertItem(self, queue_item):
        with self.condition:
            self.pqueue.put(queue_item.getPriority(), queue_item)
            self.condition.notifyAll()

    def getItem(self):
        item = None
        queue_item = None

        with self.condition:
            while (queue_item is None and not self.is_closed):
                if self.pqueue.size() > 0:
                    queue_item = self.pqueue.get()

                    # self.pqueue.task_done()
                else:
                    self.condition.wait()

            if (not self.is_closed and queue_item is not None):
                connection = self.db_engine.connect()

                try:
                    QUERY = """select user_id, prj_id, priority, \
retry_count, creation_time, last_update, data from `%s`""" % self.name
                    QUERY += " where id=%s"

                    result = connection.execute(QUERY, [queue_item.getId()])

                    row = result.fetchone()

                    item = QueueItem(queue_item.getId(), row[0], row[1],
                                     row[2], row[3], row[4], row[5],
                                     json.loads(row[6]))
                except SQLAlchemyError as ex:
                    raise Exception(ex.message)
                finally:
                    connection.close()

            self.condition.notifyAll()
        return item

    def deleteItem(self, queue_item):
        if not queue_item:
            return

        with self.condition:
            connection = self.db_engine.connect()
            trans = connection.begin()

            try:
                QUERY = "delete from `%s`" % self.name
                QUERY += " where id=%s"

                connection.execute(QUERY, [queue_item.getId()])

                trans.commit()
            except SQLAlchemyError as ex:
                trans.rollback()

                raise Exception(ex.message)
            finally:
                connection.close()
            self.condition.notifyAll()

    def updateItem(self, queue_item):
        if not queue_item:
            return

        with self.condition:
            connection = self.db_engine.connect()
            trans = connection.begin()

            try:
                queue_item.setLastUpdate(datetime.now())

                QUERY = "update `%s`" % self.name
                QUERY += " set priority=%s, retry_count=%s, " \
                         "last_update=%s where id=%s"

                connection.execute(QUERY, [queue_item.getPriority(),
                                           queue_item.getRetryCount(),
                                           queue_item.getLastUpdate(),
                                           queue_item.getId()])

                trans.commit()
            except SQLAlchemyError as ex:
                trans.rollback()

                raise Exception(ex.message)
            finally:
                connection.close()

            self.pqueue.put(queue_item.getPriority(), queue_item)
            self.condition.notifyAll()

    def updatePriority(self):
        if self.fairshare_manager is None:
            # LOG.warn("priority_updater not found!!!")
            return

        with self.condition:
            # now = datetime.now()
            queue_items = []

            connection = self.db_engine.connect()

            while self.pqueue.size() > 0:
                queue_item = self.pqueue.get()
                priority = queue_item.getPriority()

                try:
                    priority = self.fairshare_manager.execute(
                        "CALCULATE_PRIORITY",
                        user_id=queue_item.getUserId(),
                        prj_id=queue_item.getProjectId(),
                        timestamp=queue_item.getCreationTime(),
                        retry=queue_item.getRetryCount())

                    queue_item.setPriority(priority)
                except Exception as ex:
                    continue
                finally:
                    queue_items.append(queue_item)

                trans = connection.begin()

                try:
                    queue_item.setLastUpdate(datetime.now())

                    QUERY = "update `%s`" % self.name
                    QUERY += " set priority=%s, last_update=%s where id=%s"

                    connection.execute(QUERY, [queue_item.getPriority(),
                                               queue_item.getLastUpdate(),
                                               queue_item.getId()])

                    trans.commit()
                except SQLAlchemyError as ex:
                    trans.rollback()
                    raise Exception(ex.message)

            connection.close()

            if len(queue_items) > 0:
                for queue_item in queue_items:
                    self.pqueue.put(queue_item.getPriority(), queue_item)

                del queue_items

            self.condition.notifyAll()

    def toDict(self):
        queue = {}
        queue["name"] = self.name
        queue["size"] = self.getSize()
        # queue["size"] = self.pqueue.size()

        if self.is_closed:
            queue["status"] = "OFF"
        else:
            queue["status"] = "ON"

        return queue


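Each item lives both in the per-queue MySQL table and in the in-memory heap, with the payload travelling as JSON. A minimal round trip, assuming a SQLAlchemy engine named engine already configured for MySQL (names illustrative):

    queue = Queue("DYNAMIC", engine)          # creates table `DYNAMIC` if needed
    queue.insertItem("user-1", "prj-1", priority=50,
                     data={"request": "boot server"})

    item = queue.getItem()                    # blocks until an item is available
    print(item.getPriority(), item.getData()["request"])

    queue.deleteItem(item)                    # remove once handled
    queue.close()

Rebuilding the heap from the table in __init__ is what makes the queue survive a restart of the synergy service.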
class QueueManager(Manager):

    def __init__(self):
@@ -415,6 +50,9 @@ class QueueManager(Manager):

        self.fairshare_manager = self.getManager("FairShareManager")

        if self.fairshare_manager is None:
            raise Exception("FairShareManager not found!")

        db_connection = CONF.QueueManager.db_connection
        pool_size = CONF.QueueManager.db_pool_size
        max_overflow = CONF.QueueManager.db_max_overflow
@@ -433,13 +71,19 @@ class QueueManager(Manager):
        elif command == "DELETE_QUEUE":
            return self.deleteQueue(*args, **kargs)
        elif command == "GET_QUEUE":
            return self.getQueue(*args, **kargs)
            queue = self.getQueue(kargs.get("name", None))
            return queue

        else:
            raise Exception("command=%r not supported!" % command)

    def task(self):
        for queue in self.queue_list.values():
            queue.updatePriority()
        try:
            for queue in self.queue_list.values():
                queue.updatePriority()
        except Exception as ex:
            LOG.error("Exception has occured", exc_info=1)
            LOG.error(ex)

    def destroy(self):
        for queue in self.queue_list.values():
@@ -447,7 +91,7 @@ class QueueManager(Manager):

    def createQueue(self, name):
        if name not in self.queue_list:
            queue = Queue(name, self.db_engine, self.fairshare_manager)
            queue = QueueDB(name, self.db_engine, self.fairshare_manager)
            self.queue_list[name] = queue
            return queue
        else:

@@ -1,12 +1,12 @@
import ConfigParser
import logging
import threading

try:
    from oslo_config import cfg
except ImportError:
    from oslo.config import cfg

from common.quota import SharedQuota
from synergy.common.manager import Manager


@@ -34,379 +34,272 @@ CONFIG = ConfigParser.SafeConfigParser()
LOG = logging.getLogger(__name__)


class DynamicQuota(object):

    def __init__(self):
        self.exit = False
        self.projects = {}
        self.ram = {"in_use": 0, "limit": 0}
        self.cores = {"in_use": 0, "limit": 0}
        self.condition = threading.Condition()

    def setSize(self, cores, ram):
        self.ram["limit"] = ram
        self.cores["limit"] = cores

    def getSize(self):
        return {"cores": self.cores["limit"], "ram": self.ram["limit"]}

    def getProjects(self):
        return self.projects

    def getProject(self, prj_id):
        return self.projects.get(prj_id, None)

    def addProject(self, prj_id, prj_name, usage=None):
        if prj_id not in self.projects:
            with self.condition:
                project = {"name": prj_name,
                           "cores": 0,
                           "ram": 0,
                           "instances": {"active": [], "pending": []},
                           "TTL": 0}

                if usage is not None:
                    project["cores"] = usage["cores"]
                    project["ram"] = usage["ram"]
                    project["instances"]["active"].extend(usage["instances"])

                self.ram["in_use"] += project["ram"]
                self.cores["in_use"] += project["cores"]

                self.projects[prj_id] = project
                self.condition.notifyAll()
        else:
            raise Exception("project %r (id=%s) alredy added!"
                            % (prj_name, prj_id))

    def removeProject(self, prj_id):
        if prj_id in self.projects:
            with self.condition:
                project = self.projects[prj_id]

                self.ram["in_use"] -= project["ram"]
                self.cores["in_use"] -= project["cores"]

                del self.projects[prj_id]
                self.condition.notifyAll()

            return True
        return False

    def close(self):
        self.exit = True

    def allocate(self, instance_id, prj_id, cores, ram, blocking=True):
        if prj_id not in self.projects:
            return

        project = self.projects[prj_id]

        if project is None:
            return

        found = False

        with self.condition:
            if instance_id in project["instances"]["active"]:
                found = True
            elif instance_id in project["instances"]["pending"]:
                found = True
            else:
                project["instances"]["pending"].append(instance_id)

            while (not self.exit and not found and
                   instance_id in project["instances"]["pending"]):

                LOG.debug("allocate instance_id=%s project=%s cores=%s "
                          "ram=%s [vcpu in use %s of %s; ram in use %s of %s]"
                          % (instance_id,
                             project["name"],
                             cores,
                             ram,
                             self.cores["in_use"],
                             self.cores["limit"],
                             self.ram["in_use"],
                             self.ram["limit"]))

                if (self.cores["limit"] - self.cores["in_use"] >= cores) and \
                        (self.ram["limit"] - self.ram["in_use"] >= ram):
                    self.cores["in_use"] += cores
                    self.ram["in_use"] += ram
                    project["cores"] += cores
                    project["ram"] += ram

                    found = True
                    project["instances"]["active"].append(instance_id)
                    project["instances"]["pending"].remove(instance_id)

                    LOG.info("allocated instance_id=%s project=%s cores=%s ram"
                             "=%s [vcpu in use %s of %s; ram in use %s of %s]"
                             % (instance_id,
                                project["name"],
                                cores,
                                ram,
                                self.cores["in_use"],
                                self.cores["limit"],
                                self.ram["in_use"],
                                self.ram["limit"]))
                elif blocking:
                    LOG.info("allocate wait!!!")
                    self.condition.wait()

            self.condition.notifyAll()

        return found

    def release(self, instance_id, prj_id, cores, ram):
        if prj_id not in self.projects:
            return

        project = self.projects[prj_id]

        LOG.debug("release instance_id=%s project=%s cores=%s "
                  "ram=%s [vcpu in use %s of %s; ram in use %s of %s]"
                  % (instance_id,
                     project["name"],
                     cores,
                     ram,
                     self.cores["in_use"],
                     self.cores["limit"],
                     self.ram["in_use"],
                     self.ram["limit"]))

        with self.condition:
            if instance_id in project["instances"]["pending"]:
                project["instances"]["pending"].remove(instance_id)
            elif instance_id in instance_id in project["instances"]["active"]:
                if self.cores["in_use"] - cores < 0:
                    self.cores["in_use"] = 0
                else:
                    self.cores["in_use"] -= cores

                if self.ram["in_use"] - ram < 0:
                    self.ram["in_use"] = 0
                else:
                    self.ram["in_use"] -= ram

                if project["cores"] - cores < 0:
                    project["cores"] = 0
                else:
                    project["cores"] -= cores

                if project["ram"] - ram < 0:
                    project["ram"] = 0
                else:
                    project["ram"] -= ram

                project["instances"]["active"].remove(instance_id)

                LOG.info("released instance_id=%s project=%s cores=%s "
                         "ram=%s [vcpu in use %s of %s; ram in use %s of %s]"
                         % (instance_id,
                            project["name"],
                            cores,
                            ram,
                            self.cores["in_use"],
                            self.cores["limit"],
                            self.ram["in_use"],
                            self.ram["limit"]))
            else:
                LOG.debug("release: instance '%s' not found!" % (instance_id))

            self.condition.notifyAll()

    def toDict(self):
        quota = {}
        quota["ram"] = self.ram
        quota["cores"] = self.cores
        quota["projects"] = self.projects

        return quota


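DynamicQuota, the old implementation that this commit replaces with SharedQuota, is a bounded-resource monitor: allocate() admits an instance only while cores and RAM stay under the configured limits, otherwise it waits on the condition variable until release() frees capacity. A minimal sketch of the contract, with hypothetical sizes:

    quota = DynamicQuota()
    quota.setSize(cores=8, ram=16384)          # limits: 8 vcpus, 16 GB
    quota.addProject("prj-1", "astro")

    # 4 cores / 8 GB fit within the free capacity, so this returns True
    quota.allocate("vm-1", "prj-1", cores=4, ram=8192)

    # ...a later allocate() that does not fit blocks until another thread
    # calls release(), which decrements the usage and notifies the waiters:
    quota.release("vm-1", "prj-1", cores=4, ram=8192)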
class QuotaManager(Manager):

    def __init__(self):
        super(QuotaManager, self).__init__(name="QuotaManager")
        super(QuotaManager, self).__init__("QuotaManager")

    def setup(self):
        try:
            self.dynamic_quota = DynamicQuota()
        self.projects = {}

            if self.getManager("NovaManager") is None:
                raise Exception("NovaManager not found!")
        if self.getManager("NovaManager") is None:
            raise Exception("NovaManager not found!")

            if self.getManager("KeystoneManager") is None:
                raise Exception("KeystoneManager not found!")
        if self.getManager("KeystoneManager") is None:
            raise Exception("KeystoneManager not found!")

            self.nova_manager = self.getManager("NovaManager")
            self.keystone_manager = self.getManager("KeystoneManager")
            self.listener = None
        except Exception as ex:
            LOG.error("Exception has occured", exc_info=1)
            LOG.error(ex)
        self.nova_manager = self.getManager("NovaManager")
        self.keystone_manager = self.getManager("KeystoneManager")
        self.listener = None

    def destroy(self):
        LOG.info("destroy invoked!")
        self.dynamic_quota.close()
        SharedQuota.disable()

    def execute(self, command, *args, **kargs):
        if command == "ADD_PROJECT":
            return self.addProject(*args, **kargs)
        if command == "show":
            prj_id = kargs.get("project_id", None)
            prj_name = kargs.get("project_name", None)
            all_projects = kargs.get("all_projects", None)

            if prj_id:
                project = self.projects.get(prj_id, None)

                if project:
                    return project

                raise Exception("project (id=%r) not found!" % prj_id)
            elif prj_name:
                for project in self.projects.values():
                    if prj_name == project.getName():
                        return project

                raise Exception("project (name=%r) not found!" % prj_name)
            elif all_projects:
                return self.projects.values()
            else:
                return SharedQuota()
        elif command == "GET_SHARED_QUOTA":
            return SharedQuota()
        elif command == "GET_PROJECTS":
            return self.projects.values()
        elif command == "GET_PROJECT":
            return self.getProject(*args, **kargs)
        elif command == "REMOVE_PROJECT":
            return self.removeProject(*args, **kargs)
        elif command == "GET_DYNAMIC_QUOTA":
            return self.dynamic_quota
        else:
            raise Exception("command=%r not supported!" % command)

    def task(self):
        try:
            self.updateDynamicQuota()
            self.deleteExpiredServices()
            self.updateSharedQuota()
            self.deleteExpiredServers()
        except Exception as ex:
            LOG.error(ex)

    def getProject(self, prj_id):
        return self.dynamic_quota.getProject(prj_id)
        return self.projects.get(prj_id, None)

    def addProject(self, prj_id, prj_name):
        if self.dynamic_quota.getProject(prj_id) is not None:
            raise Exception("project %r (id=%s) alredy added!"
                            % (prj_name, prj_id))
    def getProjects(self):
        return self.projects

    def addProject(self, project):
        if self.projects.get(project.getId(), None):
            raise Exception("project %r already exists!" % (project.getId()))

        try:
            usage = self.nova_manager.execute("GET_PROJECT_USAGE", prj_id)
            self.dynamic_quota.addProject(prj_id, prj_name, usage)
            self.updateDynamicQuota()
            quota = self.nova_manager.getQuota(project.getId())

            if quota.getSize("vcpus") > 0 and \
                    quota.getSize("memory") > 0 and \
                    quota.getSize("instances") > 0:

                self.nova_manager.updateQuota(quota, is_class=True)
                quota.setSize("vcpus", -1)
                quota.setSize("memory", -1)
                quota.setSize("instances", -1)

                self.nova_manager.updateQuota(quota)

            class_quota = self.nova_manager.getQuota(
                project.getId(), is_class=True)

            quota = project.getQuota()
            quota.setSize("vcpus", class_quota.getSize("vcpus"))
            quota.setSize("memory", class_quota.getSize("memory"))
            quota.setSize("instances", class_quota.getSize("instances"))

            quota.setSize(
                "vcpus", SharedQuota.getSize("vcpus"), private=False)
            quota.setSize(
                "memory", SharedQuota.getSize("memory"), private=False)
            quota.setSize(
                "instances", SharedQuota.getSize("instances"), private=False)

            servers = self.nova_manager.getProjectServers(project.getId())

            for server in servers:
                if server.getState() != "building":
                    quota.allocate(server)

            self.projects[project.getId()] = project
        except Exception as ex:
            LOG.error("Exception has occured", exc_info=1)
            LOG.error(ex)
            raise ex

    def removeProject(self, prj_id, destroy=False):
        project = self.dynamic_quota.getProject(prj_id)
    def removeProject(self, project, destroy=False):
        project = self.projects[project.getId()]

        if project is None:
            return

        try:
            if destroy:
                quota = project.getQuota()

                ids = []
                ids.extend(project["instances"]["active"])
                ids.extend(project["instances"]["pending"])
                ids.extend(quota.getServers("active", private=False))
                ids.extend(quota.getServers("pending", private=False))
                ids.extend(quota.getServers("error", private=False))

                for instance_id in ids:
                    self.nova_manager.execute("DELETE_SERVER", instance_id)
                for server_id in ids:
                    self.nova_manager.deleteServer(server_id)

            self.dynamic_quota.removeProject(prj_id)

            self.updateDynamicQuota()
            del self.projects[project.getId()]
        except Exception as ex:
            LOG.error(ex)
            raise ex

    def deleteExpiredServices(self):
        for prj_id, project in self.dynamic_quota.projects.items():
            instance_ids = project["instances"]["active"]
            TTL = project["TTL"]
    def deleteExpiredServers(self):
        for prj_id, project in self.getProjects().items():
            TTL = project.getTTL()
            quota = project.getQuota()

            if project["TTL"] == 0:
            ids = []
            ids.extend(quota.getServers("active", private=False))
            ids.extend(quota.getServers("error", private=False))

            if TTL == 0:
                continue

            if not ids:
                continue

            try:
                expired_ids = self.nova_manager.execute("GET_EXPIRED_SERVERS",
                                                        prj_id=prj_id,
                                                        instances=instance_ids,
                                                        expiration=TTL)
                servers = self.nova_manager.getExpiredServers(
                    prj_id=prj_id, server_ids=ids, TTL=TTL)

                for instance_id in expired_ids:
                    self.nova_manager.execute("DELETE_SERVER", instance_id)
                for server in servers:
                    uuid = server.getId()
                    state = server.getState()

                    if server.getState() == "error":
                        LOG.info("the server instance %r will be destroyed "
                                 "because it is in %s state (TTL=%s, prj_id"
                                 "=%r)" % (uuid, state, TTL, prj_id))
                    else:
                        LOG.info("the server instance %r will be destroyed "
                                 "because it exceeded its maximum time to live"
                                 " (TTL=%s, state=%s, prj_id=%r)"
                                 % (uuid, TTL, state, prj_id))

                    self.nova_manager.deleteServer(server)
            except Exception as ex:
                LOG.error(ex)
                raise ex

    def updateDynamicQuota(self):
    def updateSharedQuota(self):
        # calculate the the total limit per cores and ram
        total_ram = float(0)
        total_cores = float(0)
        static_ram = float(0)
        static_cores = float(0)
        dynamic_ram = float(0)
        dynamic_cores = float(0)
        total_memory = float(0)
        total_vcpus = float(0)
        static_memory = float(0)
        static_vcpus = float(0)
        shared_memory = float(0)
        shared_vcpus = float(0)

        try:
            cpu_ratio = self.nova_manager.execute("GET_PARAMETER",
                                                  name="cpu_allocation_ratio",
                                                  default=float(16))
            cpu_ratio = self.nova_manager.getParameter(
                "cpu_allocation_ratio", "NovaManager")

            ram_ratio = self.nova_manager.execute("GET_PARAMETER",
                                                  name="ram_allocation_ratio",
                                                  default=float(1.5))
            ram_ratio = self.nova_manager.getParameter(
                "ram_allocation_ratio", "NovaManager")

            hypervisors = self.nova_manager.execute("GET_HYPERVISORS")
            cpu_ratio = self.nova_manager.getParameter(
                "cpu_allocation_ratio", default=cpu_ratio)

            for hypervisor in hypervisors:
                if hypervisor["status"] == "enabled" and \
                        hypervisor["state"] == "up":
                    info = self.nova_manager.execute("GET_HYPERVISOR",
                                                     hypervisor["id"])
            ram_ratio = self.nova_manager.getParameter(
                "ram_allocation_ratio", default=ram_ratio)

                    total_ram += info["memory_mb"]
                    total_cores += info["vcpus"]
            hypervisors = self.nova_manager.getHypervisors()

            total_ram *= float(ram_ratio)
            total_cores *= float(cpu_ratio)
            for hv in hypervisors:
                if hv.getState() == "down" or hv.getStatus() == "disabled":
                    continue

            kprojects = self.keystone_manager.execute("GET_PROJECTS")
                if hv.getMemory() > 0:
                    total_memory += hv.getMemory()

            for project in kprojects:
                prj_id = project["id"]
                if hv.getVCPUs() > 0:
                    total_vcpus += hv.getVCPUs()

                if self.dynamic_quota.getProject(prj_id) is None:
                    quota = self.nova_manager.execute("GET_QUOTA", prj_id)
            total_memory *= float(ram_ratio)
            total_vcpus *= float(cpu_ratio)

                    static_cores += quota["cores"]
                    static_ram += quota["ram"]
            kprojects = self.keystone_manager.getProjects()

            enabled = False
            for kproject in kprojects:
                project = self.getProject(kproject.getId())

            if total_cores < static_cores:
                if self.dynamic_quota.getProjects():
                    LOG.warn("dynamic quota: the total statically "
                             "allocated cores (%s) is greater than the "
                             "total amount of cores allowed (%s)"
                             % (static_cores, total_cores))
                if project:
                    quota = project.getQuota()
                else:
                    quota = self.nova_manager.getQuota(kproject.getId())

                if quota.getSize("vcpus") > 0:
                    static_vcpus += quota.getSize("vcpus")

                if quota.getSize("memory") > 0:
                    static_memory += quota.getSize("memory")

            if total_vcpus < static_vcpus:
                if self.getProjects():
                    LOG.warn("shared quota: the total statically "
                             "allocated vcpus (%s) is greater than the "
                             "total amount of vcpus allowed (%s)"
                             % (static_vcpus, total_vcpus))
            else:
                enabled = True
                dynamic_cores = total_cores - static_cores
                shared_vcpus = total_vcpus - static_vcpus

            if total_ram < static_ram:
            if total_memory < static_memory:
                enabled = False

                if self.dynamic_quota.getProjects():
                    LOG.warn("dynamic quota: the total statically "
                             "allocated ram (%s) is greater than the "
                             "total amount of ram allowed (%s)"
                             % (static_ram, total_ram))
                if self.getProjects():
                    LOG.warn("shared quota: the total statically "
                             "allocated memory (%s) is greater than the "
                             "total amount of memory allowed (%s)"
                             % (static_memory, total_memory))
            else:
                enabled = True
                dynamic_ram = total_ram - static_ram
                shared_memory = total_memory - static_memory

            if enabled:
                LOG.info("dynamic quota: cores=%s ram=%s"
                         % (dynamic_cores, dynamic_ram))
                LOG.info("shared quota enabled: vcpus=%s memory=%s"
                         % (shared_vcpus, shared_memory))

                self.dynamic_quota.setSize(dynamic_cores, dynamic_ram)
                SharedQuota.enable()
                SharedQuota.setSize("vcpus", shared_vcpus)
                SharedQuota.setSize("memory", shared_memory)
            else:
                LOG.info("shared quota disabled")

                """
                LOG.info("cpu_ratio=%s, ram_ratio=%s" % (cpu_ratio, ram_ratio))
                LOG.info("total_cores=%s total_ram=%s" % (total_cores, total_ram))
                LOG.info("static cores=%s ram=%s" % (static_cores, static_ram))
                LOG.info("dynamic cores=%s ram=%s" % (dynamic_cores, dynamic_ram))
                """
                LOG.debug("dynamic quota %s" % self.dynamic_quota.toDict())
                SharedQuota.disable()
                SharedQuota.setSize("vcpus", 0)
                SharedQuota.setSize("memory", 0)

            for project in self.getProjects().values():
                quota = project.getQuota()
                quota.setSize("vcpus", shared_vcpus, private=False)
                quota.setSize("memory", shared_memory, private=False)
        except Exception as ex:
            LOG.error("Exception has occured", exc_info=1)
            LOG.error(ex)
            raise ex

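The arithmetic in the new updateSharedQuota() is: total capacity is the sum over enabled, up hypervisors scaled by the Nova allocation ratios, and whatever the projects' private quotas do not reserve becomes the shared pool. With hypothetical numbers:

    # 2 hypervisors x 16 vcpus, cpu_allocation_ratio = 2.0
    total_vcpus = (16 + 16) * 2.0              # = 64.0
    # private quotas: project A = 20 vcpus, project B = 12 vcpus
    static_vcpus = 20 + 12                     # = 32
    shared_vcpus = total_vcpus - static_vcpus  # = 32.0 left for the shared pool

    # if static_vcpus ever exceeded total_vcpus, the shared quota would be
    # disabled and a warning logged, exactly as in the code above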
@@ -1,15 +1,18 @@
import logging
import re
import threading

from datetime import datetime

try:
    from oslo_config import cfg
except ImportError:
    from oslo.config import cfg

from common.flavor import Flavor
from common.quota import SharedQuota
from common.request import Request
from common.server import Server
from synergy.common.manager import Manager
from threading import Thread


__author__ = "Lisa Zangrando"
__email__ = "lisa.zangrando[AT]pd.infn.it"
@@ -35,10 +38,10 @@ LOG = logging.getLogger(__name__)

class Notifications(object):

    def __init__(self, dynamic_quota):
    def __init__(self, projects):
        super(Notifications, self).__init__()

        self.dynamic_quota = dynamic_quota
        self.projects = projects

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.debug("Notification INFO: event_type=%s payload=%s"
@@ -48,16 +51,11 @@ class Notifications(object):
            return

        state = payload["state"]
        instance_id = payload["instance_id"]

        if ((event_type == "compute.instance.delete.end" and
            (state == "deleted" or state == "error" or state == "building")) or
            (event_type == "compute.instance.update" and
            (state == "deleted" or state == "error")) or
            (event_type == "compute.instance.update" and state == "error") or
                (event_type == "scheduler.run_instance" and state == "error")):
            ram = 0
            cores = 0
            prj_id = None
            instance_info = None

            if event_type == "scheduler.run_instance":
@@ -65,41 +63,58 @@ class Notifications(object):
            else:
                instance_info = payload

            prj_id = instance_info["tenant_id"]
            instance_id = instance_info["instance_id"]
            ram = instance_info["memory_mb"]
            cores = instance_info["vcpus"]
            if instance_info["tenant_id"] not in self.projects:
                return

            LOG.debug("Notification INFO (type=%s state=%s): cores=%s ram=%s "
                      "prj_id=%s instance_id=%s"
                      % (event_type, state, cores, ram, prj_id, instance_id))
            flavor = Flavor()
            flavor.setName(instance_info["instance_type"])
            flavor.setMemory(instance_info["memory_mb"])
            flavor.setVCPUs(instance_info["vcpus"])
            flavor.setStorage(instance_info["root_gb"])

            server = Server()
            server.setFlavor(flavor)
            server.setId(instance_info["instance_id"])
            server.setUserId(instance_info["user_id"])
            server.setProjectId(instance_info["tenant_id"])
            server.setMetadata(instance_info["metadata"])

            LOG.debug("Notification INFO (type=%s state=%s): vcpus=%s "
                      "memory=%s prj_id=%s server_id=%s"
                      % (event_type, state, flavor.getVCPUs(),
                         flavor.getMemory(), server.getProjectId(),
                         server.getId()))

            quota = self.projects[server.getProjectId()].getQuota()

            try:
                self.dynamic_quota.release(instance_id, prj_id, cores, ram)
                quota.release(server)
            except Exception as ex:
                LOG.warn("Notification INFO: %s" % ex)
                LOG.error("Exception has occured", exc_info=1)

    def warn(self, ctxt, publisher_id, event_type, payload, metadata):
        state = payload["state"]
        instance_id = payload["instance_id"]
        LOG.debug("Notification WARN: event_type=%s state=%s instance_id=%s"
                  % (event_type, state, instance_id))
        LOG.debug("Notification WARN: event_type=%s state=%s instance_id=%s "
                  "payload=%s" % (event_type, state, instance_id, payload))

    def error(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.debug("Notification ERROR: event_type=%s payload=%s metadata=%s"
                  % (event_type, payload, metadata))

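Notifications is wired up later in this diff as an oslo.messaging notification endpoint (see getNotificationListener below): when Nova emits an instance event such as compute.instance.delete.end, info() rebuilds a Server from the payload and releases its quota. A sketch of the payload fields it relies on, with purely illustrative values:

    payload = {"state": "deleted",
               "instance_id": "vm-1",
               "tenant_id": "prj-1",
               "user_id": "usr-1",
               "instance_type": "m1.small",
               "memory_mb": 2048,
               "vcpus": 1,
               "root_gb": 20,
               "metadata": {}}

    endpoint = Notifications(projects)   # projects: dict prj_id -> Project
    endpoint.info(ctxt=None, publisher_id="compute.node-1",
                  event_type="compute.instance.delete.end",
                  payload=payload, metadata={})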
class Worker(threading.Thread):
class Worker(Thread):

    def __init__(self, name, queue, quota, nova_manager):
    def __init__(self, name, queue, projects, nova_manager, keystone_manager):
        super(Worker, self).__init__()
        self.setDaemon(True)

        self.name = name
        self.queue = queue
        self.quota = quota
        self.projects = projects
        self.nova_manager = nova_manager
        self.keystone_manager = keystone_manager
        self.exit = False
        LOG.info("Worker %r created!" % self.name)

@@ -117,100 +132,110 @@ class Worker(threading.Thread):

    def run(self):
        LOG.info("Worker %r running!" % self.name)
        queue_items = []
        last_release_time = SharedQuota.getLastReleaseTime()

        while not self.exit and not self.queue.isClosed():
            try:
                queue_item = self.queue.getItem()
            except Exception as ex:
                LOG.error("Worker %r: %s" % (self.name, ex))
                continue
            if last_release_time < SharedQuota.getLastReleaseTime():
                last_release_time = SharedQuota.getLastReleaseTime()

                while queue_items:
                    item = queue_items.pop(0)
                    self.queue.reinsertItem(item)

            queue_item = self.queue.getItem(blocking=False)

            if queue_item is None:
                if self.queue.getSize():
                    SharedQuota.wait()
                    continue
                else:
                    queue_item = self.queue.getItem(blocking=True)

            if queue_item is None:
                continue

            try:
                request = queue_item.getData()
                request = Request.fromDict(queue_item.getData())

                instance = request["instance"]
                prj_id = instance["nova_object.data"]["project_id"]
                uuid = instance["nova_object.data"]["uuid"]
                vcpus = instance["nova_object.data"]["vcpus"]
                memory_mb = instance["nova_object.data"]["memory_mb"]
                context = request["context"]
                filter_properties = request["filter_properties"]
                admin_password = request["admin_password"]
                injected_files = request["injected_files"]
                requested_networks = request["requested_networks"]
                security_groups = request["security_groups"]
                block_device_mapping = request["block_device_mapping"]
                legacy_bdm = request["legacy_bdm"]
                image = request["image"]
                prj_id = request.getProjectId()
                context = request.getContext()
                server = request.getServer()
                server_id = server.getId()
                quota = None

                try:
                    server = self.nova_manager.execute("GET_SERVER",
                                                       id=uuid)
                    s = self.nova_manager.getServer(server_id, detail=True)
                    if s.getState() != "building":
                        # or server["OS-EXT-STS:task_state"] != "scheduling":
                        self.queue.deleteItem(queue_item)
                        continue
                except Exception as ex:
                    LOG.warn("Worker %s: server %r not found! reason=%s"
                             % (self.name, uuid, ex))
                    LOG.warn("Worker %s: the server %r is not anymore "
                             "available! reason=%s" % (self.name, prj_id, ex))
                    self.queue.deleteItem(queue_item)

                    self.nova_manager.execute("DELETE_SERVER", id=uuid)
                    continue

                if server["OS-EXT-STS:vm_state"] != "building" or \
                        server["OS-EXT-STS:task_state"] != "scheduling":
                    self.queue.deleteItem(queue_item)
                    continue
                quota = self.projects[prj_id].getQuota()

                if self.quota.allocate(instance_id=uuid,
                                       prj_id=prj_id,
                                       cores=vcpus,
                                       ram=memory_mb,
                                       blocking=True):
                if quota.allocate(server, blocking=False):
                    try:
                        self.nova_manager.execute(
                            "BUILD_SERVER",
                            context=context,
                            instance=instance,
                            image=image,
                            filter_properties=filter_properties,
                            admin_password=admin_password,
                            injected_files=injected_files,
                            requested_networks=requested_networks,
                            security_groups=security_groups,
                            block_device_mapping=block_device_mapping,
                            legacy_bdm=legacy_bdm)

                        LOG.info("Worker %r: server (instance_id=%s) build OK"
                                 % (self.name, uuid))
                        computes = self.nova_manager.selectComputes(request)
                    except Exception as ex:
                        LOG.error("Worker %r: error on building the server "
                                  "(instance_id=%s) reason=%s"
                                  % (self.name, uuid, ex))
                        LOG.warn("Worker %s: compute %r not found! reason=%s"
                                 % (self.name, server.getId(), ex))

                        self.quota.release(instance_id=uuid,
                                           prj_id=prj_id,
                                           cores=vcpus,
                                           ram=memory_mb)
                    found = False

                    for compute in computes:
                        try:
                            km = self.keystone_manager
                            trust = km.getTrust(context["trust_id"])
                            token = trust.getToken(km.getToken().getId())

                            context["auth_token"] = token.getId()
                            context["user_id"] = token.getUser().getId()

                            self.nova_manager.buildServer(request, compute)

                            LOG.info("Worker %r: server (id=%r) "
                                     "builded!" % (self.name, server.getId()))

                            found = True
                            break
                        except Exception as ex:
                            LOG.error("Worker %r: error on building the "
                                      "server (id=%r) reason=%s"
                                      % (self.name, server.getId(), ex))

                    if found:
                        self.queue.deleteItem(queue_item)
                    else:
                        quota.release(server)
                        queue_items.append(queue_item)
                else:
                    queue_items.append(queue_item)

            except Exception as ex:
                LOG.error("Exception has occured", exc_info=1)
                LOG.error("Worker %r: %s" % (self.name, ex))

                self.queue.deleteItem(queue_item)
            except Exception as ex:
                LOG.error("Worker '%s': %s" % (self.name, ex))
                continue

        LOG.info("Worker '%s' destroyed!" % self.name)
        LOG.info("Worker %r destroyed!" % self.name)


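The new run() loop is the backfill strategy named in the commit message: requests that do not fit are parked in the local queue_items list, and a change in SharedQuota.getLastReleaseTime() (some resource was freed) triggers their reinsertion, so a request that does not fit cannot starve the ones behind it. A toy restatement under those assumptions, with duck-typed arguments standing in for the real queue and quota objects:

    import collections

    def backfill_loop(queue, shared_quota, try_build):
        """Park items that do not fit; retry them on every quota release."""
        parked = collections.deque()
        last_release = shared_quota.getLastReleaseTime()

        while not queue.isClosed():
            if shared_quota.getLastReleaseTime() > last_release:
                last_release = shared_quota.getLastReleaseTime()
                while parked:                     # capacity changed: retry all
                    queue.reinsertItem(parked.popleft())

            item = queue.getItem(blocking=False)
            if item is None:
                item = queue.getItem(blocking=True)
                if item is None:
                    continue

            if try_build(item):                   # allocated and built
                queue.deleteItem(item)
            else:
                parked.append(item)               # backfill: keep for later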
class SchedulerManager(Manager):

    def __init__(self):
        Manager.__init__(self, name="SchedulerManager")
        super(SchedulerManager, self).__init__("SchedulerManager")

        self.config_opts = [
            cfg.FloatOpt('default_TTL', default=10.0),
            cfg.ListOpt("projects", default=[], help="the projects list"),
            cfg.ListOpt("shares", default=[], help="the shares list"),
            cfg.ListOpt("TTLs", default=[], help="the TTLs list"),
            cfg.ListOpt("TTLs", default=[], help="the TTLs list")
        ]
        self.workers = []

@@ -240,14 +265,12 @@ class SchedulerManager(Manager):
        self.projects = {}
        self.listener = None
        self.exit = False
        self.configured = False

    def parseAttribute(self, attribute):
        if attribute is None:
            return None

        prj_name = None
        value = float(0)

        parsed_attribute = re.split('=', attribute)

        if len(parsed_attribute) > 1:
@@ -268,213 +291,226 @@ class SchedulerManager(Manager):
        return (prj_name, value)

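parseAttribute() splits the name=value strings used by the shares and TTLs options, so a synergy.conf section along these lines (project names and values purely illustrative) maps each project to its share and TTL:

    [SchedulerManager]
    projects = prj_a, prj_b
    shares = prj_a=70, prj_b=30
    TTLs = prj_a=5.0, prj_b=10.0

For example, parseAttribute("prj_a=70") would return ("prj_a", 70.0).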
def execute(self, command, *args, **kargs):
|
||||
if command == "PROCESS_REQUEST":
|
||||
return self.processRequest(*args, **kargs)
|
||||
if command == "show":
|
||||
usr_id = kargs.get("user_id", None)
|
||||
usr_name = kargs.get("user_name", None)
|
||||
all_users = kargs.get("all_users", False)
|
||||
prj_id = kargs.get("project_id", None)
|
||||
prj_name = kargs.get("project_name", None)
|
||||
project = None
|
||||
|
||||
if (usr_id is not None or usr_name is not None or all_users) and \
|
||||
prj_id is None and prj_name is None:
|
||||
raise Exception("project id or name not defined!")
|
||||
|
||||
if prj_id:
|
||||
project = self.projects.get(prj_id, None)
|
||||
|
||||
if not project:
|
||||
raise Exception("project (id=%r) not found!" % prj_id)
|
||||
elif prj_name:
|
||||
for prj in self.projects.values():
|
||||
if prj_name == prj.getName():
|
||||
project = prj
|
||||
break
|
||||
|
||||
if not project:
|
||||
raise Exception("project (name=%r) not found!" % prj_name)
|
||||
else:
|
||||
return self.projects.values()
|
||||
|
||||
if usr_id or usr_name:
|
||||
return project.getUser(id=usr_id, name=usr_name)
|
||||
elif all_users:
|
||||
return project.getUsers()
|
||||
else:
|
||||
return project
|
||||
else:
|
||||
raise Exception("command=%r not supported!" % command)
|
||||
|
||||
def task(self):
|
||||
if self.listener is None:
|
||||
self.dynamic_quota = self.quota_manager.execute(
|
||||
"GET_DYNAMIC_QUOTA")
|
||||
if not self.configured:
|
||||
for project in self.keystone_manager.getProjects():
|
||||
if project.getName() in CONF.SchedulerManager.projects:
|
||||
CONF.SchedulerManager.projects.remove(project.getName())
|
||||
project.setTTL(self.default_TTL)
|
||||
|
||||
defaults = self.nova_manager.execute("GET_QUOTA", defaults=True)
|
||||
try:
|
||||
users = self.keystone_manager.getUsers(
|
||||
prj_id=project.getId())
|
||||
|
||||
k_projects = self.keystone_manager.execute("GET_PROJECTS")
|
||||
for user in users:
|
||||
project.addUser(user)
|
||||
except Exception as ex:
|
||||
LOG.error("Exception has occured", exc_info=1)
|
||||
LOG.error(ex)
|
||||
|
||||
for k_project in k_projects:
|
||||
prj_id = str(k_project["id"])
|
||||
prj_name = str(k_project["name"])
|
||||
|
||||
if prj_name in CONF.SchedulerManager.projects:
|
||||
CONF.SchedulerManager.projects.remove(prj_name)
|
||||
|
||||
self.projects[prj_name] = {"id": prj_id,
|
||||
"name": prj_name,
|
||||
"type": "dynamic",
|
||||
"share": float(0),
|
||||
"TTL": self.default_TTL}
|
||||
|
||||
self.nova_manager.execute("UPDATE_QUOTA",
|
||||
id=prj_id,
|
||||
cores=-1,
|
||||
ram=-1,
|
||||
instances=-1)
|
||||
self.projects[project.getName()] = project
|
||||
else:
|
||||
quota = self.nova_manager.execute("GET_QUOTA", id=prj_id)
|
||||
quota = self.nova_manager.getQuota(project.getId())
|
||||
|
||||
if quota["cores"] == -1 and quota["ram"] == -1 and \
|
||||
quota["instances"] == -1:
|
||||
self.nova_manager.execute(
|
||||
"UPDATE_QUOTA",
|
||||
id=prj_id,
|
||||
cores=defaults["cores"],
|
||||
ram=defaults["ram"],
|
||||
instances=defaults["instances"])
|
||||
if quota.getSize("vcpus") <= -1 and \
|
||||
quota.getSize("memory") <= -1 and \
|
||||
quota.getSize("instances") <= -1:
|
||||
|
||||
qc = self.nova_manager.getQuota(project.getId(),
|
||||
is_class=True)
|
||||
|
||||
self.nova_manager.updateQuota(qc)
|
||||
|
||||
if len(CONF.SchedulerManager.projects) > 0:
|
||||
raise Exception("projects %s not found"
|
||||
% CONF.SchedulerManager.projects)
|
||||
raise Exception("projects %s not found, please check the syn"
|
||||
"ergy.conf" % CONF.SchedulerManager.projects)
|
||||
|
||||
self.quota_manager.updateSharedQuota()
|
||||
|
||||
for prj_ttl in CONF.SchedulerManager.TTLs:
|
||||
prj_name, TTL = self.parseAttribute(prj_ttl)
|
||||
self.projects[prj_name]["TTL"] = TTL
|
||||
self.projects[prj_name].setTTL(TTL)
|
||||
|
||||
for prj_share in CONF.SchedulerManager.shares:
|
||||
prj_name, share = self.parseAttribute(prj_share)
|
||||
self.projects[prj_name]["share"] = share
|
||||
|
||||
for project in self.projects.values():
|
||||
prj_id = project["id"]
|
||||
prj_name = project["name"]
|
||||
prj_share = project["share"]
|
||||
prj_name, share_value = self.parseAttribute(prj_share)
|
||||
p_share = self.projects[prj_name].getShare()
|
||||
p_share.setValue(share_value)
|
||||
|
||||
for prj_name, project in self.projects.items():
|
||||
del self.projects[prj_name]
|
||||
self.projects[prj_id] = project
|
||||
self.projects[project.getId()] = project
|
||||
|
||||
self.quota_manager.execute("ADD_PROJECT",
|
||||
prj_id=prj_id,
|
||||
prj_name=prj_name)
|
||||
self.quota_manager.addProject(project)
|
||||
|
||||
self.fairshare_manager.execute("ADD_PROJECT",
|
||||
prj_id=prj_id,
|
||||
prj_name=prj_name,
|
||||
share=prj_share)
|
||||
self.fairshare_manager.addProject(project)
|
||||
|
||||
self.fairshare_manager.execute("CALCULATE_FAIRSHARE")
|
||||
self.quota_manager.updateSharedQuota()
|
||||
self.fairshare_manager.calculateFairShare()
|
||||
|
||||
try:
|
||||
self.dynamic_queue = self.queue_manager.execute("CREATE_QUEUE",
|
||||
name="DYNAMIC")
|
||||
self.dynamic_queue = self.queue_manager.createQueue("DYNAMIC")
|
||||
except Exception as ex:
|
||||
LOG.error("Exception has occured", exc_info=1)
|
||||
LOG.error(ex)
|
||||
|
||||
self.dynamic_queue = self.queue_manager.execute("GET_QUEUE",
|
||||
name="DYNAMIC")
|
||||
self.dynamic_queue = self.queue_manager.getQueue("DYNAMIC")
|
||||
|
||||
dynamic_worker = Worker(name="DYNAMIC",
|
||||
queue=self.dynamic_queue,
|
||||
quota=self.dynamic_quota,
|
||||
nova_manager=self.nova_manager)
|
||||
dynamic_worker = Worker("DYNAMIC",
|
||||
self.dynamic_queue,
|
||||
self.projects,
|
||||
self.nova_manager,
|
||||
self.keystone_manager)
|
||||
dynamic_worker.start()
|
||||
|
||||
self.workers.append(dynamic_worker)
|
||||
|
||||
self.notifications = Notifications(self.dynamic_quota)
|
||||
self.notifications = Notifications(self.projects)
|
||||
|
||||
target = self.nova_manager.execute("GET_TARGET",
|
||||
topic='notifications',
|
||||
exchange="nova")
|
||||
self.listener = self.nova_manager.execute(
|
||||
"GET_NOTIFICATION_LISTENER",
|
||||
target = self.nova_manager.getTarget(topic='notifications',
|
||||
exchange="nova")
|
||||
|
||||
self.listener = self.nova_manager.getNotificationListener(
|
||||
targets=[target],
|
||||
endpoints=[self.notifications])
|
||||
|
||||
LOG.info("listener created")
|
||||
|
||||
self.listener.start()
|
||||
self.configured = True
|
||||
return
|
||||
|
||||
for prj_id, project in self.dynamic_quota.getProjects().items():
|
||||
instances = project["instances"]["active"]
|
||||
TTL = self.projects[prj_id]["TTL"]
|
||||
servers = self.nova_manager.execute("GET_EXPIRED_SERVERS",
|
||||
prj_id=prj_id,
|
||||
instances=instances,
|
||||
TTL=TTL)
|
||||
for project in self.projects.values():
|
||||
users = self.keystone_manager.getUsers(prj_id=project.getId())
|
||||
|
||||
for uuid, state in servers.items():
|
||||
if state == "error":
|
||||
LOG.info("the server instance %r will be destroyed because"
|
||||
" it is in %s state (TTL=%s, prj_id=%r)"
|
||||
% (uuid, state, TTL, prj_id))
|
||||
else:
|
||||
LOG.info("the server instance %r will be destroyed because"
|
||||
" it exceeded its maximum time to live (TTL=%s, "
|
||||
"state=%s, prj_id=%r)"
|
||||
% (uuid, TTL, state, prj_id))
|
||||
|
||||
self.nova_manager.execute("DELETE_SERVER", id=uuid)
|
||||
for user in users:
|
||||
try:
|
||||
project.addUser(user)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def destroy(self):
|
||||
if self.workers:
|
||||
for queue_worker in self.workers:
|
||||
queue_worker.destroy()
|
||||
for queue_worker in self.workers:
|
||||
queue_worker.destroy()
|
||||
|
||||
def processRequest(self, request):
    server = request.getServer()

    try:
        filter_properties = request["filter_properties"]
        instance = request["instance"]
        user_id = instance["nova_object.data"]["user_id"]
        prj_id = instance["nova_object.data"]["project_id"]
        uuid = instance["nova_object.data"]["uuid"]
        vcpus = instance["nova_object.data"]["vcpus"]
        memory_mb = instance["nova_object.data"]["memory_mb"]
        if request.getProjectId() in self.projects:
            self.nova_manager.setQuotaTypeServer(server)

        if prj_id in self.projects:
            timestamp = instance["nova_object.data"]["created_at"]
            timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%SZ")
            priority = 0
            project = self.projects[request.getProjectId()]
            quota = project.getQuota()

            try:
                if "retry" in filter_properties:
                    retry = filter_properties["retry"]
            if server.isPermanent():
                if quota.allocate(server, blocking=False):
                    self.nova_manager.buildServer(request)

                    LOG.info("new request: id=%r user_id=%s prj_id=%s "
                             "quota=private" % (request.getId(),
                                                request.getUserId(),
                                                request.getProjectId()))
                else:
                    self.nova_manager.deleteServer(server)
                    LOG.info("request rejected (quota exceeded): "
                             "id=%r user_id=%s prj_id=%s "
                             "quota=private" % (request.getId(),
                                                request.getUserId(),
                                                request.getProjectId()))
            else:
                timestamp = request.getCreatedAt()
                priority = 0
                retry = request.getRetry()

                if retry:
                    num_attempts = retry["num_attempts"]

                    if num_attempts > 0:
                        self.dynamic_quota.release(instance_id=uuid,
                                                   prj_id=prj_id,
                                                   cores=vcpus,
                                                   ram=memory_mb)
                    if num_attempts:
                        quota.release(server)

                        priority = 99999999
                        LOG.info("released resource uuid %s "
                                 "num_attempts %s" % (uuid, num_attempts))
            except Exception as ex:
                LOG.error("Exception has occurred", exc_info=1)
                LOG.error(ex)
                LOG.info("released resource uuid %s num attempts"
                         "%s" % (request.getId(), num_attempts))

                if priority == 0:
                    priority = self.fairshare_manager.execute(
                        "CALCULATE_PRIORITY",
                        user_id=user_id,
                        prj_id=prj_id,
                        timestamp=timestamp,
                        retry=0)
                if priority == 0:
                    priority = self.fairshare_manager.calculatePriority(
                        user_id=request.getUserId(),
                        prj_id=request.getProjectId(),
                        timestamp=timestamp,
                        retry=0)

                self.dynamic_queue.insertItem(user_id,
                                              prj_id,
                                              priority=priority,
                                              data=request)
                context = request.getContext()

                LOG.info("new request: instance_id=%s user_id=%s prj_id=%s "
                         "priority=%s type=dynamic" % (uuid, user_id,
                                                       prj_id, priority))
                km = self.keystone_manager
                token_user = km.validateToken(context["auth_token"])
                token_admin = km.getToken()

                trusts = km.getTrusts(
                    user_id=token_user.getUser().getId(), token=token_user)

                if trusts:
                    trust = trusts[0]
                else:
                    trust = km.makeTrust(
                        token_admin.getUser().getId(), token_user)

                context["trust_id"] = trust.getId()

                self.dynamic_queue.insertItem(request.getUserId(),
                                              request.getProjectId(),
                                              priority=priority,
                                              data=request.toDict())

                LOG.info("new request: id=%r user_id=%s prj_id=%s priority"
                         "=%s quota=shared" % (request.getId(),
                                               request.getUserId(),
                                               request.getProjectId(),
                                               priority))
        else:
            context = request["context"]
            admin_password = request["admin_password"]
            injected_files = request["injected_files"]
            requested_networks = request["requested_networks"]
            security_groups = request["security_groups"]
            block_device_mapping = request["block_device_mapping"]
            legacy_bdm = request["legacy_bdm"]
            image = request["image"]
            self.nova_manager.buildServer(request)

            self.nova_manager.execute(
                "BUILD_SERVER",
                context=context,
                instance=instance,
                image=image,
                filter_properties=filter_properties,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=block_device_mapping,
                legacy_bdm=legacy_bdm)

            LOG.info("new request: instance_id=%s user_id=%s "
                     "prj_id=%s type=static" % (uuid, user_id, prj_id))
            self.nova_manager.setQuotaTypeServer(server)
            LOG.info("new request: id=%r user_id=%s prj_id=%s "
                     "quota=private" % (request.getId(),
                                        request.getUserId(),
                                        request.getProjectId()))
    except Exception as ex:
        LOG.error("Exception has occurred", exc_info=1)
        LOG.error(ex)

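For orientation, the rewritten processRequest() above reduces to a three-way decision: projects unknown to Synergy are built immediately, permanent servers consume the project's private quota, and ephemeral servers are queued against the shared quota. A condensed sketch of that flow (the function and its arguments are hypothetical stand-ins for the real managers; the Keystone trust handling is elided):

# Condensed sketch of the decision flow in processRequest() above.
# The arguments are hypothetical stand-ins, not the real Synergy API.
def process_request(request, projects, nova, fairshare, queue):
    server = request.getServer()
    nova.setQuotaTypeServer(server)

    if request.getProjectId() not in projects:
        nova.buildServer(request)  # static: project not managed by Synergy
        return

    quota = projects[request.getProjectId()].getQuota()

    if server.isPermanent():
        # permanent servers draw on the project's private quota
        if quota.allocate(server, blocking=False):
            nova.buildServer(request)
        else:
            nova.deleteServer(server)  # rejected: private quota exceeded
    else:
        # ephemeral servers wait in the dynamic queue for shared quota
        retry = request.getRetry()
        if retry and retry["num_attempts"]:
            quota.release(server)
            priority = 99999999  # retried requests jump the queue
        else:
            priority = fairshare.calculatePriority(
                user_id=request.getUserId(),
                prj_id=request.getProjectId(),
                timestamp=request.getCreatedAt(),
                retry=0)
        queue.insertItem(request.getUserId(), request.getProjectId(),
                         priority=priority, data=request.toDict())
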
synergy_scheduler_manager/tests/functional/__init__.py (new executable file, 0 lines)
@ -14,7 +14,8 @@ from datetime import datetime

from mock import MagicMock
from mock import patch

from synergy_scheduler_manager.common.project import Project
from synergy_scheduler_manager.common.user import User
from synergy_scheduler_manager.fairshare_manager import FairShareManager
from synergy_scheduler_manager.keystone_manager import KeystoneManager
from synergy_scheduler_manager.queue_manager import QueueManager
@ -44,66 +45,69 @@ class TestFairshareManager(base.TestCase):
        self.fsmanager.setup()

    def test_add_project(self):
        self.fsmanager.addProject(prj_id=1, prj_name="test_project", share=5)
        project = Project()
        project.setId(1)
        project.setName("test_project")
        prj_share = project.getShare()
        prj_share.setValue(5)
        self.fsmanager.addProject(project)

        self.assertEqual(1, self.fsmanager.projects[1]["id"])
        self.assertEqual("test_project", self.fsmanager.projects[1]["name"])
        self.assertEqual("dynamic", self.fsmanager.projects[1]["type"])
        self.assertEqual({}, self.fsmanager.projects[1]["users"])
        self.assertEqual({}, self.fsmanager.projects[1]["usage"])
        self.assertEqual(5, self.fsmanager.projects[1]["share"])
        self.assertEqual(1, self.fsmanager.projects[1].getId())
        self.assertEqual("test_project", self.fsmanager.projects[1].getName())
        self.assertEqual([], self.fsmanager.projects[1].getUsers())
        self.assertEqual(5, self.fsmanager.projects[1].getShare().getValue())

    def test_add_project_no_share(self):
        self.fsmanager.addProject(prj_id=1, prj_name="test_project")
        project = Project()
        project.setId(1)
        project.setName("test_project")
        self.fsmanager.addProject(project)

        self.assertEqual(1, self.fsmanager.projects[1]["id"])
        self.assertEqual("test_project", self.fsmanager.projects[1]["name"])
        self.assertEqual("dynamic", self.fsmanager.projects[1]["type"])
        self.assertEqual({}, self.fsmanager.projects[1]["users"])
        self.assertEqual({}, self.fsmanager.projects[1]["usage"])
        self.assertEqual(1, self.fsmanager.projects[1].getId())
        self.assertEqual("test_project", self.fsmanager.projects[1].getName())
        self.assertEqual([], self.fsmanager.projects[1].getUsers())
        self.assertEqual(self.fsmanager.default_share,
                         self.fsmanager.projects[1]["share"])
                         self.fsmanager.projects[1].getShare().getValue())

    def test_get_project(self):
        self.fsmanager.addProject(prj_id=1, prj_name="test_project")
        project = Project()
        project.setId(1)
        project.setName("test_project")
        self.fsmanager.addProject(project)

        expected_project = {
            "id": 1,
            "name": "test_project",
            "type": "dynamic",
            "users": {},
            "usage": {},
            "share": self.fsmanager.default_share}
        self.assertEqual(expected_project, self.fsmanager.getProject(1))
        self.assertEqual(project, self.fsmanager.getProject(1))

    def test_get_projects(self):
        self.fsmanager.addProject(prj_id=1, prj_name="test1")
        self.fsmanager.addProject(prj_id=2, prj_name="test2")
        project1 = Project()
        project1.setId(1)
        project1.setName("test1")
        self.fsmanager.addProject(project1)

        project2 = Project()
        project2.setId(2)
        project2.setName("test2")
        self.fsmanager.addProject(project2)

        expected_projects = {
            1: {"id": 1,
                "name": "test1",
                "type": "dynamic",
                "users": {},
                "usage": {},
                "share": self.fsmanager.default_share},
            2: {"id": 2,
                "name": "test2",
                "type": "dynamic",
                "users": {},
                "usage": {},
                "share": self.fsmanager.default_share}}
            1: project1,
            2: project2}
        self.assertEqual(expected_projects, self.fsmanager.getProjects())

    def test_remove_project(self):
        self.fsmanager.addProject(prj_id=1, prj_name="test")
        project = Project()
        project.setId(1)
        project.setName("test_project")
        self.fsmanager.addProject(project)

        self.assertIn(1, self.fsmanager.projects)
        self.fsmanager.removeProject(1)
        self.assertNotIn(1, self.fsmanager.projects)

    def test_calculate_priority_one_user(self):
        self.fsmanager.addProject(prj_id=1, prj_name="test")
        # self.fsmanager.addProject(prj_id=1, prj_name="test")
        project = Project()
        project.setId(1)
        project.setName("test_project")

        # Define values used for computing the priority
        age_weight = self.fsmanager.age_weight = 1.0
@ -116,9 +120,15 @@ class TestFairshareManager(base.TestCase):
        fairshare_ram = 50

        # Add a user to the project
        self.fsmanager.projects[1]["users"] = {
            1: {"fairshare_cores": fairshare_cores,
                "fairshare_ram": fairshare_ram}}
        user = User()
        user.setId(22)
        user.setName("test_user")
        priority = user.getPriority()
        priority.setFairShare('vcpus', fairshare_cores)
        priority.setFairShare('memory', fairshare_ram)

        project.addUser(user)
        self.fsmanager.addProject(project)

        # Compute the expected priority given the previously defined values
        expected_priority = int(age_weight * minutes +
@ -128,7 +138,7 @@ class TestFairshareManager(base.TestCase):
        with patch("synergy_scheduler_manager.fairshare_manager.datetime") \
                as datetime_mock:
            datetime_mock.utcnow.side_effect = (datetime_start, datetime_stop)
            priority = self.fsmanager.calculatePriority(user_id=1, prj_id=1)
            priority = self.fsmanager.calculatePriority(user_id=22, prj_id=1)

        self.assertEqual(expected_priority, priority)

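The expected_priority expression above is cut off by the hunk boundary; as a worked example of the fair-share formula it asserts (age_weight, fairshare_cores and fairshare_ram are the test's own values; the remaining weights and the request age are assumed placeholders):

# Worked example of the fair-share priority the test asserts against.
# vcpus_weight, memory_weight and minutes are assumptions for illustration.
age_weight = 1.0
vcpus_weight = 2.0      # assumption
memory_weight = 3.0     # assumption
minutes = 1.0           # request age, from the mocked datetime pair
fairshare_cores = 100
fairshare_ram = 50

expected_priority = int(age_weight * minutes +
                        vcpus_weight * fairshare_cores +
                        memory_weight * fairshare_ram)
print(expected_priority)  # 351 with the assumed weights
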
@ -10,57 +10,87 @@
# License for the specific language governing permissions and limitations
# under the License.

from mock import call
from mock import create_autospec
from mock import MagicMock
from sqlalchemy.engine.base import Engine
from synergy_scheduler_manager.common.project import Project

from synergy_scheduler_manager.queue_manager import Queue
from synergy_scheduler_manager.queue_manager import QueueItem
from synergy_scheduler_manager.quota_manager import DynamicQuota
from synergy_scheduler_manager.common.queue import QueueDB
from synergy_scheduler_manager.common.queue import QueueItem
from synergy_scheduler_manager.scheduler_manager import Notifications
from synergy_scheduler_manager.scheduler_manager import Worker
from synergy_scheduler_manager.tests.unit import base


class TestNotifications(base.TestCase):
    # TO COMPLETE
    def test_info_quota(self):

    def test_info_dynamic_quota(self):
        """Test that info() makes the correct call to DynamicQuota"""
        dynquota_mock = create_autospec(DynamicQuota)
        ns = Notifications(dynquota_mock)
        project1 = Project()
        project1.setId(1)
        project1.setName("test1")

        project2 = Project()
        project2.setId(2)
        project2.setName("test2")

        prjDict = {1: project1, 2: project2}

        ns = Notifications(prjDict)
        payload = {
            "state": "deleted",
            "instance_type": "instance_type",
            "user_id": "user_id",
            "root_gb": "root_gb",
            "metadata": "metadata",
            "instance_id": 1,
            "tenant_id": 2,
            "memory_mb": 3,
            "vcpus": 4}

        ns.info(ctxt=None,
                publisher_id=None,
                event_type="compute.instance.delete.end",
                payload=payload,
                metadata=None)

        self.assertEqual(call(1, 2, 4, 3), dynquota_mock.release.call_args)
        quota = ns.projects[2].getQuota()
        self.assertEqual(0, quota.getUsage("memory", private=False))
        self.assertEqual(0, quota.getUsage("vcpus", private=False))

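The rewritten test no longer asserts a release() call on a mock; it asserts the observable effect on the project's shared quota. A self-contained sketch of that contract, with stub classes standing in for the real ones (an illustration of the required behavior, not the Notifications implementation):

# Sketch of the behavior the test pins down: a
# "compute.instance.delete.end" event must zero the deleted server's
# shared-quota usage on the owning project.
class QuotaStub(object):
    def __init__(self):
        self._usage = {"vcpus": 0, "memory": 0}

    def getUsage(self, resource, private=False):
        return self._usage[resource]

    def release(self, vcpus, memory):
        # usage never goes below zero for untracked instances
        self._usage["vcpus"] = max(0, self._usage["vcpus"] - vcpus)
        self._usage["memory"] = max(0, self._usage["memory"] - memory)


class ProjectStub(object):
    def __init__(self):
        self._quota = QuotaStub()

    def getQuota(self):
        return self._quota


class NotificationsSketch(object):
    def __init__(self, projects):
        self.projects = projects

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        # only instance deletions release shared-quota resources
        if event_type == "compute.instance.delete.end":
            quota = self.projects[payload["tenant_id"]].getQuota()
            quota.release(payload["vcpus"], payload["memory_mb"])
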
class TestWorker(base.TestCase):

    # TO COMPLETE
    def setUp(self):
        super(TestWorker, self).setUp()
        self.nova_manager_mock = MagicMock()
        self.keystone_manager_mock = MagicMock()
        db_engine_mock = create_autospec(Engine)

        project1 = Project()
        project1.setId("1")
        project1.setName("test1")

        project2 = Project()
        project2.setId("2")
        project2.setName("test2")

        projects_list = {'1': project1, '2': project2}
        self.worker = Worker(
            name="test",
            queue=Queue("testq", db_engine_mock),
            quota=DynamicQuota(),
            nova_manager=self.nova_manager_mock)
            queue=QueueDB("testq", db_engine_mock),
            projects=projects_list,
            nova_manager=self.nova_manager_mock,
            keystone_manager=self.keystone_manager_mock)

    def test_destroy(self):
        """An empty worker can be destroyed without raising an exception."""
        self.worker.destroy()

    def test_name(self):
        self.assertEqual('test', self.worker.getName())

    def test_run_build_server(self):

        def nova_exec_side_effect(command, *args, **kwargs):
@ -90,16 +120,11 @@ class TestWorker(base.TestCase):
        nova_exec.side_effect = nova_exec_side_effect

        # Mock quota allocation
        quota_allocate_mock = create_autospec(self.worker.quota.allocate)
        quota_allocate_mock = create_autospec(
            self.worker.projects['1'].getQuota().allocate)
        quota_allocate_mock.return_value = True
        self.worker.quota.allocate = quota_allocate_mock
        self.worker.projects['1'].getQuota().allocate = quota_allocate_mock

        # Delete item from the queue
        delete_item_mock = create_autospec(self.worker.queue.deleteItem)
        self.worker.queue.deleteItem = delete_item_mock

        # Check that we ask nova to BUILD_SERVER and the qitem is deleted
        self.worker.run()
        build_server_call = nova_exec.call_args_list[1]  # second call
        self.assertEqual(("BUILD_SERVER",), build_server_call[0])  # check args
        self.assertEqual(call(qitem_mock), delete_item_mock.call_args)

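test_run_build_server implies the worker's main loop: pop a queue item, allocate shared quota for it, ask nova to build the server, then drop the item from the queue. A hedged sketch of one iteration (the function and the getData() access are assumptions; only the BUILD_SERVER command, allocate() and deleteItem() are visible in the test):

# One iteration of the worker loop implied by test_run_build_server.
def run_once(queue, projects, nova):
    qitem = queue.getItem()
    if qitem is None:
        return

    # first nova call: fetch/prepare the request (exact command is mocked
    # away in the test); second call: BUILD_SERVER
    request = qitem.getData()
    quota = projects[qitem.getProjectId()].getQuota()

    if quota.allocate(request, blocking=False):
        nova.execute("BUILD_SERVER", request)
        queue.deleteItem(qitem)
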
@ -1,116 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from synergy_scheduler_manager.quota_manager import DynamicQuota
from synergy_scheduler_manager.tests.unit import base


class TestDynamicQuota(base.TestCase):

    def setUp(self):
        super(TestDynamicQuota, self).setUp()
        self.dyn_quota = DynamicQuota()

    def test_get_add_project_no_usage(self):
        self.dyn_quota.addProject(prj_id=1, prj_name="test_project")

        project = self.dyn_quota.getProject(1)
        self.assertEqual("test_project", project["name"])
        self.assertEqual(0, project["cores"])
        self.assertEqual(0, project["ram"])
        self.assertEqual({"active": [], "pending": []}, project["instances"])
        self.assertEqual(0, project["TTL"])

    def test_get_add_project_with_usage(self):
        fake_usage = {"cores": 5, "ram": 12, "instances": ["a", "b"]}
        self.dyn_quota.addProject(prj_id=1, prj_name="test", usage=fake_usage)

        project = self.dyn_quota.getProject(1)
        self.assertEqual("test", project["name"])
        self.assertEqual(5, project["cores"])
        self.assertEqual(12, project["ram"])
        self.assertEqual({"active": ["a", "b"], "pending": []},
                         project["instances"])
        self.assertEqual(0, project["TTL"])
        self.assertEqual(12, self.dyn_quota.ram["in_use"])
        self.assertEqual(5, self.dyn_quota.cores["in_use"])

    def test_get_size(self):
        size = self.dyn_quota.getSize()
        self.assertEqual(0, size["cores"])
        self.assertEqual(0, size["ram"])

    def test_set_size(self):
        self.dyn_quota.setSize(cores=10, ram=20)
        self.assertEqual(10, self.dyn_quota.cores["limit"])
        self.assertEqual(20, self.dyn_quota.ram["limit"])

    def test_get_projects(self):
        self.assertEqual(self.dyn_quota.projects, self.dyn_quota.getProjects())

    def test_remove_project(self):
        self.dyn_quota.addProject(prj_id=1, prj_name="test")

        self.assertIn(1, self.dyn_quota.projects)

        self.dyn_quota.removeProject(1)
        self.assertNotIn(1, self.dyn_quota.projects)

    def test_allocate_single_instance(self):
        self.dyn_quota.setSize(cores=20, ram=100)
        self.dyn_quota.addProject(prj_id=1, prj_name="test")

        self.dyn_quota.allocate("a", prj_id=1, cores=5, ram=10)

        project = self.dyn_quota.getProject(1)
        self.assertIn("a", project["instances"]["active"])
        self.assertEqual(5, project["cores"])
        self.assertEqual(10, project["ram"])
        self.assertEqual(5, self.dyn_quota.cores["in_use"])
        self.assertEqual(10, self.dyn_quota.ram["in_use"])

    def test_allocate_multiple_instances(self):
        self.dyn_quota.setSize(cores=30, ram=100)
        self.dyn_quota.addProject(prj_id=1, prj_name="test")

        self.dyn_quota.allocate("a", prj_id=1, cores=5, ram=10)
        self.dyn_quota.allocate("b", prj_id=1, cores=7, ram=20)
        self.dyn_quota.allocate("c", prj_id=1, cores=10, ram=20)

        project = self.dyn_quota.getProject(1)
        self.assertIn("a", project["instances"]["active"])
        self.assertIn("b", project["instances"]["active"])
        self.assertIn("c", project["instances"]["active"])
        self.assertEqual(22, project["cores"])
        self.assertEqual(50, project["ram"])
        self.assertEqual(22, self.dyn_quota.cores["in_use"])
        self.assertEqual(50, self.dyn_quota.ram["in_use"])

    def test_allocate_multiple_projects(self):
        self.dyn_quota.setSize(cores=20, ram=100)
        self.dyn_quota.addProject(prj_id=1, prj_name="project_A")
        self.dyn_quota.addProject(prj_id=2, prj_name="project_B")

        # TODO(vincent): can we allocate the same instance to 2 projects?
        self.dyn_quota.allocate("a", prj_id=1, cores=3, ram=10)
        self.dyn_quota.allocate("a", prj_id=2, cores=5, ram=15)

        project_a = self.dyn_quota.getProject(1)
        project_b = self.dyn_quota.getProject(2)
        self.assertIn("a", project_a["instances"]["active"])
        self.assertIn("a", project_b["instances"]["active"])
        self.assertEqual(3, project_a["cores"])
        self.assertEqual(10, project_a["ram"])
        self.assertEqual(5, project_b["cores"])
        self.assertEqual(15, project_b["ram"])
        self.assertEqual(8, self.dyn_quota.cores["in_use"])
        self.assertEqual(25, self.dyn_quota.ram["in_use"])
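This file is removed by the commit, but its tests document the old DynamicQuota bookkeeping precisely: setSize() fixes the limits, and both addProject() (with an initial usage) and allocate() add an instance's cores/ram to the project and to the global in_use counters. A minimal self-contained sketch of that contract, for reference (not the removed implementation):

# Minimal sketch of the accounting the deleted tests exercised.
class DynamicQuotaSketch(object):
    def __init__(self):
        self.cores = {"limit": 0, "in_use": 0}
        self.ram = {"limit": 0, "in_use": 0}
        self.projects = {}

    def setSize(self, cores, ram):
        self.cores["limit"] = cores
        self.ram["limit"] = ram

    def addProject(self, prj_id, prj_name, usage=None):
        usage = usage or {"cores": 0, "ram": 0, "instances": []}
        self.projects[prj_id] = {
            "name": prj_name,
            "cores": usage["cores"],
            "ram": usage["ram"],
            "instances": {"active": list(usage["instances"]),
                          "pending": []},
            "TTL": 0}
        self.cores["in_use"] += usage["cores"]
        self.ram["in_use"] += usage["ram"]

    def allocate(self, instance_id, prj_id, cores, ram):
        # book the instance on the project and on the global counters
        project = self.projects[prj_id]
        project["instances"]["active"].append(instance_id)
        project["cores"] += cores
        project["ram"] += ram
        self.cores["in_use"] += cores
        self.ram["in_use"] += ram
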
@ -1,335 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import heapq

from mock import call
from mock import create_autospec
from mock import patch
from sqlalchemy.engine.base import Engine

from synergy_scheduler_manager.fairshare_manager import FairShareManager
from synergy_scheduler_manager.queue_manager import PriorityQueue
from synergy_scheduler_manager.queue_manager import Queue
from synergy_scheduler_manager.queue_manager import QueueItem
from synergy_scheduler_manager.tests.unit import base


class TestQueueItem(base.TestCase):

    def test_get_set_id(self):
        qitem = QueueItem(id=1,
                          user_id=None,
                          prj_id=None,
                          priority=None,
                          retry_count=None,
                          creation_time=None,
                          last_update=None,
                          data=None)

        self.assertEqual(1, qitem.getId())

        qitem.setId(10)
        self.assertEqual(10, qitem.getId())

    def test_get_set_userid(self):
        qitem = QueueItem(id=None,
                          user_id=1,
                          prj_id=None,
                          priority=None,
                          retry_count=None,
                          creation_time=None,
                          last_update=None,
                          data=None)

        self.assertEqual(1, qitem.getUserId())

        qitem.setUserId(10)
        self.assertEqual(10, qitem.getUserId())

    def test_get_set_projectid(self):
        qitem = QueueItem(id=None,
                          user_id=None,
                          prj_id=1,
                          priority=None,
                          retry_count=None,
                          creation_time=None,
                          last_update=None,
                          data=None)

        self.assertEqual(1, qitem.getProjectId())

        qitem.setProjectId(10)
        self.assertEqual(10, qitem.getProjectId())

    def test_get_set_priority(self):
        qitem = QueueItem(id=None,
                          user_id=None,
                          prj_id=None,
                          priority=1,
                          retry_count=None,
                          creation_time=None,
                          last_update=None,
                          data=None)

        self.assertEqual(1, qitem.getPriority())

        qitem.setPriority(10)
        self.assertEqual(10, qitem.getPriority())

    def test_retry_count(self):
        qitem = QueueItem(id=None,
                          user_id=None,
                          prj_id=None,
                          priority=None,
                          retry_count=1,
                          creation_time=None,
                          last_update=None,
                          data=None)

        self.assertEqual(1, qitem.getRetryCount())

        qitem.setRetryCount(10)
        self.assertEqual(10, qitem.getRetryCount())

        qitem.incRetryCount()
        self.assertEqual(11, qitem.getRetryCount())

    def test_get_set_creation_time(self):
        qitem = QueueItem(id=None,
                          user_id=None,
                          prj_id=None,
                          priority=None,
                          retry_count=None,
                          creation_time="now",
                          last_update=None,
                          data=None)

        self.assertEqual("now", qitem.getCreationTime())

        qitem.setCreationTime("later")
        self.assertEqual("later", qitem.getCreationTime())

    def test_get_set_last_update(self):
        qitem = QueueItem(id=None,
                          user_id=None,
                          prj_id=None,
                          priority=None,
                          retry_count=None,
                          creation_time=None,
                          last_update="now",
                          data=None)

        self.assertEqual("now", qitem.getLastUpdate())

        qitem.setLastUpdate("later")
        self.assertEqual("later", qitem.getLastUpdate())

    def test_get_set_data(self):
        qitem = QueueItem(id=None,
                          user_id=None,
                          prj_id=None,
                          priority=None,
                          retry_count=None,
                          creation_time=None,
                          last_update=None,
                          data=1)

        self.assertEqual(1, qitem.getData())

        qitem.setData(2)
        self.assertEqual(2, qitem.getData())


class TestPriorityQueue(base.TestCase):

    def test_put(self):
        pq = PriorityQueue()
        pq.put(0, "a")
        pq.put(5, "b")
        pq.put(10, "c")

        self.assertIn((0, 0, "a"), pq.queue)
        self.assertIn((-5, 1, "b"), pq.queue)
        self.assertIn((-10, 2, "c"), pq.queue)

        self.assertEqual(3, pq._index)

        self.assertEqual((-10, 2, "c"), heapq.heappop(pq.queue))
        self.assertEqual((-5, 1, "b"), heapq.heappop(pq.queue))
        self.assertEqual((0, 0, "a"), heapq.heappop(pq.queue))

    def test_get(self):
        pq = PriorityQueue()
        pq.put(0, "a")
        pq.put(5, "b")

        self.assertEqual("b", pq.get())
        self.assertEqual("a", pq.get())

    def test_size(self):
        pq = PriorityQueue()
        pq.put(0, "a")
        pq.put(5, "b")
        pq.put(10, "c")

        self.assertEqual(3, pq.size())

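These deleted tests fully specify the PriorityQueue ordering: put() stores (-priority, index, item) tuples on a heap, so get() pops the highest priority first and equal priorities come out FIFO via the monotonically increasing index. A self-contained sketch consistent with those assertions:

import heapq

# Sketch matching the deleted tests above: negated priorities make the
# min-heap behave as a max-priority queue; the insertion index breaks ties
# in FIFO order.
class PriorityQueueSketch(object):
    def __init__(self):
        self.queue = []
        self._index = 0

    def put(self, priority, item):
        heapq.heappush(self.queue, (-priority, self._index, item))
        self._index += 1

    def get(self):
        # highest priority (most negative key) comes off first
        return heapq.heappop(self.queue)[-1]

    def size(self):
        return len(self.queue)
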
class TestQueue(base.TestCase):

    def setUp(self):
        super(TestQueue, self).setUp()

        # Create a Queue that mocks database interaction
        self.db_engine_mock = create_autospec(Engine)
        self.q = Queue(name="test", db_engine=self.db_engine_mock)

    def test_insert_item(self):
        self.q.insertItem(user_id=1, prj_id=2, priority=10, data="mydata")

        # Check the db call of the item insert
        insert_call = call.connect().execute(
            'insert into `test` (user_id, prj_id, priority, data) '
            'values(%s, %s, %s, %s)', [1, 2, 10, '"mydata"'])
        self.assertIn(insert_call, self.db_engine_mock.mock_calls)

        # Check the item existence and values in the in-memory queue
        priority, index, item = heapq.heappop(self.q.pqueue.queue)
        self.assertEqual(-10, priority)
        self.assertEqual(0, index)
        self.assertEqual(1, item.user_id)
        self.assertEqual(2, item.prj_id)
        self.assertEqual(10, item.priority)
        self.assertEqual(0, item.retry_count)
        self.assertIsNone(item.data)  # TODO(vincent): should it be "mydata"?

    def test_get_size(self):
        execute_mock = self.db_engine_mock.connect().execute
        execute_call = call('select count(*) from `test`')

        fetchone_mock = execute_mock().fetchone
        fetchone_mock.return_value = [3]

        # Check that getSize() uses the correct sqlalchemy method
        self.assertEqual(3, self.q.getSize())

        # Check that getSize() uses the correct SQL statement
        self.assertEqual(execute_call, execute_mock.call_args)

    def test_reinsert_item(self):
        # TODO(vincent): what is the purpose of this method?
        # It will lead to duplicates.
        pass

    def test_get_item(self):
        # Insert the item and mock its DB insertion
        execute_mock = self.db_engine_mock.connect().execute
        execute_mock().lastrowid = 123
        self.q.insertItem(user_id=1, prj_id=2, priority=10, data="mydata")

        # Mock the DB select by returning the same things we inserted before
        select_mock = self.db_engine_mock.connect().execute
        select_call = call("select user_id, prj_id, priority, retry_count, "
                           "creation_time, last_update, data from `test` "
                           "where id=%s", [123])
        fetchone_mock = select_mock().fetchone
        fetchone_mock.return_value = [1, 2, 10, 0, "now", "now", '"mydata"']

        item = self.q.getItem()
        self.assertEqual(select_call, select_mock.call_args)
        self.assertEqual(123, item.id)
        self.assertEqual(1, item.user_id)
        self.assertEqual(2, item.prj_id)
        self.assertEqual(10, item.priority)
        self.assertEqual(0, item.retry_count)
        self.assertEqual("now", item.creation_time)
        self.assertEqual("now", item.last_update)
        self.assertEqual("mydata", item.data)

    def test_delete_item(self):
        # Mock QueueItem to be deleted
        qitem = create_autospec(QueueItem)
        qitem.getId.return_value = 123

        # Mock the DB delete
        execute_mock = self.db_engine_mock.connect().execute
        execute_call = call("delete from `test` where id=%s", [123])

        self.q.deleteItem(qitem)
        self.assertEqual(execute_call, execute_mock.call_args)

    def test_update_item(self):
        # Mock QueueItem to be updated
        qitem = create_autospec(QueueItem)
        qitem.getPriority.return_value = 10
        qitem.getRetryCount.return_value = 20
        qitem.getLastUpdate.return_value = "right_now"
        qitem.getId.return_value = 123

        # Mock the DB update
        execute_mock = self.db_engine_mock.connect().execute
        execute_call = call("update `test` set priority=%s, retry_count=%s, "
                            "last_update=%s where id=%s",
                            [10, 20, "right_now", 123])

        # Check the DB call and that the new QueueItem is in the queue
        self.q.updateItem(qitem)
        self.assertEqual(execute_call, execute_mock.call_args)
        self.assertIn((-10, 0, qitem), self.q.pqueue.queue)

    def test_update_priority(self):
        qitem1 = QueueItem(
            id=1,
            user_id=None,
            prj_id=None,
            priority=0,
            retry_count=None,
            creation_time="0AC",
            last_update="before")
        qitem2 = QueueItem(
            id=2,
            user_id=None,
            prj_id=None,
            priority=10,
            retry_count=None,
            creation_time="0AC",
            last_update="before")

        # TODO(vincent): priority on an item & priority in the queue,
        # shouldn't it be the same thing?
        self.q.pqueue.put(0, qitem1)
        self.q.pqueue.put(10, qitem2)

        # Mock fairshare_mgr to fake computing the priority
        self.q.fairshare_manager = create_autospec(FairShareManager)
        self.q.fairshare_manager.execute.side_effect = [200, 100]  # new prio.

        # Mock DB update call
        execute_mock = self.db_engine_mock.connect().execute
        execute_call1 = call("update `test` set priority=%s, last_update=%s "
                             "where id=%s", [200, "now", 2])
        execute_call2 = call("update `test` set priority=%s, last_update=%s "
                             "where id=%s", [100, "now", 1])

        # Mock datetime.now() call so it is predictable
        with patch("synergy_scheduler_manager.queue_manager.datetime") as mock:
            mock.now.return_value = "now"
            self.q.updatePriority()

        # Check that fsmanager.execute was correctly called
        self.assertIn(execute_call1, execute_mock.call_args_list)
        self.assertIn(execute_call2, execute_mock.call_args_list)

        # Check new QueueItem with updated priority are in the pqueue
        self.assertEqual(200, qitem2.priority)
        self.assertEqual(100, qitem1.priority)

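test_update_priority fixes the contract of Queue.updatePriority(): every queued item gets a fresh priority from the fair-share manager and the new value is written back to the queue table. A hedged sketch of that loop (the function and the db_connection argument are stand-ins; only the CALCULATE_PRIORITY command and the SQL shape are taken from the test):

# Sketch of the updatePriority() behavior the deleted test pinned down.
def update_priority(pqueue, fairshare_manager, db_connection, now):
    requeued = []
    while pqueue.size() > 0:
        item = pqueue.get()
        # recompute the priority through the fair-share manager
        new_priority = fairshare_manager.execute(
            "CALCULATE_PRIORITY",
            user_id=item.user_id,
            prj_id=item.prj_id)
        item.priority = new_priority
        # persist the new value, as the test's execute_call asserts
        db_connection.execute(
            "update `test` set priority=%s, last_update=%s where id=%s",
            [new_priority, now, item.id])
        requeued.append(item)
    # put every item back with its refreshed priority
    for item in requeued:
        pqueue.put(item.priority, item)
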
@ -33,9 +33,9 @@ class TestService(base.TestCase):
        self.service.setDescription('description')
        self.assertEqual('description', self.service.getDescription())

    def test_set_get_Status(self):
        self.service.setStatus('enabled')
        self.assertEqual('enabled', self.service.getStatus())
    def test_set_get_Enabled(self):
        self.service.setEnabled(True)
        self.assertEqual(True, self.service.isEnabled())

    def test_isEnable(self):
        self.assertEqual(False, self.service.isEnabled())