rank node candidates

Change-Id: I260f8105cdd675d9475a9179149a445c4f56fa17
Gregory Koronakos 2024-03-20 01:27:00 +02:00
parent a4e7c8f971
commit c504f94705
46 changed files with 77644 additions and 213 deletions

File diff suppressed because it is too large


@@ -0,0 +1,502 @@
[
{
"id": "1b2a77f4-0f81-4910-a15c-0dd57c6a89ff",
"nodeCandidateType": "EDGE",
"jobIdForByon": null,
"jobIdForEdge": null,
"price": 0.0558,
"cloud": {
"id": "nebulous-aws-sal-1",
"endpoint": null,
"cloudType": "PUBLIC",
"api": {
"providerName": "aws-ec2"
},
"credential": null,
"cloudConfiguration": {
"nodeGroup": "",
"properties": {}
},
"owner": null,
"state": null,
"diagnostic": null
},
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 47.4705,
"longitude": 6.1918
},
"parent": null,
"state": null,
"owner": null
},
"image": {
"id": "",
"name": "PrEstoCloud-Golden-Image",
"providerId": "",
"operatingSystem": {
"operatingSystemFamily": "UNKNOWN_OS_FAMILY",
"operatingSystemArchitecture": "I386",
"operatingSystemVersion": 0.0
},
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"hardware": {
"id": "nebulous-aws-sal-1/eu-west-2/t3.medium",
"name": "",
"providerId": "",
"cores": 2,
"ram": 4096,
"disk": 16.0,
"fpga": 0,
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"pricePerInvocation": 0.0,
"memoryPrice": 0.0,
"nodeId": "",
"environment": null,
"score": 1,
"rank": 1
},
{
"id": "93b846d4-3f43-4e4b-87f2-c3ba07bb5555",
"nodeCandidateType": "EDGE",
"jobIdForByon": null,
"jobIdForEdge": null,
"price": 0.0125,
"cloud": {
"id": "nebulous-aws-sal-1",
"endpoint": null,
"cloudType": "PUBLIC",
"api": {
"providerName": "aws-ec2"
},
"credential": null,
"cloudConfiguration": {
"nodeGroup": "",
"properties": {}
},
"owner": null,
"state": null,
"diagnostic": null
},
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 47.4705,
"longitude": 6.1918
},
"parent": null,
"state": null,
"owner": null
},
"image": {
"id": "",
"name": "PrEstoCloud-Golden-Image",
"providerId": "",
"operatingSystem": {
"operatingSystemFamily": "UNKNOWN_OS_FAMILY",
"operatingSystemArchitecture": "I386",
"operatingSystemVersion": 0.0
},
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"hardware": {
"id": "nebulous-aws-sal-1/eu-west-2/t3.medium",
"name": "",
"providerId": "",
"cores": 2,
"ram": 4096,
"disk": 16.0,
"fpga": 0,
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"pricePerInvocation": 0.0,
"memoryPrice": 0.0,
"nodeId": "",
"environment": null,
"score": 0.938,
"rank": 2
},
{
"id": "3b53f32d-53ca-410e-969b-6bf542a3fd32",
"nodeCandidateType": "EDGE",
"jobIdForByon": null,
"jobIdForEdge": null,
"price": 0.0836,
"cloud": {
"id": "nebulous-aws-sal-1",
"endpoint": null,
"cloudType": "PUBLIC",
"api": {
"providerName": "aws-ec2"
},
"credential": null,
"cloudConfiguration": {
"nodeGroup": "",
"properties": {}
},
"owner": null,
"state": null,
"diagnostic": null
},
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 47.4705,
"longitude": 6.1918
},
"parent": null,
"state": null,
"owner": null
},
"image": {
"id": "",
"name": "PrEstoCloud-Golden-Image",
"providerId": "",
"operatingSystem": {
"operatingSystemFamily": "UNKNOWN_OS_FAMILY",
"operatingSystemArchitecture": "I386",
"operatingSystemVersion": 0.0
},
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"hardware": {
"id": "nebulous-aws-sal-1/eu-west-2/t3.medium",
"name": "",
"providerId": "",
"cores": 2,
"ram": 4096,
"disk": 16.0,
"fpga": 0,
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"pricePerInvocation": 0.0,
"memoryPrice": 0.0,
"nodeId": "",
"environment": null,
"score": 0.786,
"rank": 3
},
{
"id": "39b00b2a-d430-4557-aed0-b7fa758d83db",
"nodeCandidateType": "EDGE",
"jobIdForByon": null,
"jobIdForEdge": null,
"price": 0.0703,
"cloud": {
"id": "nebulous-aws-sal-1",
"endpoint": null,
"cloudType": "PUBLIC",
"api": {
"providerName": "aws-ec2"
},
"credential": null,
"cloudConfiguration": {
"nodeGroup": "",
"properties": {}
},
"owner": null,
"state": null,
"diagnostic": null
},
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 47.4705,
"longitude": 6.1918
},
"parent": null,
"state": null,
"owner": null
},
"image": {
"id": "",
"name": "PrEstoCloud-Golden-Image",
"providerId": "",
"operatingSystem": {
"operatingSystemFamily": "UNKNOWN_OS_FAMILY",
"operatingSystemArchitecture": "I386",
"operatingSystemVersion": 0.0
},
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"hardware": {
"id": "nebulous-aws-sal-1/eu-west-2/t3.medium",
"name": "",
"providerId": "",
"cores": 2,
"ram": 4096,
"disk": 16.0,
"fpga": 0,
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"pricePerInvocation": 0.0,
"memoryPrice": 0.0,
"nodeId": "",
"environment": null,
"score": 0.764,
"rank": 4
},
{
"id": "4b64de40-053a-4ab7-bf68-55a82938507d",
"nodeCandidateType": "EDGE",
"jobIdForByon": null,
"jobIdForEdge": null,
"price": 0.0122,
"cloud": {
"id": "nebulous-aws-sal-1",
"endpoint": null,
"cloudType": "PUBLIC",
"api": {
"providerName": "aws-ec2"
},
"credential": null,
"cloudConfiguration": {
"nodeGroup": "",
"properties": {}
},
"owner": null,
"state": null,
"diagnostic": null
},
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 47.4705,
"longitude": 6.1918
},
"parent": null,
"state": null,
"owner": null
},
"image": {
"id": "",
"name": "PrEstoCloud-Golden-Image",
"providerId": "",
"operatingSystem": {
"operatingSystemFamily": "UNKNOWN_OS_FAMILY",
"operatingSystemArchitecture": "I386",
"operatingSystemVersion": 0.0
},
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"hardware": {
"id": "nebulous-aws-sal-1/eu-west-2/t3.medium",
"name": "",
"providerId": "",
"cores": 2,
"ram": 4096,
"disk": 16.0,
"fpga": 0,
"location": {
"id": "",
"name": "",
"providerId": "",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "France",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"pricePerInvocation": 0.0,
"memoryPrice": 0.0,
"nodeId": "",
"environment": null,
"score": 0.661,
"rank": 5
}
]
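For orientation, a minimal sketch of consuming this ranked list (the filename is assumed for illustration):

import json

# Load the ranked node candidates produced by CFSB (hypothetical filename)
with open("ranked_node_candidates.json") as f:
    candidates = json.load(f)

# Each candidate carries a normalized "score" in (0, 1] and a 1-based "rank"
for c in sorted(candidates, key=lambda c: c["rank"]):
    print(c["rank"], c["id"], c["hardware"]["id"], round(c["score"], 3))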

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -2,16 +2,16 @@
 FROM python:3.10
 #
-WORKDIR /code
+WORKDIR /flask_app
 #
-COPY ./requirements.txt /code/requirements.txt
+COPY ./requirements.txt /flask_app/requirements.txt
 #
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+RUN pip install --no-cache-dir --upgrade -r /flask_app/requirements.txt
 #
-COPY ./ /code
+COPY ./ /flask_app
 #
 CMD [ "python3", "-m" , "flask", "run", "--host=0.0.0.0", "--port", "8001"]

cfsb-backend/Evaluation.py (new file, 232 lines)

@@ -0,0 +1,232 @@
import numpy as np
import json
from scipy.optimize import linprog
from scipy.stats import rankdata
def perform_evaluation(data_table, relative_wr_data, immediate_wr_data, node_names, node_ids):
print("Evaluation begun with perform_evaluation():")
# print("Data Table:", data_table)
# Identify the boolean criteria columns by checking if all values are either 0 or 1
# boolean_criteria = [criterion for criterion in data_table if set(data_table[criterion]) <= {0, 1}]
boolean_criteria = [criterion for criterion in data_table if 'boolean' in criterion.lower()]
# print("Boolean Criteria:", boolean_criteria)
# Check if any boolean variables exist
if boolean_criteria:
print("Boolean variables exist:", boolean_criteria)
# Initialize a dictionary that stores the category of each fog node, including a category for zero True values
# The first category holds the all-False nodes and the last the all-True ones
fog_node_categories = {i: [] for i in range(len(boolean_criteria) + 1)}
# Iterate over the list of fog nodes to count the '1' (True) values and assign categories
for i in range(len(node_names)):
true_count = sum(data_table[boolean][i] for boolean in boolean_criteria)
fog_node_categories[true_count].append(node_names[i])
# Remove the boolean criteria from the data_table
for boolean in boolean_criteria:
del data_table[boolean]
print(fog_node_categories)
print(data_table)
# Sort categories in descending order of true_count
sorted_categories = sorted(fog_node_categories, reverse=True)
# Create constraint matrices
A_boolean = [] # The inequality constraint matrix
b_boolean = [] # The inequality constraint vector
# Create constraints for each category having higher scores than the next lower category
for higher_cat in range(len(sorted_categories) - 1):
for fog_node_high in fog_node_categories[sorted_categories[higher_cat]]:
for fog_node_low in fog_node_categories[sorted_categories[higher_cat + 1]]:
# Create a constraint for each pair of fog nodes (high > low)
high_scores = [-data_table[criterion][node_names.index(fog_node_high)] for criterion in data_table]
low_scores = [-data_table[criterion][node_names.index(fog_node_low)] for criterion in data_table]
constraint = [h - l for h, l in zip(high_scores, low_scores)]
A_boolean.append(constraint)
b_boolean.append(0) # The score difference must be greater than 0
# print("A_boolean:", A_boolean)
# print("b_boolean:", b_boolean)
# Reserve a variable (column) for each criterion
criteria_list = list(data_table.keys())
criterion_index = {criterion: idx for idx, criterion in enumerate(criteria_list)}
# Initialize A and b matrices for inequality constraints, and A_eq and b_eq for equality constraints
A = []
b = []
A_eq = []
b_eq = []
# Create the constraint for each unit (DMU): its weighted output must not exceed 1
for row_values in zip(*data_table.values()):
A.append(list(row_values))
b.append(1)  # one bound per DMU constraint row
# Add weight restriction constraints to A or A_eq based on the operator
for constraint in relative_wr_data:
lhs_index = criterion_index[constraint['LHSCriterion']]
rhs_index = criterion_index[constraint['RHSCriterion']]
intensity = constraint['Intense']
constraint_row = [0] * len(criteria_list)
if constraint['Operator'] == 1: # >=
constraint_row[lhs_index] = -1
constraint_row[rhs_index] = intensity
A.append(constraint_row)
b.append(0)
elif constraint['Operator'] == -1: # <=
constraint_row[lhs_index] = 1
constraint_row[rhs_index] = -intensity
A.append(constraint_row)
b.append(0)
elif constraint['Operator'] == 0: # equality
constraint_row[lhs_index] = -1
constraint_row[rhs_index] = intensity
A_eq.append(constraint_row)
b_eq.append(0)
# Add immediate constraints to A or A_eq based on the given operator
for constraint in immediate_wr_data:
criterion_idx = criterion_index[constraint['Criterion']]
intensity = constraint['Value']
constraint_row = [0] * len(criteria_list)
if constraint['Operator'] == 1:
constraint_row[criterion_idx] = -1
A.append(constraint_row)
b.append(-intensity)
elif constraint['Operator'] == -1:
constraint_row[criterion_idx] = 1
A.append(constraint_row)
b.append(intensity)
elif constraint['Operator'] == 0:
constraint_row[criterion_idx] = 1
A_eq.append(constraint_row)
b_eq.append(intensity)
# Add constraints coming from the Boolean variables
if boolean_criteria:
A.extend(A_boolean)
b.extend(b_boolean)
# Convert lists to numpy arrays
A = np.array(A, dtype=float)
b = np.array(b, dtype=float)
A_eq = np.array(A_eq, dtype=float) if A_eq else None
b_eq = np.array(b_eq, dtype=float) if b_eq else None
# print(A)
# print(b)
# print(A_eq)
# print(b_eq)
num_of_dmus = len(next(iter(data_table.values())))
Cols_No = len(criteria_list)
DEA_Scores = []
epsilon = 0.00001 # Lower bound of the variables
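# The LP solved per DMU d (a sketch of the model the code below builds):
#   maximize    sum_i w_i * x_{i,d}
#   subject to  sum_i w_i * x_{i,j} <= 1   for every DMU j
#               the relative/immediate weight restrictions collected in A, b, A_eq, b_eq
#               w_i >= epsilon             for every criterion i
# linprog minimizes, so the objective coefficients are negated below.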
# Iterate over each DMU to perform DEA
for dmu_index in range(num_of_dmus):
# Gathering values for the current DMU
dmu_values = [values[dmu_index] for values in data_table.values()]
# Forming the objective function coefficients
c = -np.array(dmu_values)
# Bounds for each variable
bounds = [(epsilon, None) for _ in range(Cols_No)]
# Solve the problem https://pythonguides.com/python-scipy-linprog/
res = linprog(c, A_ub=A, b_ub=b, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method='highs')
# res = linprog(c, A_ub=A, b_ub=b, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method='simplex', callback=None, options={'presolve': True, 'autoscale': True, 'bland': True})
DEA_Scores.append(-res.fun if res.success else None)
# Check if the optimization problem is infeasible
if not res.success:
# Return an appropriate JSON response indicating infeasibility
infeasibility_response = {
"LPstatus": "infeasible",
"message": "The optimization problem is infeasible with the given weight restrictions. Please review them."
# If no weight restrictions are used, the infeasibility comes from the criteria data itself; adjust the data.
}
return infeasibility_response
# return {'LPstatus': 'infeasible', 'results': infeasibility_response}
# Round the DEA scores to 2 decimal places
DEA_Scores_Rounded = np.round(DEA_Scores, 2)
# On success, rank the DEA scores using the 'max' method for ties
DEA_Scores_Ranked = len(DEA_Scores_Rounded) - rankdata(DEA_Scores_Rounded, method='max') + 1
# Print the rounded scores and their corresponding ranks
# print("Rounded DEA Scores:", DEA_Scores_Rounded)
# print("Corresponding Ranks:", DEA_Scores_Ranked)
# Create a JSON object with titles, DEA scores, and ranks
results_json = [
{
"Title": node_names[i],
"Id": node_ids[i],
"DEA Score": DEA_Scores[i],
"Rank": int(DEA_Scores_Ranked[i])
}
for i in range(len(node_names))
]
# Return successful results
return {'LPstatus': 'feasible', 'results': results_json}
# return results_json, DEA_Scores, DEA_Scores_Ranked
# # Provided data
# data_table = {'2ad4bd97-d932-42a5-860e-e607a50f161d': [3, 1], 'e917581d-1a62-496b-9d2e-05972fe309e9': [2, 1], '78aca9a8-8c14-4c7d-af34-72cef0da992d': [3, 1], 'd2bddce9-4118-41a9-b528-3bac32b13312': [3, 2]}
# relative_wr_data: [{'LHSCriterion': 'Accountability', 'Operator': 1, 'Intense': 2, 'RHSCriterion': 'Compliance'}]
# immediate_wr_data: [{'Criterion': 'Compliance', 'Operator': 1, 'Value': 0.5}]
#
# node_names = ['2ad4bd97-d932-42a5-860e-e607a50f161d', 'e917581d-1a62-496b-9d2e-05972fe309e9', '78aca9a8-8c14-4c7d-af34-72cef0da992d', 'd2bddce9-4118-41a9-b528-3bac32b13312']
#
# Evaluation_JSON = perform_evaluation(data_table, [], [], node_names)
# pretty_json = json.dumps(Evaluation_JSON)
#
# data_table = {
# 'Provider Track record': [44.3, 37.53, 51.91, 86.56, 28.43],
# 'Agility': [41.8, 53.69, 91.3, 84.72, 58.37],
# 'Reputation': [2, 1, 3, 1, 3],
# 'Brand Name': [71.39, 83.11, 20.72, 91.07, 89.49],
# 'Boolean1': [1, 0, 1, 1, 0],
# 'Boolean2': [0, 1, 0, 1, 0]
# }
#
# relative_wr_data = [
# {'LHSCriterion': 'Reputation', 'Operator': 1, 'Intense': 1.5, 'RHSCriterion': 'Brand Name'},
# {'LHSCriterion': 'Brand Name', 'Operator': -1, 'Intense': 1, 'RHSCriterion': 'Agility'},
# # {'LHSCriterion': 'Brand Name', 'Operator': 0, 'Intense': 0.5, 'RHSCriterion': 'Provider Track record'}
# ]
# immediate_wr_data = [
# {'Criterion': 'Brand Name', 'Operator': 1, 'Value': 0.000000001}
# ]
# # immediate_wr_data = [
# # {'Criterion': 'Reputation', 'Operator': 1, 'Value': 0.2},
# # {'Criterion': 'Reputation', 'Operator': -1, 'Value': 0.5},
# # {'Criterion': 'Agility', 'Operator': -1, 'Value': 0.75},
# # {'Criterion': 'Brand Name', 'Operator': 0, 'Value': 0.3}
# # ]
# #
# # # "immediate_wr_data":[{"Criterion":"Accountability","Operator":1,"Value":0.2}]}
# # # w1>=0.2 and w1<=0.5
# #
# node_names = ['Fog Node 1', 'Fog Node 2', 'Fog Node 3', 'Fog Node 4', 'Fog Node 5']
#
# Evaluation_JSON = perform_evaluation(data_table, relative_wr_data, immediate_wr_data, node_names)
# print("Evaluation_JSON:", Evaluation_JSON)
# Evaluation_JSON = perform_evaluation(data_table, [], [], node_names)
# pretty_json = json.dumps(Evaluation_JSON)
# print(pretty_json)
# print("Evaluation_JSON:", Evaluation_JSON)
# # print("DEA Scores:", DEA_Scores)
# # print("Ranked DEA Scores:", DEA_Scores_Ranked)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff


@@ -0,0 +1,519 @@
import os
# import read_file
import get_data as file
import random
import json
from datetime import datetime
import data_types as attr_data_types
from Evaluation import perform_evaluation
from data_types import get_attr_data_type
import db.db_functions as db_functions
# Boolean_Variables = ['Extend offered network capacity', 'Extend offered processing capacity', 'Extend offered memory capacity',
# 'Fog resources addition', 'Edge resources addition', 'Solid State Drive']
Boolean_Variables = [
"fd871ec6-d953-430d-a354-f13c66fa8bc9", "dcedb196-2c60-4c29-a66d-0e768cfd698a",
"0cf00a53-fd33-4887-bb38-e0bbb04e3f3e", "d95c1dae-1e22-4fb4-9cdc-743e96d0dddc",
"8cd09fe9-c119-4ccd-b651-0f18334dbbe4", "7147995c-8e68-4106-ab24-f0a7673eb5f5", "c1c5b3c9-6178-4d67-a7e3-0285c2bf98ef"]
# Used to transform SAL's response before sending to DataGrid
# This version reads the structure of SAL's response as captured from Postman
def extract_node_candidate_data(json_file_path):
with open(json_file_path, 'r') as file:
json_data = json.load(file)
extracted_data = []
node_ids = []
node_names = []
for item in json_data:
hardware_info = item.get("nodeCandidate", {}).get("hardware", {})
node_data = {
"name": item['name'],
"id": item['id'],
"nodeId": item.get("nodeCandidate", {}).get("nodeId"),
"nodeCandidateType": item.get("nodeCandidate", {}).get("nodeCandidateType"),
"price": item.get("nodeCandidate", {}).get("price", 0.0),
"pricePerInvocation": item.get("nodeCandidate", {}).get("pricePerInvocation", 0.0),
"memoryPrice": item.get("nodeCandidate", {}).get("memoryPrice", 0.0),
"hardware": {
"id": hardware_info.get("id"),
"name": hardware_info.get("name"),
"providerId": hardware_info.get("providerId"),
"cores": hardware_info.get("cores"),
"ram": hardware_info.get("ram") * 1024 if hardware_info.get("ram") else None, # Assuming RAM needs conversion from GB to MB
"disk": hardware_info.get("disk"),
"fpga": hardware_info.get("fpga")
}
}
extracted_data.append(node_data)
node_ids.append(item['id'])
node_names.append(item.get('name', ''))
number_of_nodes = len(json_data)
return extracted_data, number_of_nodes, node_ids, node_names
# Only 50 nodes
def extract_SAL_node_candidate_data(json_string):
try:
json_data = json.loads(json_string) # Ensure json_data is a list of dictionaries
except json.JSONDecodeError as e:
print(f"Error parsing JSON: {e}")
return [], 0, [], []
extracted_data = []
for item in json_data:
# Ensure each item is a dictionary before accessing it
if isinstance(item, dict):
node_data = {
"nodeId": item.get("nodeId", ''),
"id": item.get('id', ''),
"nodeCandidateType": item.get("nodeCandidateType", ''),
"price": item.get("price", 0.0),
"pricePerInvocation": item.get("pricePerInvocation", 0.0),
"memoryPrice": item.get("memoryPrice", 0.0),
"hardware": item.get("hardware", {})
}
extracted_data.append(node_data)
else:
print(f"Unexpected item format: {item}")
number_of_nodes = len(extracted_data)
node_ids = [node['id'] for node in extracted_data]
node_names = [node['id'] for node in extracted_data]
return extracted_data, number_of_nodes, node_ids, node_names
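# Illustrative input/output (shapes only; values are made up):
#   json_string = '[{"id": "n1", "nodeCandidateType": "EDGE", "price": 0.05, "hardware": {"cores": 2}}]'
#   -> number_of_nodes == 1, node_ids == ["n1"], node_names == ["n1"] (names fall back to ids here)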
# Used to transform SAL's response all nodes
# def extract_SAL_node_candidate_data(sal_reply):
# # Parse the JSON string in the body of the SAL reply
# body = sal_reply.get('body', '')
# extracted_data = []
#
# try:
# json_data = json.loads(body)
# except json.JSONDecodeError as e:
# print(f"Error parsing JSON: {e}")
# return extracted_data
#
# for item in json_data:
# node_data = {
# "name": item.get('name', ''),
# "name": item.get('id', ''),
# "id": item.get('id', ''),
# "nodeId": item.get("nodeId", ''),
# "nodeCandidateType": item.get("nodeCandidateType", ''),
# "price": item.get("price", 0.0),
# "pricePerInvocation": item.get("pricePerInvocation", 0.0),
# "memoryPrice": item.get("memoryPrice", 0.0),
# "hardware": item.get("hardware", {})
# }
# extracted_data.append(node_data)
#
# number_of_nodes = len(extracted_data)
# node_ids = [node['id'] for node in extracted_data]
# node_names = [node['name'] for node in extracted_data]
# if not node_names:
# node_names = node_ids
#
# return extracted_data, number_of_nodes, node_ids, node_names
# Used to map the criteria from SAL's response with the selected criteria (from frontend)
def create_criteria_mapping(selected_items, extracted_data):
# Static mapping from frontend criterion titles to SAL field names (the arguments are currently unused)
field_mapping = {
# "Cost": "price",
"Operating cost": "price",
"Memory Price": "memoryPrice",
"Number of CPU Cores": "cores",
"Memory Size": "ram",
"Storage Capacity": "disk"
}
return field_mapping
# Used to create the required structure for the Evaluation
def transform_grid_data_to_table(json_data):
grid_data = json_data.get('gridData', [])
relative_wr_data = json_data.get('relativeWRData', [])
immediate_wr_data = json_data.get('immediateWRData', [])
node_names = json_data.get('nodeNames', [])
node_ids = []
# Initialize temporary dictionaries to organize the data
temp_data_table = {}
criteria_titles = []
# Mapping for ordinal values
ordinal_value_mapping = {"High": 3, "Medium": 2, "Low": 1}
boolean_value_mapping = {"True": 1, "False": 0}
for node in grid_data:
node_name = node.get('name')
node_ids.append(node.get('id'))
criteria_data = {}
for criterion in node.get('criteria', []):
title = criterion.get('title')
value = criterion.get('value')
data_type = criterion.get('data_type')
if data_type == 1: # Ordinal data type
numeric_value = ordinal_value_mapping.get(value, None)
if numeric_value is not None:
criteria_data[title] = numeric_value
elif data_type == 5: # Boolean data type
boolean_value = boolean_value_mapping.get(value, None)
if boolean_value is not None:
criteria_data[title] = boolean_value
else: # Numeric and other types
try:
criteria_data[title] = float(value)
except ValueError:
# Handle or log the error for values that can't be converted to float
pass
temp_data_table[node_name] = criteria_data
# Collect all criteria titles
criteria_titles.extend(criteria_data.keys())
# Remove duplicates from criteria titles
criteria_titles = list(set(criteria_titles))
# Initialize the final data table
data_table = {title: [] for title in criteria_titles}
# Populate the final data table
for node_name, criteria_data in temp_data_table.items():
for title, value in criteria_data.items():
data_table[title].append(value)
# Format relative weight restriction data
formatted_relative_wr_data = []
for relative_wr in relative_wr_data:
formatted_relative_wr = {
'LHSCriterion': relative_wr.get('LHSCriterion'),
'Operator': relative_wr.get('Operator'),
'Intense': relative_wr.get('Intense'),
'RHSCriterion': relative_wr.get('RHSCriterion')
}
formatted_relative_wr_data.append(formatted_relative_wr)
# Format immediate weight restriction data
formatted_immediate_wr_data = []
for immediate_wr in immediate_wr_data:
formatted_immediate_wr = {
'Criterion': immediate_wr.get('Criterion'),
'Operator': immediate_wr.get('Operator'),
'Value': immediate_wr.get('Value')
}
formatted_immediate_wr_data.append(formatted_immediate_wr)
return data_table, formatted_relative_wr_data, formatted_immediate_wr_data, node_names, node_ids
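# Illustrative transformation (values are made up):
#   gridData = [{"name": "n1", "id": "1", "criteria": [{"title": "Agility", "value": "Low", "data_type": 1}]}]
#   -> data_table == {"Agility": [1]}, node_ids == ["1"]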
# Used to save data for each application from Frontend
def save_app_data(json_data):
# Extract app data and app_id
app_data = json_data[0][0] # Assuming the first element contains the app_id
app_id = app_data['app_id']
# Directory setup
app_dir = f"app_dirs/{app_id}"
if not os.path.exists(app_dir):
os.makedirs(app_dir)
# New data structure with additional attributes
structured_data = {
"app_id": app_id,
"nodeNames": json_data[1],
"selectedCriteria": json_data[2],
"gridData": json_data[3],
"relativeWRData": json_data[4],
"immediateWRData": json_data[5],
"results": json_data[6]
}
# Save the newly structured data to a JSON file
# with open(os.path.join(app_dir, "data.json"), 'w', encoding='utf-8') as f:
# json.dump(structured_data, f, ensure_ascii=False, indent=4)
with open(os.path.join(app_dir, f"{app_id}_data.json"), 'w', encoding='utf-8') as f:
json.dump(structured_data, f, ensure_ascii=False, indent=4)
return app_data
# Used to check if a JSON file for a given application ID exists
def check_json_file_exists(app_id):
app_dir = f"app_dirs/{app_id}" # The directory where the JSON files are stored
file_path = os.path.join(app_dir, f"{app_id}_data.json")
return os.path.exists(file_path)
# Used to read ALL the saved Data for an Application
# def read_application_data(app_id):
# # Directory path and file path
# app_dir = os.path.join("app_dirs", app_id)
# file_path = os.path.join(app_dir, "data.json")
#
# # Check if the file exists
# if os.path.exists(file_path):
# # Read and parse the JSON file
# with open(file_path, 'r', encoding='utf-8') as f:
# data = json.load(f)
# # Extract specific parts of the data
# # selected_criteria = data.get("selectedCriteria", None)
# data_table, relative_wr_data, immediate_wr_data, node_names, node_ids = transform_grid_data_to_table(data)
# else:
# print(f"No data found for application ID {app_id}.") # Return everything empty
# data_table, relative_wr_data, immediate_wr_data, node_names, node_ids = [], [], [], [], []
#
# return data_table, relative_wr_data, immediate_wr_data, node_names, node_ids
# Used to read the saved Data of the Application ONLY for the Nodes returned by SAL
def read_application_data(app_id, node_ids_SAL):
# Directory path and file path
app_dir = os.path.join("app_dirs", app_id)
file_path = os.path.join(app_dir, f"{app_id}_data.json")
# Initialize variables to return in case of no data or an error
data_table, relative_wr_data, immediate_wr_data, node_names, node_ids = [], [], [], [], []
# Check if the file exists
if os.path.exists(file_path):
# Read and parse the JSON file
with open(file_path, 'r', encoding='utf-8') as f:
data = json.load(f)
# Filter gridData based on node_ids_SAL
filtered_grid_data = [node for node in data['gridData'] if node['id'] in node_ids_SAL]
# Create a new JSON structure with filtered gridData
filtered_json_data = {
"gridData": filtered_grid_data,
"relativeWRData": data['relativeWRData'],
"immediateWRData": data['immediateWRData'],
"nodeNames": [node['name'] for node in filtered_grid_data], # Assuming you want to filter nodeNames as well
"nodeIds": node_ids_SAL # Assuming you want to include nodeIds from the filtered list
}
# Call transform_grid_data_to_table with the new filtered JSON data
data_table, relative_wr_data, immediate_wr_data, node_names, node_ids = transform_grid_data_to_table(filtered_json_data)
else:
print(f"No data found for application ID {app_id}.")
return data_table, relative_wr_data, immediate_wr_data, node_names
# Used to create the data table from SAL's response on the app side
def create_data_table(selected_criteria, extracted_data, field_mapping):
# Initialize the data table with lists for each criterion
data_table = {criterion: [] for criterion in selected_criteria}
# Loop over each node in the extracted data
for node in extracted_data:
# For each selected criterion, retrieve the corresponding value from the node's data
for criterion in selected_criteria:
# Determine the field name using the mapping, defaulting to the criterion name itself
field_name = field_mapping.get(criterion, criterion)
value = None # Default value if field is not found
# Special case for hardware attributes
if 'hardware' in node and field_name in node['hardware']:
value = node['hardware'][field_name]
elif field_name in node:
value = node[field_name]
# Replace zero values so they do not break the evaluation (0.00001 was the original placeholder; 10 is currently used)
if value == 0:
# value = 0.00001
value = 10
data_table[criterion].append(value)
return data_table
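# Illustrative: with selected_criteria = ["Number of CPU Cores"] mapped to the "cores" field,
# a node whose hardware is {"cores": 2} contributes 2 to data_table["Number of CPU Cores"].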
# Used to Append "Score" and "Rank" for each node in SAL's response JSON
# def append_evaluation_results(SALs_JSON_filename, raw_evaluation_results):
# # Load the JSON content from the file
# with open(SALs_JSON_filename, 'r') as file:
# SALs_JSON = json.load(file)
#
# # Check if raw_evaluation_results is a string and parse it, otherwise use it directly
# if isinstance(raw_evaluation_results, str):
# try:
# evaluation_results = json.loads(raw_evaluation_results)
# except json.JSONDecodeError as e:
# print(f"An error occurred while decoding the JSON data: {e}")
# return
# else:
# evaluation_results = raw_evaluation_results
#
# eval_results_dict = {result['Id']: (result['DEA Score'], result['Rank']) for result in evaluation_results}
#
# for node in SALs_JSON:
# node_id = node.get("id")
# if node_id in eval_results_dict:
# score, rank = eval_results_dict[node_id]
# node["Score"] = score
# node["Rank"] = rank
#
# return SALs_JSON
# # # Write the updated SALs_JSON to a new JSON file
# # with open('updated_SALs_JSON.json', 'w') as file:
# # json.dump(SALs_JSON, file, indent=4)
# def append_evaluation_results(sal_reply_body, scores_and_ranks):
# # Create a dictionary mapping Ids to scores and ranks
# eval_results_dict = {result['Id']: (result['DEA Score'], result['Rank'])
# for result in scores_and_ranks}
#
# # Iterate over each node in sal_reply_body and append Score and Rank
# for node in sal_reply_body:
# node_id = node.get('id') # Assuming the ID is directly under the node
# if node_id in eval_results_dict:
# score, rank = eval_results_dict[node_id]
# node["Score"] = score
# node["Rank"] = rank
#
# return sal_reply_body
# def append_evaluation_results(sal_reply_body, scores_and_ranks):
# # Check if sal_reply_body is a string and convert it to a Python object
# if isinstance(sal_reply_body, str):
# sal_reply_body = json.loads(sal_reply_body)
#
# # Create a dictionary mapping Ids to scores and ranks
# eval_results_dict = {result['Id']: (result['DEA Score'], result['Rank'])
# for result in scores_and_ranks}
#
# # Iterate over each node in sal_reply_body and append Score and Rank
# for node in sal_reply_body:
# node_id = node.get('id') # Assuming the ID is directly under the node
# if node_id in eval_results_dict:
# score, rank = eval_results_dict[node_id]
# node["score"] = score
# node["rank"] = rank
#
# return sal_reply_body
# Used to parse Patini's JSON
def parse_device_info_from_file(file_path):
with open(file_path, 'r') as file:
json_data = json.load(file)
device_names = []
device_info = {
'id': json_data['_id'],
'name': json_data['name'], # Save the device name
'deviceInfo': json_data['deviceInfo'],
'creationDate': json_data['creationDate'],
'lastUpdateDate': json_data['lastUpdateDate'],
'status': json_data['status'],
'metrics': {
'cpu': json_data['metrics']['metrics']['cpu'],
'uptime': json_data['metrics']['metrics']['uptime'],
'disk': json_data['metrics']['metrics']['disk'],
'ram': json_data['metrics']['metrics']['ram']
}
}
# Example of converting and handling ISODate strings, adjust accordingly
device_info['creationDate'] = datetime.fromisoformat(device_info['creationDate'].replace("ISODate('", "").replace("')", ""))
device_info['lastUpdateDate'] = datetime.fromisoformat(device_info['lastUpdateDate'].replace("ISODate('", "").replace("')", ""))
device_info['creationDate'] = device_info['creationDate'].isoformat()
device_info['lastUpdateDate'] = device_info['lastUpdateDate'].isoformat()
# Update the global device_names list
device_names.append({'id': device_info['id'], 'name': device_info['name']})
return device_names, device_info
def append_evaluation_results(sal_reply_body, scores_and_ranks):
# Check if sal_reply_body is a string and convert it to a Python object
if isinstance(sal_reply_body, str):
sal_reply_body = json.loads(sal_reply_body)
if scores_and_ranks:
# Create a dictionary mapping Ids to scores and ranks
eval_results_dict = {result['Id']: (result['DEA Score'], result['Rank'])
for result in scores_and_ranks}
# Iterate over each node in sal_reply_body and append Score and Rank
for node in sal_reply_body:
node_id = node.get('id') # Assuming the ID is directly under the node
if node_id in eval_results_dict:
score, rank = eval_results_dict[node_id]
node["score"] = score
node["rank"] = rank
else:
# If scores_and_ranks is empty
for index, node in enumerate(sal_reply_body):
if index == 0:
# First node gets a score of 1 and rank of 1
node["score"] = 1
node["rank"] = 1
else:
# Assign random scores between 0.33 and 0.93 to the rest
node["score"] = random.uniform(0.33, 0.93)
# Sort nodes by score in descending order to calculate ranks
sorted_nodes = sorted(sal_reply_body[1:], key=lambda x: x["score"], reverse=True)
# Assign ranks based on sorted order, starting from 2 since the first node is ranked 1
for rank, node in enumerate(sorted_nodes, start=2):
node["rank"] = rank
# Combine the first node with the rest
sal_reply_body = [sal_reply_body[0]] + sorted_nodes
return sal_reply_body
# Example usage
# extracted_data, NUMBER_OF_FOG_NODES, node_names = extract_node_candidate_data('dummy_data_node_candidates.json')
# print(NUMBER_OF_FOG_NODES)
# print(node_names)
# app_id = 'd535cf554ea66fbebfc415ac837a5828'
# data_table, relative_wr_data, immediate_wr_data, node_names, node_ids = read_app_specific_data(app_id)
#
# print("Node Names:", node_names)
# print("data_table:", data_table)
# print("Relative WR Data:", relative_wr_data)
# print("Immediate WR Data:", immediate_wr_data)
#
# evaluation_results = perform_evaluation(data_table, relative_wr_data, immediate_wr_data, node_names, node_ids)
# print("evaluation_results:", evaluation_results)
#
# # Extracting the results and saving them into a variable
# ScoresAndRanks = evaluation_results['results']
# print("ScoresAndRanks:", ScoresAndRanks)
# append_evaluation_results('SAL_Response_11EdgeDevs.json', ScoresAndRanks)

cfsb-backend/activemq.py (new file, 381 lines)

@@ -0,0 +1,381 @@
# ActiveMQ communication logic
import sys
import threading
import json
import time
sys.path.insert(0,'../exn')
import logging
from dotenv import load_dotenv
load_dotenv()
from proton import Message
from exn import core
from exn.connector import EXN
from exn.core.consumer import Consumer
from exn.core.synced_publisher import SyncedPublisher
from exn.core.publisher import Publisher
from exn.core.context import Context
from exn.core.handler import Handler
from exn.handler.connector_handler import ConnectorHandler
from User_Functions import *
import uuid
# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# logging.getLogger('exn.connector').setLevel(logging.CRITICAL)
class SyncedHandler(Handler):
def on_message(self, key, address, body, message: Message, context=None):
# logging.info(f"[SyncedHandler] Received {key} => {address}: {body}")
# logging.info("on_message in SyncedHandler is executed")
# logging.info(f"[body] {body}")
# Triggered by the OPTIMIZER: get the app id, correlation id and filters
# if address == "topic://eu.nebulouscloud.cfsb.get_node_candidates":
if key == "OPT-triggering":
# logging.info("Entered in OPT-triggering'")
# Save the correlation_id (we do not receive it from the app side)
uuid.uuid4().hex.encode("utf-8")  # generates a throwaway correlation id; the result is currently unused
# Optimizer_correlation_id = '88334290cad34ad9b21eb468a9f8ff11' # dummy correlation_id
correlation_id_optimizer = message.correlation_id
# logging.info(f"Optimizer_correlation_id {message.correlation_id}")
print("Optimizer Correlation Id: ", correlation_id_optimizer)
# application_id_optimizer = message.properties.application # can be taken also from message.annotations.application
application_id_optimizer = message.subject
# application_id_optimizer = 'd535cf554ea66fbebfc415ac837a5828' #dummy application_id_optimizer
print("Application Id: ", application_id_optimizer)
try:
# Read the Message Sent from Optimizer
opt_message_data = body
print("Whole Message Sent from Optimizer:", opt_message_data)
# Extract 'body' from opt_message_data
body_sent_from_optimizer = opt_message_data.get('body', {})
# 100 Nodes
# body_sent_from_optimizer = [
# {
# "type": "NodeTypeRequirement",
# "nodeTypes": ["IAAS"],
# "jobIdForByon": "dummy-app-id",
# "jobIdForEDGE": "dummy-app-id"
# }
# ]
# 58 Nodes
# body_sent_from_optimizer = [
# {
# "type": "NodeTypeRequirement",
# "nodeTypes": ["IAAS"],
# "jobIdForByon": "dummy-app-id",
# "jobIdForEDGE": "dummy-app-id"
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "hardware",
# "requirementAttribute": "cores",
# "requirementOperator": "EQ",
# "value": "2"
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "hardware",
# "requirementAttribute": "ram",
# "requirementOperator": "EQ",
# "value": "4096"
# }
# ]
# "jobIdForByon": "null",
# "jobIdForEDGE": "null"
# body_sent_from_optimizer =[
# {
# "type": "NodeTypeRequirement",
# "nodeTypes": ["IAAS"]
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "image",
# "requirementAttribute": "operatingSystem.family",
# "requirementOperator": "IN","value":"UBUNTU"},
# {
# "type":"AttributeRequirement",
# "requirementClass":"hardware",
# "requirementAttribute":"ram",
# "requirementOperator":"GEQ",
# "value":"4096"
# },
# {"type":"AttributeRequirement","requirementClass":"hardware","requirementAttribute":"cores",
# "requirementOperator":"GEQ","value":"4"}
# ]
# body_sent_from_optimizer = [
# {
# "type": "NodeTypeRequirement",
# "nodeTypes": ["IAAS"]
# }
# ]
# "nodeTypes": ["EDGE"]
# "nodeTypes": ["IAAS", "PAAS", "FAAS", "BYON", "EDGE", "SIMULATION"]
# "jobIdForEDGE": "FCRnewLight0"
# "jobIdForByon":"dummy-app-id",
# "jobIdForEDGE":"dummy-app-id"
# body_sent_from_optimizer = [
# {
# "type": "NodeTypeRequirement",
# "nodeTypes": ["IAAS"]
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "hardware",
# "requirementAttribute": "cores",
# "requirementOperator": "EQ",
# "value": "2"
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "hardware",
# "requirementAttribute": "ram",
# "requirementOperator": "EQ",
# "value": "4096"
# }
# ]
# body_sent_from_optimizer =[
# {
# "type": "NodeTypeRequirement",
# "nodeTypes": ["IAAS"],
# "jobIdForByon": "dummy-app-id",
# "jobIdForEDGE": "dummy-app-id"
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "hardware",
# "requirementAttribute": "cores",
# "requirementOperator": "EQ",
# "value": "2"
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "hardware",
# "requirementAttribute": "ram",
# "requirementOperator": "EQ",
# "value": "4096"
# }
# ]
# logging.info(body_sent_from_optimizer)
# print("Extracted body from Optimizer Message:", body_sent_from_optimizer)
## Prepare the message to be sent to SAL
# Convert the body data to a JSON string
# body_json_string = json.dumps(body_sent_from_optimizer)
body_json_string = body_sent_from_optimizer
RequestToSal = { # Dictionary
"metaData": {"user": "admin"}, # key [String "metaData"] value [dictionary]
"body": body_json_string # key [String "body"] value [JSON String]
}
# logging.info("RequestToSal: %s", RequestToSal)
print("RequestToSal:", RequestToSal)
# print("Is RequestToSal a valid dictionary:", isinstance(RequestToSal, dict))
# print("Is the 'body' string in RequestToSal a valid JSON string:", is_json(RequestToSal["body"]))
## Request the node candidates from SAL
sal_reply = context.publishers['SAL-GET'].send_sync(RequestToSal)
## Process SAL's Reply
# sal_reply_body = sal_reply.get('body')
sal_body = sal_reply.get('body') # Get the 'body' as a JSON string
# try:
# # Parse the JSON string to a Python object
# nodes_data = json.loads(sal_body)
# total_nodes = len(nodes_data) # Get the total number of nodes
#
# # Check if more than 51 nodes exist
# if total_nodes > 58:
# print("More than 58 nodes exist. Only the first 51 nodes will be processed.")
# # Filter to only include the first 51 nodes
# sal_reply_body = nodes_data[:60]
# else:
# print(f"Total {total_nodes} nodes found. Processing all nodes.")
# sal_reply_body = sal_reply.get('body')
#
# except json.JSONDecodeError as e:
# print(f"Error parsing JSON: {e}")
# filename = 'SAL_Response_10EdgeDevs.json'
# with open(filename, 'r') as file:
# sal_reply_body = json.load(file)
# print("SAL's Reply from JSON File:", sal_reply_body)
try:
# Parse the JSON string to a Python object
nodes_data = json.loads(sal_body)
total_nodes = len(nodes_data) # Get the total number of nodes
# Check if more than 58 nodes exist
if total_nodes > 58:
print("More than 58 nodes exist. Only the first 15 nodes will be processed.")
# Keep only the first 15 nodes and convert back to a JSON string
sal_reply_body = json.dumps(nodes_data[:15])
else:
print(f"Total {total_nodes} nodes found. Processing all nodes.")
# Keep sal_reply_body as is since it's already a JSON string
sal_reply_body = sal_body
except json.JSONDecodeError as e:
print(f"Error parsing JSON: {e}")
sal_reply_body = "[]" # Default to an empty JSON array as a string in case of error
if sal_reply_body: # Proceed only if SAL's reply body is not empty
# logging.info(f"Whole reply Received from SAL: {sal_reply}")
# print("SAL reply Body:", sal_reply_body)
# Search for application_id, Read JSON and create data to pass to Evaluation
if check_json_file_exists(application_id_optimizer): # The application's JSON exists in the DB
print(f"JSON file for application ID {application_id_optimizer} exists.")
node_ids = extract_SAL_node_candidate_data(sal_reply_body)[2] # returns (extracted_data, number_of_nodes, node_ids, node_names); index 2 is node_ids
# node_ids = ['8a7482868df473cc018df47d8ea60003', '8a7482868df473cc018df47d8fc70005', '8a7482868df473cc018df47d90e70007', '8a7482868df473cc018df47d92090009', '8a7482868df473cc018df47d9326000b', '8a7482868df473cc018df47d9445000d', '8a7482868df473cc018df47d957f000f', '8a7482868df473cc018df47d96a50011', '8a7482868df473cc018df47d97c70013', '8a7482868df473cc018df47d98e30015']
# print("node_ids_SAL:", node_ids_SAL)
# Check if there is any difference in available nodes between saved data in DB and SAL's reply
data_table, relative_wr_data, immediate_wr_data, node_names = read_application_data(application_id_optimizer, node_ids)
if not node_names:
node_names = node_ids
print("data_table filtered from DB:", data_table)
print("node_ids filtered from DB:", node_ids)
print("node_names filtered from DB:", node_names)
# TODO: use the most up-to-date data that SAL sends for each node.
# The function could be modified to retrieve only the WR info, but that breaks when other criteria are in use.
# One option: use the fresh SAL data only for the criteria SAL provides, and the saved values for the rest.
# If SAL returns a node we have no saved data for, skip it whenever criteria beyond SAL's are involved.
else: # Application JSON does not exist in DB
print(f"JSON file for application ID {application_id_optimizer} does not exist.")
# Read data from SAL's response by calling the function extract_node_candidate_data()
# extracted_data, number_of_nodes, node_ids, node_names = extract_node_candidate_data('SAL_Response_11EdgeDevs.json')
extracted_data, number_of_nodes, node_ids, node_names = extract_SAL_node_candidate_data(sal_reply_body)
# print("extracted_data:", extracted_data)
print("node_ids:", node_ids)
# Use the create_criteria_mapping() to get the criteria mappings
# selected_criteria = ["Operating cost", "Memory Price", "Number of CPU Cores", "Memory Size", "Storage Capacity"]
selected_criteria = ["Number of CPU Cores", "Memory Size"]
field_mapping = create_criteria_mapping(selected_criteria, extracted_data)
# Create data_table:
data_table = create_data_table(selected_criteria, extracted_data, field_mapping)
relative_wr_data = []
immediate_wr_data = []
print("created_data_table:", data_table)
# Check the number of nodes before Evaluation
print("There are " + str(len(node_ids)) + " elements in node_ids")
## Run evaluation
evaluation_results = perform_evaluation(data_table, relative_wr_data, immediate_wr_data, node_names, node_ids)
# print("Evaluation Results:", evaluation_results)
## Extract and save the results
# ScoresAndRanks = evaluation_results['results']
ScoresAndRanks = evaluation_results.get('results', [])
print("Scores and Ranks:", ScoresAndRanks)
# Append the Score and Rank of each node to SAL's Response
SAL_and_Scores_Body = append_evaluation_results(sal_reply_body, ScoresAndRanks)
# SAL_and_Scores_Body = append_evaluation_results('SAL_Response_11EdgeDevs.json', ScoresAndRanks)
# print("SAL_and_Scores_Body:", SAL_and_Scores_Body)
## Prepare message to be sent to OPTIMIZER
# CFSBResponse = read_dummy_response_data_toOpt('CFSB_Body_Response.json') # Data and Scores for 5 Nodes
CFSBResponse = {
"metaData": {"user": "admin"},
"body": SAL_and_Scores_Body
}
print("CFSBResponse:", CFSBResponse)
formatted_json = json.dumps(CFSBResponse, indent=4)
# Writing the formatted JSON to a file named test.json
with open('CFSBResponse.json', 'w') as file:
file.write(formatted_json)
print("Formatted JSON has been saved to CFSBResponse.json")
else: # SAL's reply body is empty, so send an empty body to the Optimizer
print("No Body in reply from SAL!")
# Send [] to Optimizer
CFSBResponse = {
"metaData": {"user": "admin"},
"body": {}
}
## Send message to OPTIMIZER
context.get_publisher('SendToOPT').send(CFSBResponse, application_id_optimizer, properties={'correlation_id': correlation_id_optimizer}, raw=True)
except json.JSONDecodeError as e:
logging.error(f"Failed to parse message body from Optimizer as JSON: {e}")
class Bootstrap(ConnectorHandler):
context = None
def ready(self, context: Context):
self.context = context
def start_exn_connector_in_background():
def run_connector():
# connector_handler = Bootstrap() # Initialize the connector handler
# eu.nebulouscloud.exn.sal.nodecandidate.*
addressSAL_GET = 'eu.nebulouscloud.exn.sal.nodecandidate.get'
#addressSAL_GET_REPLY = 'eu.nebulouscloud.exn.sal.nodecandidate.get.reply'
addressOPTtriggering = 'eu.nebulouscloud.cfsb.get_node_candidates'
addressSendToOPT = 'eu.nebulouscloud.cfsb.get_node_candidates.reply'
connector = EXN('ui', url="localhost", port=5672, username="admin", password="admin",
handler=Bootstrap(),
publishers=[
SyncedPublisher('SAL-GET', addressSAL_GET, True, True),
core.publisher.Publisher('SendToOPT', addressSendToOPT, True, True)
],
consumers=[
# Consumer('SAL-GET-REPLY', addressSAL_GET, handler=SyncedHandler(), topic=True, fqdn=True),
Consumer('OPT-triggering', addressOPTtriggering, handler=SyncedHandler(), topic=True, fqdn=True)
])
connector.start()
# Start the EXN connector in a separate thread
thread = threading.Thread(target=run_connector)
thread.daemon = True # Daemon threads will shut down immediately when the program exits
thread.start()
# Used to read a dummy response from a JSON file and send it to the Optimizer
# (already used once to send test data to the Optimizer)
def read_dummy_response_data_toOpt(file_path):
with open(file_path, 'r') as file:
data = json.load(file)
# Encapsulating the data within the "body" structure
encapsulated_data = {
"metaData": {"user": "admin"},
"body": data
}
return encapsulated_data
def is_json(myjson):
try:
json_object = json.loads(myjson)
except ValueError as e:
return False
except TypeError as e: # includes simplejson.decoder.JSONDecodeError
return False
return True
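For reference, a sketch of the request/reply shapes exchanged with SAL (payload contents are illustrative):

import json

# What the handler sends to SAL: metaData plus a JSON-string body of requirements
request_to_sal = {
    "metaData": {"user": "admin"},
    "body": json.dumps([{"type": "NodeTypeRequirement", "nodeTypes": ["EDGE"]}]),
}

# SAL replies with a dict whose "body" is a JSON string of node candidates; the handler
# parses it, scores and ranks the nodes, and forwards the enriched list to the Optimizer.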


@@ -1,183 +1,11 @@
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS, cross_origin
# import read_file
import get_data as file
import random
import json
import data_types as attr_data_types
from DEA import perform_evaluation
from data_types import get_attr_data_type
from app_factory import create_app
from activemq import start_exn_connector_in_background
from activemqOLD import start_exn_connector_in_background1
from app_factory import create_app # Import your Flask app factory
app = Flask(__name__)
CORS(app) # This enables CORS for all routes
# CORS(app, resources={r"/api/*": {"origins": "http://localhost:8080"}})
# Store evaluation results globally
evaluation_results_global = {}
criteria_titles = []
# Global variable for the number of rows
NUMBER_OF_FOG_NODES = 7
def create_fog_node_titles(NUMBER_OF_FOG_NODES):
return [f"Fog Node {i+1}" for i in range(NUMBER_OF_FOG_NODES)]
FOG_NODES_TITLES = create_fog_node_titles(NUMBER_OF_FOG_NODES)
# List of items with Ordinal Data
Ordinal_Variables = ['attr-reputation', 'attr-assurance']
NoData_Variables = ['attr-security', 'attr-performance-capacity', 'attr-performance-suitability']
Cont_Variables = ['attr-performance', 'attr-financial', 'attr-performance-capacity-memory',
'attr-performance-capacity-memory-speed']
# TODO: generate random values for boolean variables
# Bool_Variables = []
@app.route('/get_hierarchical_category_list')
def get_hierarchical_category_list():
data = file.get_level_1_items()
# TODO order by something
return jsonify(data)
# Receives the Selected Criteria and Generates data
@app.route('/process_selected_items', methods=['POST'])
def process_selected_items():
try:
data = request.json
selected_items = data.get('selectedItems', [])
global criteria_titles
criteria_titles = [file.get_subject_data(file.SMI_prefix + item)["title"] for item in selected_items]
# Generate random values for each selected item
grid_data = {}
for item in selected_items:
item_data = {}
item_data["data_type"] = get_attr_data_type(item)
if item in Ordinal_Variables:
# grid_data[item] = [random.choice(["High", "Medium", "Low"]) for _ in range(NUMBER_OF_FOG_NODES)]
item_data["data_values"] = [random.choice(["High", "Medium", "Low"]) for _ in
range(NUMBER_OF_FOG_NODES)]
item_data_dict = file.get_subject_data(file.SMI_prefix + item)
item_data["title"] = item_data_dict["title"]
elif item in NoData_Variables:
# Leave blank for these items
item_data["data_values"] = ['' for _ in range(NUMBER_OF_FOG_NODES)]
item_data_dict = file.get_subject_data(file.SMI_prefix + item)
item_data["title"] = item_data_dict["title"]
elif item in Cont_Variables:
# grid_data[item] = [round(random.uniform(50.5, 312.3), 2) for _ in range(NUMBER_OF_FOG_NODES)]
item_data["data_values"] = [round(random.uniform(50.5, 312.3), 2) for _ in range(NUMBER_OF_FOG_NODES)]
item_data_dict = file.get_subject_data(file.SMI_prefix + item)
item_data["title"] = item_data_dict["title"]
else:
# Default data generation for other items
# grid_data[item] = [round(random.uniform(1, 100), 2) for _ in range(NUMBER_OF_FOG_NODES)]
item_data["data_values"] = [round(random.uniform(1, 100), 2) for _ in range(NUMBER_OF_FOG_NODES)]
item_data_dict = file.get_subject_data(file.SMI_prefix + item)
item_data["title"] = item_data_dict["title"]
grid_data[item] = item_data
return jsonify({'success': True, 'gridData': grid_data})
except Exception as e:
return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/show_selected_items/<items>')
@cross_origin()
def show_selected_items(items):
return render_template('selected_items.html', items=items.split(','))
@app.route('/get-criteria-titles', methods=['GET'])
def get_criteria_titles():
return jsonify(criteria_titles)
@app.route('/get-fog-nodes-titles', methods=['GET'])
def get_fog_nodes_titles():
return jsonify(FOG_NODES_TITLES)
# # Process the Grid Data and the WR Data
# @app.route('/process-evaluation-data', methods=['POST'])
# def process_evaluation_data():
# global evaluation_results_global
# try:
# data = request.get_json()
# data_table, wr_data = transform_grid_data_to_table(data)
# print(data_table)
# print(wr_data)
# evaluation_results_global = perform_evaluation(data_table, wr_data,FOG_NODES_TITLES)
# return jsonify({'status': 'success', 'message': 'Evaluation completed successfully'})
# except Exception as e:
# app.logger.error(f"Error processing evaluation data: {str(e)}")
# return jsonify({'status': 'error', 'message': str(e)}), 500
@app.route('/process-evaluation-data', methods=['POST'])
def process_evaluation_data():
global evaluation_results_global
try:
# Log the incoming request data
request_data = request.get_data(as_text=True)
app.logger.info(f"Received data: {request_data}")
data = request.get_json()
if data is None:
raise ValueError("Received data is not in JSON format or 'Content-Type' header is not set to 'application/json'")
app.logger.info(f"Parsed JSON data: {data}")
data_table, wr_data = transform_grid_data_to_table(data)
app.logger.info(f"Data table: {data_table}, WR data: {wr_data}")
evaluation_results_global = perform_evaluation(data_table, wr_data, FOG_NODES_TITLES)
return jsonify({'status': 'success', 'message': 'Evaluation completed successfully'})
except Exception as e:
error_message = str(e)
app.logger.error(f"Error processing evaluation data: {error_message}")
return jsonify({'status': 'error', 'message': error_message}), 500
def transform_grid_data_to_table(json_data):
grid_data = json_data.get('gridData', {}).get('gridData', {})
wr_data = json_data.get('wrData', [])
# if not wr_data:
# # return a default value
# wr_data = default_wr_data()
data_table = {}
row_count = None
# Mapping for ordinal values
ordinal_value_mapping = {"High": 3, "Medium": 2, "Low": 1}
boolean_value_mapping = {"True": 2, "False": 1}
for key, value in grid_data.items():
title = value.get('title')
data_values = value.get('data_values', [])
# Replace ordinal values with their numeric counterparts
numeric_data_values = [ordinal_value_mapping.get(val, val) for val in data_values]
# Initialize row_count if not set
if row_count is None:
row_count = len(numeric_data_values)
if len(numeric_data_values) != row_count:
raise ValueError(f"Inconsistent row count for {title}")
data_table[title] = numeric_data_values
return data_table, wr_data
# Endpoint to transfer the results to Results.vue
@app.route('/get-evaluation-results', methods=['GET'])
def get_evaluation_results():
return jsonify(evaluation_results_global)
app = create_app()
# Start the EXN connector in the background
start_exn_connector_in_background()
if __name__ == '__main__':
app.run(debug=True)
app.run(debug=True)
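A hypothetical client call against the process_selected_items route shown above (assuming create_app() registers the same route and Flask's default port 5000):

import requests  # client-side dependency, assumed available

resp = requests.post(
    "http://localhost:5000/process_selected_items",
    json={"selectedItems": ["attr-reputation", "attr-agility"]},
)
print(resp.json()["gridData"])  # generated values per selected criterion for each fog node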


@@ -0,0 +1,336 @@
{
"app_id": "d535cf554ea66fbebfc415ac837a5828",
"nodeNames": [
"test0",
"AWS-Raspberry-Madrid",
"test2",
"test3",
"test4",
"test5",
"test6",
"test7",
"test8",
"test9",
"test10"
],
"selectedCriteria": [
{
"name": "attr-accountability",
"type": 1,
"title": "Accountability"
},
{
"name": "attr-agility",
"type": 1,
"title": "Agility"
},
{
"name": "attr-assurance",
"type": 1,
"title": "Assurance"
}
],
"gridData": [
{
"name": "test0",
"id": "8a7482868df473cc018df47d8d7d0001",
"criteria": [
{
"title": "Accountability",
"value": "High",
"data_type": 1
},
{
"title": "Agility",
"value": "Low",
"data_type": 1
},
{
"title": "Assurance",
"value": "Low",
"data_type": 1
}
]
},
{
"name": "AWS-Raspberry-Madrid",
"id": "8a7482868df473cc018df47d8ea60003",
"criteria": [
{
"title": "Accountability",
"value": "High",
"data_type": 1
},
{
"title": "Agility",
"value": "Low",
"data_type": 1
},
{
"title": "Assurance",
"value": "Low",
"data_type": 1
}
]
},
{
"name": "test2",
"id": "8a7482868df473cc018df47d8fc70005",
"criteria": [
{
"title": "Accountability",
"value": "High",
"data_type": 1
},
{
"title": "Agility",
"value": "Medium",
"data_type": 1
},
{
"title": "Assurance",
"value": "High",
"data_type": 1
}
]
},
{
"name": "test3",
"id": "8a7482868df473cc018df47d90e70007",
"criteria": [
{
"title": "Accountability",
"value": "Medium",
"data_type": 1
},
{
"title": "Agility",
"value": "Medium",
"data_type": 1
},
{
"title": "Assurance",
"value": "Low",
"data_type": 1
}
]
},
{
"name": "test4",
"id": "8a7482868df473cc018df47d92090009",
"criteria": [
{
"title": "Accountability",
"value": "High",
"data_type": 1
},
{
"title": "Agility",
"value": "Medium",
"data_type": 1
},
{
"title": "Assurance",
"value": "Medium",
"data_type": 1
}
]
},
{
"name": "test5",
"id": "8a7482868df473cc018df47d9326000b",
"criteria": [
{
"title": "Accountability",
"value": "Low",
"data_type": 1
},
{
"title": "Agility",
"value": "Low",
"data_type": 1
},
{
"title": "Assurance",
"value": "Low",
"data_type": 1
}
]
},
{
"name": "test6",
"id": "8a7482868df473cc018df47d9445000d",
"criteria": [
{
"title": "Accountability",
"value": "Low",
"data_type": 1
},
{
"title": "Agility",
"value": "Low",
"data_type": 1
},
{
"title": "Assurance",
"value": "Medium",
"data_type": 1
}
]
},
{
"name": "test7",
"id": "8a7482868df473cc018df47d957f000f",
"criteria": [
{
"title": "Accountability",
"value": "High",
"data_type": 1
},
{
"title": "Agility",
"value": "Medium",
"data_type": 1
},
{
"title": "Assurance",
"value": "High",
"data_type": 1
}
]
},
{
"name": "test8",
"id": "8a7482868df473cc018df47d96a50011",
"criteria": [
{
"title": "Accountability",
"value": "Medium",
"data_type": 1
},
{
"title": "Agility",
"value": "Low",
"data_type": 1
},
{
"title": "Assurance",
"value": "Low",
"data_type": 1
}
]
},
{
"name": "test9",
"id": "8a7482868df473cc018df47d97c70013",
"criteria": [
{
"title": "Accountability",
"value": "Low",
"data_type": 1
},
{
"title": "Agility",
"value": "Medium",
"data_type": 1
},
{
"title": "Assurance",
"value": "Medium",
"data_type": 1
}
]
},
{
"name": "test10",
"id": "8a7482868df473cc018df47d98e30015",
"criteria": [
{
"title": "Accountability",
"value": "High",
"data_type": 1
},
{
"title": "Agility",
"value": "Low",
"data_type": 1
},
{
"title": "Assurance",
"value": "High",
"data_type": 1
}
]
}
],
"relativeWRData": [],
"immediateWRData": [],
"results": [
{
"DEA Score": 0.9999699999999998,
"Id": "8a7482868df473cc018df47d8d7d0001",
"Rank": 1,
"Title": "test0"
},
{
"DEA Score": 0.9999699999999998,
"Id": "8a7482868df473cc018df47d8ea60003",
"Rank": 1,
"Title": "AWS-Raspberry-Madrid"
},
{
"DEA Score": 1,
"Id": "8a7482868df473cc018df47d8fc70005",
"Rank": 1,
"Title": "test2"
},
{
"DEA Score": 0.99997,
"Id": "8a7482868df473cc018df47d90e70007",
"Rank": 1,
"Title": "test3"
},
{
"DEA Score": 0.99999,
"Id": "8a7482868df473cc018df47d92090009",
"Rank": 1,
"Title": "test4"
},
{
"DEA Score": 0.49999000000000005,
"Id": "8a7482868df473cc018df47d9326000b",
"Rank": 11,
"Title": "test5"
},
{
"DEA Score": 0.6666533333333332,
"Id": "8a7482868df473cc018df47d9445000d",
"Rank": 9,
"Title": "test6"
},
{
"DEA Score": 1,
"Id": "8a7482868df473cc018df47d957f000f",
"Rank": 1,
"Title": "test7"
},
{
"DEA Score": 0.6666533333333332,
"Id": "8a7482868df473cc018df47d96a50011",
"Rank": 9,
"Title": "test8"
},
{
"DEA Score": 0.99997,
"Id": "8a7482868df473cc018df47d97c70013",
"Rank": 1,
"Title": "test9"
},
{
"DEA Score": 0.9999899999999998,
"Id": "8a7482868df473cc018df47d98e30015",
"Rank": 1,
"Title": "test10"
}
]
}


@ -0,0 +1,349 @@
{
"app_id": "d535cf554ea66fbebfc415ac837a5828",
"nodeNames": [
"test0",
"AWS-Raspberry-Madrid",
"test2",
"test3",
"test4",
"test5",
"test6",
"test7",
"test8",
"test9",
"test10"
],
"selectedCriteria": [
{
"name": "attr-accountability",
"type": 1,
"title": "Accountability"
},
{
"name": "attr-agility",
"type": 1,
"title": "Agility"
},
{
"name": "attr-assurance",
"type": 1,
"title": "Assurance"
}
],
"gridData": [
{
"name": "test0",
"id": "8a7482868df473cc018df47d8d7d0001",
"criteria": [
{
"title": "Accountability",
"value": "High",
"data_type": 1
},
{
"title": "Agility",
"value": "High",
"data_type": 1
},
{
"title": "Assurance",
"value": "Medium",
"data_type": 1
}
]
},
{
"name": "AWS-Raspberry-Madrid",
"id": "8a7482868df473cc018df47d8ea60003",
"criteria": [
{
"title": "Accountability",
"value": "Low",
"data_type": 1
},
{
"title": "Agility",
"value": "High",
"data_type": 1
},
{
"title": "Assurance",
"value": "Low",
"data_type": 1
}
]
},
{
"name": "test2",
"id": "8a7482868df473cc018df47d8fc70005",
"criteria": [
{
"title": "Accountability",
"value": "High",
"data_type": 1
},
{
"title": "Agility",
"value": "Medium",
"data_type": 1
},
{
"title": "Assurance",
"value": "Medium",
"data_type": 1
}
]
},
{
"name": "test3",
"id": "8a7482868df473cc018df47d90e70007",
"criteria": [
{
"title": "Accountability",
"value": "Medium",
"data_type": 1
},
{
"title": "Agility",
"value": "Medium",
"data_type": 1
},
{
"title": "Assurance",
"value": "High",
"data_type": 1
}
]
},
{
"name": "test4",
"id": "8a7482868df473cc018df47d92090009",
"criteria": [
{
"title": "Accountability",
"value": "Medium",
"data_type": 1
},
{
"title": "Agility",
"value": "Medium",
"data_type": 1
},
{
"title": "Assurance",
"value": "Medium",
"data_type": 1
}
]
},
{
"name": "test5",
"id": "8a7482868df473cc018df47d9326000b",
"criteria": [
{
"title": "Accountability",
"value": "High",
"data_type": 1
},
{
"title": "Agility",
"value": "Low",
"data_type": 1
},
{
"title": "Assurance",
"value": "High",
"data_type": 1
}
]
},
{
"name": "test6",
"id": "8a7482868df473cc018df47d9445000d",
"criteria": [
{
"title": "Accountability",
"value": "High",
"data_type": 1
},
{
"title": "Agility",
"value": "Medium",
"data_type": 1
},
{
"title": "Assurance",
"value": "Medium",
"data_type": 1
}
]
},
{
"name": "test7",
"id": "8a7482868df473cc018df47d957f000f",
"criteria": [
{
"title": "Accountability",
"value": "Low",
"data_type": 1
},
{
"title": "Agility",
"value": "Low",
"data_type": 1
},
{
"title": "Assurance",
"value": "High",
"data_type": 1
}
]
},
{
"name": "test8",
"id": "8a7482868df473cc018df47d96a50011",
"criteria": [
{
"title": "Accountability",
"value": "Low",
"data_type": 1
},
{
"title": "Agility",
"value": "Low",
"data_type": 1
},
{
"title": "Assurance",
"value": "Medium",
"data_type": 1
}
]
},
{
"name": "test9",
"id": "8a7482868df473cc018df47d97c70013",
"criteria": [
{
"title": "Accountability",
"value": "Medium",
"data_type": 1
},
{
"title": "Agility",
"value": "Medium",
"data_type": 1
},
{
"title": "Assurance",
"value": "Medium",
"data_type": 1
}
]
},
{
"name": "test10",
"id": "8a7482868df473cc018df47d98e30015",
"criteria": [
{
"title": "Accountability",
"value": "High",
"data_type": 1
},
{
"title": "Agility",
"value": "Medium",
"data_type": 1
},
{
"title": "Assurance",
"value": "Medium",
"data_type": 1
}
]
}
],
"relativeWRData": [
{
"LHSCriterion": "Accountability",
"Operator": 1,
"Intense": 0.5,
"RHSCriterion": "Agility"
}
],
"immediateWRData": [
{
"Criterion": "Accountability",
"Operator": 1,
"Value": 0.1
}
],
"results": [
{
"DEA Score": 1,
"Id": "8a7482868df473cc018df47d8d7d0001",
"Rank": 1,
"Title": "test0"
},
{
"DEA Score": 0.7777722222222222,
"Id": "8a7482868df473cc018df47d8ea60003",
"Rank": 10,
"Title": "AWS-Raspberry-Madrid"
},
{
"DEA Score": 0.99999,
"Id": "8a7482868df473cc018df47d8fc70005",
"Rank": 1,
"Title": "test2"
},
{
"DEA Score": 1,
"Id": "8a7482868df473cc018df47d90e70007",
"Rank": 1,
"Title": "test3"
},
{
"DEA Score": 0.8,
"Id": "8a7482868df473cc018df47d92090009",
"Rank": 7,
"Title": "test4"
},
{
"DEA Score": 1,
"Id": "8a7482868df473cc018df47d9326000b",
"Rank": 1,
"Title": "test5"
},
{
"DEA Score": 0.99999,
"Id": "8a7482868df473cc018df47d9445000d",
"Rank": 1,
"Title": "test6"
},
{
"DEA Score": 0.7999999999999999,
"Id": "8a7482868df473cc018df47d957f000f",
"Rank": 7,
"Title": "test7"
},
{
"DEA Score": 0.6,
"Id": "8a7482868df473cc018df47d96a50011",
"Rank": 11,
"Title": "test8"
},
{
"DEA Score": 0.8,
"Id": "8a7482868df473cc018df47d97c70013",
"Rank": 7,
"Title": "test9"
},
{
"DEA Score": 0.99999,
"Id": "8a7482868df473cc018df47d98e30015",
"Rank": 1,
"Title": "test10"
}
]
}


@ -0,0 +1,11 @@
# Created for application setup
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS, cross_origin
from routes import main_routes # Adjusted to absolute import
def create_app():
app = Flask(__name__)
# CORS(app)
CORS(app, supports_credentials=True) # Enable CORS and allow credentials
app.register_blueprint(main_routes)
return app


@ -1151,7 +1151,7 @@ attr:c1c5b3c9-6178-4d67-a7e3-0285c2bf98ef
dcterms:created "2023-11-23T14:10:20.077Z"^^xsd:dateTime ;
dcterms:identifier "c1c5b3c9-6178-4d67-a7e3-0285c2bf98ef" ;
dcterms:modified "2023-11-23T14:10:20.077Z"^^xsd:dateTime ;
dcterms:title "Solid State Drive " ;
dcterms:title "Solid State Drive" ;
skos:broader attr:attr-performance-capacity .
attr:ffebba96-d53d-44c9-be75-3258de80ed70

cfsb-backend/config.py Normal file

@ -0,0 +1,20 @@
class Config:
"""Base configuration."""
# General config
    SECRET_KEY = '12345'  # placeholder only; load a strong secret from the environment in production
# Other configurations
class DevelopmentConfig(Config):
"""Development configuration."""
DEBUG = True
# Development-specific configurations
class TestingConfig(Config):
"""Testing configuration."""
TESTING = True
# Testing-specific configurations
class ProductionConfig(Config):
"""Production configuration."""
DEBUG = False
# Production-specific configurations
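# One way to activate a configuration (illustrative; the app factory in this
# commit does not load it explicitly):
#   app.config.from_object('config.DevelopmentConfig')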


@ -9,7 +9,7 @@ Linguistic_bad = 7
# 1 for LOW, 2 for MEDIUM, etc.
linguistic_low_choices = ["Low", "Medium", "High"]
linguistic_very_low_choices = ["Very low", "LOW", "MEDIUM", "HIGH", "VERY HIGH", "PERFECT"]
linguistic_bad_choices = ["BAD", "OK", "GOOD"]
linguistic_bad_choices = ["Bad", "OK", "Good"]
boolean_choices = ["True", "False"]
linguistic_low_attributes = [
@ -28,6 +28,7 @@ linguistic_low_attributes = [
"attr-usability-understandability",
"attr-usability-reusability",
"d503cabe-17d7-4b9b-9231-a8b211f3ce11",
'attr-reputation',
"attr-reputation-contracting-experience",
"attr-reputation-ease-of-doing-business",
"attr-reputation-provider-ethicality",
@ -46,42 +47,73 @@ linguistic_bad_attributes = [
]
boolean_attributes = [
"fd871ec6-d953-430d-a354-f13c66fa8bc9",
"dcedb196-2c60-4c29-a66d-0e768cfd698a",
"0cf00a53-fd33-4887-bb38-e0bbb04e3f3e",
"d95c1dae-1e22-4fb4-9cdc-743e96d0dddc",
"8cd09fe9-c119-4ccd-b651-0f18334dbbe4",
"7147995c-8e68-4106-ab24-f0a7673eb5f5",
"c1c5b3c9-6178-4d67-a7e3-0285c2bf98ef"
"fd871ec6-d953-430d-a354-f13c66fa8bc9", "dcedb196-2c60-4c29-a66d-0e768cfd698a",
"0cf00a53-fd33-4887-bb38-e0bbb04e3f3e", "d95c1dae-1e22-4fb4-9cdc-743e96d0dddc", "8cd09fe9-c119-4ccd-b651-0f18334dbbe4",
"7147995c-8e68-4106-ab24-f0a7673eb5f5", "c1c5b3c9-6178-4d67-a7e3-0285c2bf98ef",
"16030149-6fd5-4066-ac80-8da605dc964f", # Desired Move Support
"attr-assurance-serviceability-free-support", # Free Support
"c1c5b3c9-6178-4d67-a7e3-0285c2bf98ef", # Solid State Drive
"attr-security-access-control-privilege-management-rbac",
"bced9c2a-7234-44f8-9f51-ccd9da39f15e", # Attribute based Access Control supported(ABAC)
"attr-security-data-privacy-loss-audit-trailing", "attr-security-proactive-threat-vulnerability-management-firewall-utm",
"attr-security-management-encrypted-storage", "attr-security-management-transport-security-guarantees",
"5759cddd-ec82-4273-88c4-5f55981469d0" # Process Transparency
]
time_in_seconds_attributes = [
"attr-assurance-reliability",
]
# Resources stability
percentage_attributes = ["55a60ec3-55f7-48db-83bc-be2875c5210c",
                         "attr-assurance-availability", "attr-reputation-provider-business-stability"]
# 14 float attributes (11 plus 3 measured in seconds): Uptime, CPU MFLOPS, GPU MFLOPS, Bandwidth, Upload Speed,
# Download Speed, Proximity to Data Source, Proximity to POI
float_attributes = ["49c8d03f-5ceb-4994-8257-cd319190a62a", "3b414a80-83b4-472c-8166-715d4c9d7508",
"b945c916-2873-4528-bc4a-e3b0c9d603d9", "876397bf-599f-40a7-91ec-93cca7c392b4",
"ea2e12db-b52a-42f4-86cb-f654cfe09a92", "e8180e25-d58c-49d3-862e-cbb18dd1820e",
"9f5706e3-08bd-412d-8d59-04f464e867a8", "b9f4f982-3809-4eac-831c-37288c046133",
"attr-reputation-sustainability-energy-consumption", "attr-reputation-sustainability-carbon-footprint",
"attr-assurance-recoverability-recovery-time", "attr-financial-cost-operation-cost",
"attr-performance-capacity-clock-speed"]
# Geographic Coverage, Total number of available Fog resources, Total number of available Edge devices,
# Number of GPU Cores, Storage, Network Throughput
integer_attributes = ["8968013b-e2af-487b-9140-25e6f857204c", "2da82ab2-8ae9-4aa2-a842-0d3f846c4b47",
"203ecada-25fd-469c-92f6-dd84f2c7cba6", "7a77f809-8aba-4550-9f0c-8b619183b1cd",
"47a2c5e9-f74d-4ff3-98fe-4c66b98eaaef", "6e648d7b-c09b-4c69-8c70-5030b2d21eed",
"attr-financial-cost-data-inbound", "attr-security-management-encryption-type",
"attr-financial-cost-data-outbound", "attr-performance-capacity-num-of-cores",
"attr-performance-capacity-memory-speed", "attr-performance-capacity-memory",
"attr-performance-capacity-storage-capacity", "attr-performance-capacity-storage-throughput",
"attr-agility-elasticity-time"]
# Features
unordered_set_attributes = ["7104ee2b-52ba-4655-991f-845a1397d850", "attr-assurance-serviceability-type-of-support",
"attr-security-access-control-privilege-management-authentication-schemes"]
def get_attr_data_type(attribute):
data = {}
print("get type for " + attribute)
# print("get type for " + attribute)
if attribute in linguistic_low_attributes:
data["type"] = 1
data["values"] = linguistic_low_choices
elif attribute in linguistic_very_low_attributes:
data["type"] = 1
data["values"] = linguistic_low_choices
elif attribute in boolean_attributes:
data["type"] = 5
data["values"] = boolean_choices
elif attribute in linguistic_bad_attributes:
data["type"] = 7
data["type"] = 1 # Instead of 7
data["values"] = linguistic_low_choices
# elif attribute in boolean_attributes:
# data["type"] = 5
# data["values"] = boolean_choices
else:
data["type"] = 0 # all other cases
print(data)
elif attribute in float_attributes or attribute in percentage_attributes or attribute in integer_attributes:
data["type"] = 2 # float, seconds or percentage or Integer
# elif attribute in integer_attributes:
# data["type"] = 6 # Integer
else: # all other cases Ordinal
data["type"] = 1
data["values"] = linguistic_low_choices
#print(data)
return data
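# Illustrative lookup ('attr-reputation' is in linguistic_low_attributes above):
#   get_attr_data_type('attr-reputation')
#   -> {'type': 1, 'values': ['Low', 'Medium', 'High']}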


@ -0,0 +1,82 @@
import psycopg2
def db_open():
# Connect to the database
conn = psycopg2.connect(database="fog_broker", user="dbuser", password="pass123", host="localhost", port="5432")
# create a cursor
cur = conn.cursor()
return conn, cur
def db_close(conn, cur):
cur.close()
conn.close()
def insert_user(data):
username = data['username']
password = data['password']
uuid = data['uuid']
query = "INSERT INTO users (username, password, uuid) VALUES (%s, %s, %s)"
conn, cur = db_open()
    cur.execute(query, (username, password, uuid))
    result = cur.rowcount  # psycopg2's execute() returns None, so report affected rows instead
conn.commit()
db_close(conn, cur)
return result
def get_user(data):
username = data['username']
password = data['password']
query = "SELECT * FROM users WHERE username = %s and password = %s"
conn, cur = db_open()
cur.execute(query, (username, password))
# Fetch the data
result = cur.fetchall()
db_close(conn, cur)
return result
def get_user_apps(data):
uuid = data['uuid']
query = "SELECT * FROM apps WHERE user_uuid = '"+uuid+"'"
conn, cur = db_open()
cur.execute(query)
# Fetch the data
result = cur.fetchall()
db_close(conn, cur)
return result
def insert_app(data):
title = data['title']
description = data['description']
uuid = data['uuid']
app_id = data['app_id']
query = "INSERT INTO apps (title, description, user_uuid, app_id) VALUES (%s, %s, %s, %s)"
conn, cur = db_open()
    cur.execute(query, (title, description, uuid, app_id))
    result = cur.rowcount  # psycopg2's execute() returns None, so report affected rows instead
conn.commit()
db_close(conn, cur)
return result
def get_app(data):
app_id = data['app_id']
query = "SELECT * FROM apps WHERE app_id = '" + app_id + "'"
conn, cur = db_open()
cur.execute(query)
# Fetch the data
result = cur.fetchall()
db_close(conn, cur)
return result

cfsb-backend/db/db_script.sql Executable file

@ -0,0 +1,17 @@
CREATE TABLE IF NOT EXISTS users (
id SERIAL PRIMARY KEY,
uuid VARCHAR(255) NOT NULL,
username VARCHAR(255) NOT NULL,
password VARCHAR(255) NOT NULL
);
CREATE TABLE IF NOT EXISTS apps (
id SERIAL PRIMARY KEY,
user_uuid VARCHAR(255) NOT NULL,
title VARCHAR(255) NOT NULL,
description TEXT,
app_id VARCHAR(255) NOT NULL
);
INSERT INTO users (username, password, uuid) VALUES ('greg', '12345', 'e3ff4006-be5f-4e00-bbe1-e49a88b2541a');
INSERT INTO apps (user_uuid, title, description, app_id) VALUES ('e3ff4006-be5f-4e00-bbe1-e49a88b2541a', 'Demo App', 'Demo App description', '2f7cc63df4b1da7532756f44345758da');


@ -0,0 +1,24 @@
version: '3.0'
services:
web:
build:
context: .
dockerfile: Dockerfile
ports:
- "8001:8001"
depends_on:
- db
db:
image: postgres:16
ports:
- "5432:5432"
environment:
- POSTGRES_USER=dbuser
- POSTGRES_PASSWORD=pass123
- POSTGRES_DB=fog_broker
volumes:
- postgres_data:/var/lib/postgresql/data/
- ./db/db_script.sql:/docker-entrypoint-initdb.d/db_script.sql
volumes:
postgres_data:
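# Typical bring-up (assuming Docker Compose v2 is installed):
#   docker compose up --build
# Postgres runs db/db_script.sql automatically on first initialization because
# it is mounted into /docker-entrypoint-initdb.d above.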


@ -0,0 +1,396 @@
{
"nodes":[
{
"id": "2ad4bd97-d932-42a5-860e-e607a50f161d",
"nodeCandidateType": "EDGE",
"jobIdForByon": null,
"jobIdForEdge": null,
"price": 0.0702,
"cloud": {
"id": "nebulous-aws-sal-1",
"endpoint": null,
"cloudType": "PUBLIC",
"api": {
"providerName": "aws-ec2"
},
"credential": null,
"cloudConfiguration": {
"nodeGroup": "",
"properties": {}
},
"owner": null,
"state": null,
"diagnostic": null
},
"location": {
"id": "nebulous-aws-sal-2/eu-west-1",
"name": "eu-west-1",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 43.5334,
"longitude": 2.2543
},
"parent": null,
"state": null,
"owner": null
},
"image": {
"id": "nebulous-aws-sal-1/eu-west-3/ami-0dcef913833a35715",
"name": "PrEstoCloud-Golden-Image-191205-6",
"providerId": "ami-0dcef913833a35715",
"operatingSystem": {
"operatingSystemFamily": "UNKNOWN_OS_FAMILY",
"operatingSystemArchitecture": "I386",
"operatingSystemVersion": 0.0
},
"location": {
"id": "nebulous-aws-sal-1/eu-west-3",
"name": "eu-west-3",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"hardware": {
"id": "nebulous-aws-sal-1/eu-west-2/t3.medium",
"name": "t3.medium",
"providerId": "t3.medium",
"cores": 2,
"ram": 4096,
"disk": 32.0,
"fpga": 0,
"location": {
"id": "nebulous-aws-sal-1/eu-west-3",
"name": "eu-west-3",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"pricePerInvocation": 0.0,
"memoryPrice": 0.0,
"nodeId": "2c9280838d55ecbc018d55ef4ade0015",
"environment": null
},
{
"id": "e917581d-1a62-496b-9d2e-05972fe309e9",
"nodeCandidateType": "EDGE",
"jobIdForByon": null,
"jobIdForEdge": null,
"price": 0.0438,
"cloud": {
"id": "nebulous-aws-sal-1",
"endpoint": null,
"cloudType": "PUBLIC",
"api": {
"providerName": "aws-ec2"
},
"credential": null,
"cloudConfiguration": {
"nodeGroup": "",
"properties": {}
},
"owner": null,
"state": null,
"diagnostic": null
},
"location": {
"id": "nebulous-aws-sal-2/eu-west-1",
"name": "eu-west-1",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 43.5334,
"longitude": 2.2543
},
"parent": null,
"state": null,
"owner": null
},
"image": {
"id": "nebulous-aws-sal-1/eu-west-3/ami-0dcef913833a35715",
"name": "PrEstoCloud-Golden-Image-191205-6",
"providerId": "ami-0dcef913833a35715",
"operatingSystem": {
"operatingSystemFamily": "UNKNOWN_OS_FAMILY",
"operatingSystemArchitecture": "I386",
"operatingSystemVersion": 0.0
},
"location": {
"id": "nebulous-aws-sal-1/eu-west-3",
"name": "eu-west-3",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"hardware": {
"id": "nebulous-aws-sal-1/eu-west-2/t3.medium",
"name": "t3.medium",
"providerId": "t3.medium",
"cores": 4,
"ram": 4096,
"disk": 64.0,
"fpga": 0,
"location": {
"id": "nebulous-aws-sal-1/eu-west-3",
"name": "eu-west-3",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"pricePerInvocation": 0.0,
"memoryPrice": 0.0,
"nodeId": "2c9280838d55ecbc018d55ef4ade0015",
"environment": null
},
{
"id": "78aca9a8-8c14-4c7d-af34-72cef0da992d",
"nodeCandidateType": "EDGE",
"jobIdForByon": null,
"jobIdForEdge": null,
"price": 0.0381,
"cloud": {
"id": "nebulous-aws-sal-1",
"endpoint": null,
"cloudType": "PUBLIC",
"api": {
"providerName": "aws-ec2"
},
"credential": null,
"cloudConfiguration": {
"nodeGroup": "",
"properties": {}
},
"owner": null,
"state": null,
"diagnostic": null
},
"location": {
"id": "nebulous-aws-sal-2/eu-west-1",
"name": "eu-west-1",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 43.5334,
"longitude": 2.2543
},
"parent": null,
"state": null,
"owner": null
},
"image": {
"id": "nebulous-aws-sal-1/eu-west-3/ami-0dcef913833a35715",
"name": "PrEstoCloud-Golden-Image-191205-6",
"providerId": "ami-0dcef913833a35715",
"operatingSystem": {
"operatingSystemFamily": "UNKNOWN_OS_FAMILY",
"operatingSystemArchitecture": "I386",
"operatingSystemVersion": 0.0
},
"location": {
"id": "nebulous-aws-sal-1/eu-west-3",
"name": "eu-west-3",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"hardware": {
"id": "nebulous-aws-sal-1/eu-west-2/t3.medium",
"name": "t3.medium",
"providerId": "t3.medium",
"cores": 8,
"ram": 4096,
"disk": 128.0,
"fpga": 0,
"location": {
"id": "nebulous-aws-sal-1/eu-west-3",
"name": "eu-west-3",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"pricePerInvocation": 0.0,
"memoryPrice": 0.0,
"nodeId": "2c9280838d55ecbc018d55ef4ade0015",
"environment": null
},
{
"id": "d2bddce9-4118-41a9-b528-3bac32b13312",
"nodeCandidateType": "EDGE",
"jobIdForByon": null,
"jobIdForEdge": null,
"price": 0.0255,
"cloud": {
"id": "nebulous-aws-sal-1",
"endpoint": null,
"cloudType": "PUBLIC",
"api": {
"providerName": "aws-ec2"
},
"credential": null,
"cloudConfiguration": {
"nodeGroup": "",
"properties": {}
},
"owner": null,
"state": null,
"diagnostic": null
},
"location": {
"id": "nebulous-aws-sal-2/eu-west-1",
"name": "eu-west-1",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 43.5334,
"longitude": 2.2543
},
"parent": null,
"state": null,
"owner": null
},
"image": {
"id": "nebulous-aws-sal-1/eu-west-3/ami-0dcef913833a35715",
"name": "PrEstoCloud-Golden-Image-191205-6",
"providerId": "ami-0dcef913833a35715",
"operatingSystem": {
"operatingSystemFamily": "UNKNOWN_OS_FAMILY",
"operatingSystemArchitecture": "I386",
"operatingSystemVersion": 0.0
},
"location": {
"id": "nebulous-aws-sal-1/eu-west-3",
"name": "eu-west-3",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"hardware": {
"id": "nebulous-aws-sal-1/eu-west-2/t3.medium",
"name": "t3.medium",
"providerId": "t3.medium",
"cores": 2,
"ram": 4096,
"disk": 32.0,
"fpga": 0,
"location": {
"id": "nebulous-aws-sal-1/eu-west-3",
"name": "eu-west-3",
"providerId": "eu-west-3",
"locationScope": "REGION",
"isAssignable": true,
"geoLocation": {
"city": "Paris",
"country": "Paris",
"latitude": 48.8607,
"longitude": 2.3281
},
"parent": null,
"state": null,
"owner": null
},
"state": null,
"owner": null
},
"pricePerInvocation": 0.0,
"memoryPrice": 0.0,
"nodeId": "2c9280838d55ecbc018d55ef4ade0015",
"environment": null
}
]
}


@ -0,0 +1,80 @@
{
"_id": "b4ce322c-698a-43b9-a889-bf0da2a4dcb9",
"os": "LINUX",
"name": "Test VM #0001",
"owner": "admin",
"ipAddress": "10.10.0.6",
"location": {
"name": "laptop",
"latitude": 12.345,
"longitude": 56.789
},
"username": "ubuntu",
"password": [
"u",
"b",
"u",
"n",
"t",
"u"
],
"publicKey": [],
"deviceInfo": {
"CPU_SOCKETS": "1",
"CPU_CORES": "10",
"CPU_PROCESSORS": "20",
"RAM_TOTAL_KB": "16218480",
"RAM_AVAILABLE_KB": "13366788",
"RAM_FREE_KB": "10943372",
"RAM_USED_KB": "5275108",
"RAM_UTILIZATION": "32.5253",
"DISK_TOTAL_KB": "1055762868",
"DISK_FREE_KB": "976527612",
"DISK_USED_KB": "79235256",
"DISK_UTILIZATION": "7.50502",
"OS_ARCHITECTURE": "x86_64",
"OS_KERNEL": "Linux",
"OS_KERNEL_RELEASE": "5.15.133.1-microsoft-standard-WSL2"
},
"requestId": "eb6441fc-613a-482e-ba94-b16db57ecd36",
"creationDate": "2024-01-15T13:23:40.602Z",
"lastUpdateDate": "2024-01-15T14:32:43.485Z",
"status": "HEALTHY",
"nodeReference": "40ed1989-49ba-4496-a5c5-3d8ca1a18972",
"messages": [],
"statusUpdate": {
"ipAddress": "10.10.0.6",
"clientId": "VM-LINUX-TEST-VM-0001-Test VM #0001-DEFAULT-10.10.0.6-_",
"state": "REGISTERED",
"stateLastUpdate": "2024-01-15T13:23:47.463Z",
"reference": "40ed1989-49ba-4496-a5c5-3d8ca1a18972",
"errors": []
},
"metrics": {
"ipAddress": "10.10.0.6",
"clientId": "VM-LINUX-TEST-VM-0001-Test VM",
"timestamp": "2024-01-15T14:32:33.467Z",
"metrics": {
"count-total-events-failures": 0,
"count-total-events-text": 0,
"tx": 0,
"count-total-events-other": 0,
"count-event-forwards-success": 0,
"count-event-forwards-failure": 0,
"rx": 0,
"count-total-events": 0,
"cpu": 0.6,
"uptime": 10742,
"count-event-local-publish-failure": 0,
"count-total-events-object": 0,
"disk": 2.48262,
"count-event-local-publish-success": 0,
"updatetime": 1705318391,
"currdatetime": 1705329133,
"ram": 23.7719
},
"latestEvents": []
},
"retries": 0,
"_class": "eu.nebulous.resource.discovery.monitor.model.Device"
}


@ -0,0 +1,6 @@
from . import core
from . import handler
from . import settings
from . import connector


@ -0,0 +1,84 @@
import logging
import os
from proton.reactor import Container
from exn.core import state_publisher, schedule_publisher
from exn.core.context import Context
from .core.manager import Manager
from .settings import base
from .handler import connector_handler
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
_logger = logging.getLogger(__name__)
class EXN:
context = None
container = None
def __init__(self, component=None,
handler:connector_handler.ConnectorHandler = None,
publishers=None,
consumers=None,
**kwargs):
# Load .env file
# Validate and set connector
if not component:
_logger.error("Component cannot be empty or None")
raise ValueError("Component cannot be empty or None")
self.component = component
self.url = kwargs.get('url',os.getenv('NEBULOUS_BROKER_URL'))
self.port = kwargs.get('port', os.getenv('NEBULOUS_BROKER_PORT'))
self.username = kwargs.get('username',os.getenv('NEBULOUS_BROKER_USERNAME'))
self.password = kwargs.get('password', os.getenv('NEBULOUS_BROKER_PASSWORD'))
self.handler = handler
# Validate attributes
if not self.url:
_logger.error("URL cannot be empty or None")
raise ValueError("URL cannot be empty or None")
if not self.port:
_logger.error("PORT cannot be empty or None")
raise ValueError("PORT cannot be empty or None")
if not self.username:
_logger.error("USERNAME cannot be empty or None")
raise ValueError("USERNAME cannot be empty or None")
if not self.password:
_logger.error("PASSWORD cannot be empty or None")
raise ValueError("PASSWORD cannot be empty or None")
self.context = Context(base=f"{base.NEBULOUS_BASE_NAME}.{self.component}")
if not publishers:
publishers = []
if not consumers:
consumers = []
compiled_publishers = publishers
if kwargs.get("enable_state",False):
compiled_publishers.append(state_publisher.Publisher())
if kwargs.get("enable_health",False):
compiled_publishers.append(schedule_publisher.Publisher(
base.NEBULOUS_DEFAULT_HEALTH_CHECK_TIMEOUT,
'health',
'health',
topic=True))
for c in consumers:
self.context.register_consumers(c)
for p in compiled_publishers:
self.context.register_publisher(p)
def start(self):
self.context.start(Manager(f"{self.url}:{self.port}"),self.handler)
def stop(self):
self.context.stop()
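# Illustrative wiring (sketch; the component name, handler class and credentials
# below are placeholders, not defined in this commit):
#   exn = EXN('cfsb', handler=MyConnectorHandler(),
#             url='localhost', port=5672, username='admin', password='admin')
#   exn.start()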


@ -0,0 +1,10 @@
from . import context
from . import handler
from . import publisher
from . import consumer
from . import state_publisher
from . import schedule_publisher
from . import synced_publisher


@ -0,0 +1,49 @@
import logging
import threading
from proton import Event
from .handler import Handler
from . import link
from proton.handlers import MessagingHandler
_logger = logging.getLogger(__name__)
_logger.setLevel(level=logging.DEBUG)
class Consumer(link.Link, MessagingHandler):
application = None
def __init__(self, key, address, handler: Handler, application=None, topic=False, fqdn=False):
super(Consumer, self).__init__(key, address, topic, fqdn)
self.application = application
self.handler = handler
self.handler._consumer = self
def should_handle(self, event: Event):
should = event.link.name == self._link.name and \
(self.application is None or event.message.subject == self.application)
_logger.debug(f"[{self.key}] checking if link is the same {event.link.name}={self._link.name} "
f" and application {self.application}={event.message.subject} == {should} "
f" and correlation_id={event.message.correlation_id}")
return should
def on_start(self, event: Event) -> None:
_logger.debug(f"[{self.key}] on_start")
def on_message(self, event):
_logger.debug(f"[{self.key}] handling event with address => {event.message.address}")
try:
if self.should_handle(event):
event.delivery.settle()
t = threading.Thread(target=self.handler.on_message, args=[self.key, event.message.address, event.message.body, event.message, self.context])
t.start()
else:
event.delivery.abort()
except Exception as e:
_logger.error(f"Received message: {e}")


@ -0,0 +1,108 @@
import logging
from proton.reactor import Container
from . import link
from .manager import Manager
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
class Context:
base = None
handler = None
publishers = {}
consumers = {}
_manager = None
def __init__(self, base):
self.base = base
def start(self, manager:Manager, handler):
self._manager = manager
def on_ready():
_logger.debug("[context] on_ready" )
for key,publisher in self.publishers.items():
self._manager.start_publisher(self,publisher)
for key,consumer in self.consumers.items():
self._manager.start_consumer(self,consumer)
handler.ready(context=self)
self._manager._on_ready=on_ready
self._manager.start()
    def stop(self):
        if self._manager is not None and self._manager.started:
            # iterate items(); iterating the dicts directly would yield only keys
            for key, publisher in self.publishers.items():
                publisher._link.close()
            for key, consumer in self.consumers.items():
                consumer._link.close()
            self._manager.close()
def register_publisher(self, publisher):
if publisher.key in self.publishers:
_logger.warning("[context] Trying to register publisher that already exists")
return
_logger.info(f"[context] registering publisher {publisher.key} {publisher.address}" )
self.publishers[publisher.key] = publisher
if self._manager is not None and self._manager.started:
self._manager.start_publisher(self,publisher)
def get_publisher(self, key):
if key in self.publishers:
return self.publishers[key]
return None
def has_publisher(self, key):
return key in self.publishers
def has_consumer(self, key):
return key in self.consumers
def register_consumers(self, consumer):
if consumer.key in self.consumers:
_logger.warning("[context] Trying to register consumer that already exists")
return
self.consumers[consumer.key] = consumer
if self._manager is not None and self._manager.started:
self._manager.start_consumer(self,consumer)
def unregister_consumer(self, key):
        if key not in self.consumers:
            _logger.warning("[context] Trying to unregister consumer that does not exist")
return
consumer = self.consumers.pop(key)
if self._manager is not None and self._manager.started:
consumer._link.close()
def unregister_publisher(self, key):
        if key not in self.publishers:
            _logger.warning("[context] Trying to unregister publisher that does not exist")
return
publisher = self.publishers.pop(key)
if self._manager is not None and self._manager.started:
publisher._link.close()
def build_address_from_link(self, link: link.Link):
if link.fqdn:
address = link.address
if link.topic and not link.address.startswith("topic://"):
address = f"topic://{address}"
return address
address = f"{self.base}.{link.address}"
if link.topic:
address = f"topic://{address}"
return address
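# Illustrative result (base name from settings/base.py is "eu.nebulouscloud"):
#   a Context with base "eu.nebulouscloud.cfsb" and a topic link whose address is
#   "get_node_candidates" yields "topic://eu.nebulouscloud.cfsb.get_node_candidates".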


@ -0,0 +1,11 @@
import logging
from proton import Message
_logger = logging.getLogger(__name__)
class Handler:
def on_message(self, key, address, body, message: Message, context):
_logger.info(f"You should really override this... {key}=>{address}")


@ -0,0 +1,21 @@
from proton import Link as pLink
class Link:
fqdn=False
context=None
def __init__(self, key, address, topic=False, fqdn=False):
super().__init__()
self.key = key
self.address = address
self.topic= topic
self.fqdn= fqdn
self._link = None
def set(self, link:pLink):
# The proton container creates a sender
# so we just use composition instead of extension
self._link = link


@ -0,0 +1,115 @@
import logging
import threading
import time
import types
import proton
from proton import Event, Connection, Session, Message
from proton.handlers import MessagingHandler
from proton.reactor import Container,ReceiverOption,Filter,Selector
from .consumer import Consumer
from .publisher import Publisher
from .handler import Handler
from .synced_publisher import SyncedPublisher
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
class SessionPerConsumer(object):
def session(self, connection: Connection) -> Session:
session = connection.session()
session.open()
return session
class Manager(MessagingHandler):
uri = None
started = False
container = None
connection = None
_on_ready = None
def __init__(self, uri):
super(Manager, self).__init__()
self.uri = uri
def start(self):
_logger.info(f"[manager] starting")
self.container = Container(self)
self.container.run()
def on_start(self, event: Event) -> None:
self.connection = self.container.connect(self.uri,)
self.connection._session_policy = SessionPerConsumer()
def connection_state():
            # 18 == proton's LOCAL_ACTIVE (2) | REMOTE_ACTIVE (16) endpoint state
            while self.connection.state != 18:
time.sleep(0.05)
self.started = True
_logger.debug(f"[manager] on_start")
if self._on_ready is not None:
self._on_ready()
threading.Thread(target=connection_state).start()
def on_message(self, event: Event) -> None:
_logger.warning(f"[manager] received generic on_message make sure you have set up your handlers"
f" properly ")
def close(self):
_logger.info(f"[manager] closing")
if self.container:
self.container.stop()
if self.connection:
self.connection.close()
def start_publisher(self, context, publisher: Publisher):
address = context.build_address_from_link(publisher)
_logger.info(f"[manager] starting publisher {publisher.key} => {address}")
publisher.set(self.container.create_sender(self.connection, address))
publisher.context = context
if hasattr(publisher, "delay"):
_logger.debug(f"{context.base} registering timer {hasattr(publisher, 'delay')}")
self.container.schedule(publisher.delay, handler=publisher)
if hasattr(publisher, "reply_address"):
_logger.info(f"[manager] starting Synced consumer for {publisher.key} => {publisher.reply_address}")
def on_my_message(self, key, address, body, message: Message, context=None):
_logger.info(f"[{publisher.key}] handler received {key} => {message.correlation_id}")
if publisher.match_correlation_id(message.correlation_id):
_logger.info(f"[{publisher.key}] handler received {key} / matched => response {body} ")
publisher._replied = body
r_handler = Handler()
r_handler.on_message= types.MethodType(on_my_message,r_handler)
self.start_consumer(
context,
Consumer(publisher.key+"-reply",
publisher.reply_address,
handler=r_handler,
topic=publisher.reply_topic,
fqdn=publisher.reply_fqdn
)
)
def start_consumer(self, context, consumer: Consumer):
address = context.build_address_from_link(consumer)
consumer.context = context
if consumer.application:
_logger.info(f"[manager] starting consumer {consumer.key} => {address} and application={consumer.application}")
consumer.set(self.container.create_receiver(
self.connection,
address,
handler=consumer,
options=Selector(u"application = '"+consumer.application+"'"))
)
else:
_logger.info(f"[manager] starting consumer {consumer.key} => {address}")
consumer.set(self.container.create_receiver(self.connection, address, handler=consumer))


@ -0,0 +1,52 @@
import datetime
import logging
from proton import Message,AnnotationDict
from . import link
_logger = logging.getLogger(__name__)
class Publisher(link.Link):
def send(self, body=None, application=None, properties=None, raw=False):
if not body:
body = {}
_logger.info(f"[{self.key}] sending to {self._link.target.address} for application={application} - {body} "
f" properties= {properties}")
msg = self._prepare_message(body,properties=properties, raw=raw)
if application:
msg.subject = application
msg.properties={
'application': application
}
self._link.send(msg)
def _prepare_message(self, body=None, properties=None, raw=False):
send = {}
if not body:
body = {}
if not raw:
send = {"when": datetime.datetime.utcnow().isoformat()}
send.update(body)
msg = Message(
address=self._link.target.address,
body=send
)
if properties:
if 'correlation_id' in properties:
msg.correlation_id=properties['correlation_id']
msg.content_type = 'application/json'
return msg


@ -0,0 +1,24 @@
import logging
from proton.handlers import MessagingHandler
from .publisher import Publisher
_logger = logging.getLogger(__name__)
class Publisher(Publisher, MessagingHandler):
send_next = False
delay = 15
def __init__(self, delay, key, address, application=None, topic=False, fqdn=False):
super(Publisher, self).__init__(key, address, topic,fqdn)
self.delay = delay
self.application = application
def on_timer_task(self, event):
_logger.debug(f"[manager] on_timer_task")
self.send()
event.reactor.schedule(self.delay, self)
def send(self, body=None, application=None):
super(Publisher, self).send(body, self.application)


@ -0,0 +1,45 @@
import datetime
import json
from enum import Enum
from proton import Message
from . import publisher
import logging
_logger = logging.getLogger(__name__)
class States(Enum):
STARTING = "starting"
STARTED = "started"
READY = "ready"
STOPPING = "stopping"
STOPPED = "stopped"
class Publisher(publisher.Publisher):
def __init__(self):
super().__init__("state","state", True)
def _send_message(self, message_type):
self.send({"state": message_type,"message": None})
def starting(self):
self._send_message(States.STARTING.value)
def started(self):
self._send_message(States.STARTED.value)
def ready(self):
self._send_message(States.READY.value)
def stopping(self):
self._send_message(States.STOPPING.value)
def stopped(self):
self._send_message(States.STOPPED.value)
def custom(self, state):
self._send_message(state)


@ -0,0 +1,60 @@
import logging
import threading
import time
from proton.handlers import MessagingHandler
from .publisher import Publisher
import uuid
_logger = logging.getLogger(__name__)
class SyncedPublisher(Publisher):
_replied = None
reply_address = None
correlation_id = None
def __init__(self, key, address, topic=False, fqdn=False, reply_address="reply", timeout=30):
super(Publisher, self).__init__(key, address, topic,fqdn)
self.reply_address=address+"."+reply_address
self.reply_topic = topic
self.reply_fqdn = fqdn
self._timeout = timeout
def send_sync(self, body=None, application=None, properties=None, raw=False):
self.correlation_id=uuid.uuid4().hex
if properties and 'correlation_id' in properties:
self.correlation_id = properties['correlation_id']
self._replied=None
def _wait_for_reply():
super(SyncedPublisher, self).send(body=body, application=application, properties= {'correlation_id':self.correlation_id}, raw=raw)
timeout = self._timeout
while not self._replied:
time.sleep(0.05)
timeout = timeout - 0.05
if timeout < 0:
_logger.warning(f"[synced_publisher] {self._link.target.address} - timeout ({self._timeout}s) waiting for response, consider increasing this" )
break
_logger.debug(f"[synced_publisher] {self._link.target.address} - starting blocking thread" )
        # pass the function itself; calling it here would block the current thread instead of the new one
        t = threading.Thread(target=_wait_for_reply)
t.start()
t.join()
_logger.debug(f"[synced_publisher] {self._link.target.address} - moving on {self._replied}" )
#wait for the reply
ret = self._replied
self._replied = None
return ret
def match_correlation_id(self, correlation_id):
return self.correlation_id is not None and correlation_id is not None and self.correlation_id == correlation_id
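# Illustrative usage (sketch; the 'SAL-GET' key matches routes.py, the address is hypothetical):
#   sal_get = SyncedPublisher('SAL-GET', 'sal.get_node_candidates', topic=True)
#   reply_body = sal_get.send_sync(request_dict)  # blocks until a reply with the same correlation_id, or times out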


@ -0,0 +1,2 @@
from . import connector_handler


@ -0,0 +1,12 @@
import logging
_logger = logging.getLogger(__name__)
class ConnectorHandler:
def ready(self, context):
pass


@ -0,0 +1 @@
from . import base


@ -0,0 +1,2 @@
NEBULOUS_BASE_NAME="eu.nebulouscloud"
NEBULOUS_DEFAULT_HEALTH_CHECK_TIMEOUT=15


@ -0,0 +1,62 @@
import json
import uuid
import random
def generate_dummy_nodes(k):
nodes = []
for _ in range(k):
eu_west = random.randint(1, 3)
city, country = random.choice([("Paris", "France"), ("Lyon", "France"), ("Marseille", "France")])
latitude = round(random.uniform(40.0, 50.0), 4)
longitude = round(random.uniform(2.0, 8.0), 4)
node = {
"id": str(uuid.uuid4()),
"nodeCandidateType": random.choice(["EDGE", "IAAS", "PAAS", "SAAS"]),
"jobIdForByon": None,
"jobIdForEdge": None,
"price": round(random.uniform(0.01, 0.1), 4),
"cloud": {
"id": f"nebulous-aws-sal-{eu_west}",
"endpoint": None,
"cloudType": "PUBLIC",
"api": {"providerName": "aws-ec2"},
"credential": None,
"cloudConfiguration": {"nodeGroup": "", "properties": {}},
"owner": None,
"state": None,
"diagnostic": None
},
"location": {
"id": f"nebulous-aws-sal-{eu_west}/eu-west-{eu_west}",
"name": f"eu-west-{eu_west}",
"providerId": f"eu-west-{eu_west}",
"locationScope": "REGION",
"isAssignable": True,
"geoLocation": {"city": city, "country": country, "latitude": latitude, "longitude": longitude},
"parent": None,
"state": None,
"owner": None
},
# Repeating for 'image' and 'hardware' with appropriate modifications
"score": round(random.uniform(0.7, 1.0), 5),
# 'rank' will be assigned after sorting by score
}
# Additional details for 'image', 'hardware', etc., should follow the same pattern
nodes.append(node)
# Assign ranks after sorting nodes by score
nodes_sorted_by_score = sorted(nodes, key=lambda x: x['score'], reverse=True)
for index, node in enumerate(nodes_sorted_by_score):
node['rank'] = index + 1
return {"nodes": nodes_sorted_by_score}
# Assuming the function is defined as above, here's how you'd call it and use json.dump():
nodes_data = generate_dummy_nodes(1000)
file_path = 'CFSB_Body_Response_1000.json' # Replace with your desired file path
with open(file_path, 'w') as file:
json.dump(nodes_data, file, indent=4)
print(f"Data for 1000 nodes has been saved to {file_path}")


@ -1,4 +1,5 @@
from rdflib import Graph, URIRef
from data_types import get_attr_data_type
# Create a new RDF graph
g = Graph()
@ -36,6 +37,9 @@ def get_level_1_items():
item_dict["description"] = item_data_dict["description"]
item_dict["name"] = attribute
item_dict["children"] = []
criterion_type_values = get_attr_data_type(item_dict["name"])
item_dict["type"] = criterion_type_values['type']
# item_dict["values"] = criterion_type_values['values'] # they do not have all criteria
items_list.append(item_dict)
items_2_list = get_level_2_items(level_1_items_list, items_list)
return items_2_list
@ -59,6 +63,8 @@ def get_level_2_items(level_1_items_list, level_1_items_dict_list):
item_dict["parent"] = object_str
item_dict["name"] = level_2_attribute
item_dict["children"] = []
criterion_type_values = get_attr_data_type(item_dict["name"])
item_dict["type"] = criterion_type_values['type']
items_list.append(item_dict)
items_3_list = get_level_3_items(level_2_items_list, items_list, level_1_items_dict_list)
return items_3_list
@ -82,6 +88,8 @@ def get_level_3_items(level_2_items_list, level_2_items_dict_list, level_1_items
item_dict["parent"] = object_str
item_dict["name"] = level_3_attribute
item_dict["children"] = []
criterion_type_values = get_attr_data_type(item_dict["name"])
item_dict["type"] = criterion_type_values['type']
items_list.append(item_dict)
level_2_children_list = insert_level_2_children(level_1_items_dict_list, level_2_items_dict_list, items_list)
return level_2_children_list
@ -99,24 +107,39 @@ def insert_level_2_children(level_1_items_dict_list, level_2_items_dict_list, le
# level_2_children_list.append(item_dict)
level_2_children_list.append(level_3_item)
# here to append the list at the correct position of level_2_items_dict_list
level_2_item["children"] = level_2_children_list
# Sort the children by their title
level_2_item["children"] = sorted(level_2_children_list, key=lambda x: x['title'])
items_dict_list = insert_level_1_children(level_1_items_dict_list, level_2_items_dict_list)
# return level_2_items_dict_list
return items_dict_list
# def insert_level_1_children(level_1_items_dict_list, level_2_items_dict_list):
# for level_1_item in level_1_items_dict_list:
# level_1_children_list = []
# # print("level_1_item = " + level_1_item["name"])
# for level_2_item in level_2_items_dict_list:
# # print("level_2_item = " + level_2_item["name"])
# if level_2_item["parent"] == level_1_item["name"]:
# # print("Children of " + level_1_item["name"] + " is " + level_2_item["name"])
# level_1_children_list.append(level_2_item)
# # here to append the list at the correct position of level_1_items_dict_list
# level_1_item["children"] = level_1_children_list
# return level_1_items_dict_list
def insert_level_1_children(level_1_items_dict_list, level_2_items_dict_list):
    for level_1_item in level_1_items_dict_list:
        level_1_children_list = []
        # print("level_1_item = " + level_1_item["name"])
        for level_2_item in level_2_items_dict_list:
            # print("level_2_item = " + level_2_item["name"])
            if level_2_item["parent"] == level_1_item["name"]:
                # print("Children of " + level_1_item["name"] + " is " + level_2_item["name"])
                level_1_children_list.append(level_2_item)
        # here to append the list at the correct position of level_1_items_dict_list
        # Sort the children by their title
        level_1_item["children"] = sorted(level_1_children_list, key=lambda x: x['title'])
    # Now sort the level 1 items themselves
    sorted_level_1_items_dict_list = sorted(level_1_items_dict_list, key=lambda x: x['title'])
    return sorted_level_1_items_dict_list
def get_subject_data(item_subject):

cfsb-backend/notes.txt Normal file

@ -0,0 +1,44 @@
https://158.39.75.54/projects/nebulous-collaboration-hub/wiki/accessing-the-demo-activemq-broker
https://openproject.nebulouscloud.eu/projects/nebulous-collaboration-hub/wiki/accessing-the-nebulous-dev-k8s
activemq
terminal in Documents/projects/koronakos/nebulous_certs
export KUBECONFIG=kubeconfig.yaml
kubectl -n nebulous port-forward service/nebulous-activemq 5672:5672 8161:8161 61616:61616
OPTIMIZER waits for a reply on eu.nebulouscloud.cfsb.get_node_candidates.reply;
the reply body will be the list of node candidates as received from SAL, but augmented with ranking information.
The optimiser will use the highest-ranked node candidates, as sketched below.
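Sketch of the ranking augmentation (illustrative; "score"/"rank" fields as in the dummy data files):
    nodes_sorted = sorted(nodes, key=lambda n: n["score"], reverse=True)
    for index, node in enumerate(nodes_sorted):
        node["rank"] = index + 1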
the app name comes from message.subject
use the above instead of "application_id": "app_id" on line 74
passwords
gregkoron
Olympi@kos789
-------------
SAL
SAL POSTMAN: https://158.39.75.54/projects/nebulous-collaboration-hub/wiki/deployment-manager-sal-1
Port Forwarding to enable POSTMAN for SAL:
kubectl port-forward -n nebulous service/sal 8080:8080
Use this in POSTMAN Request: https://localhost:8080/sal/pagateway/connect
---
kubectl port-forward -n nebulous service/sal1 8080:8080
------If problems occur, this works-----
kubectl -n nebulous get pod | findstr sal
kubectl -n nebulous delete pod sal-all-6b8bbc54cf-hxsj6
-----------
----------FLUX--------------
kubectl scale -n flux-system --replicas=0 deploy/sal-all-flux
kubectl scale -n flux-system --replicas=1 deploy/sal-all-flux
kubectl -n flux-system port-forward service/sal-flux <YOUR_FAV_PORT>:8080
kubectl scale -n flux-system --replicas=0 deploy/sal-all-flux
kubectl scale -n flux-system --replicas=1 deploy/sal-all-flux


@ -1,5 +1,7 @@
blinker==1.7.0
cffi==1.16.0
click==8.1.7
docopt==0.6.2
Flask==3.0.0
Flask-Cors==4.0.0
isodate==0.6.1
@ -7,8 +9,14 @@ itsdangerous==2.1.2
Jinja2==3.1.2
MarkupSafe==2.1.3
numpy==1.26.3
psycopg2-binary==2.9.9
pycparser==2.21
pyparsing==3.1.1
python-dotenv==1.0.1
python-qpid-proton==0.39.0
rdflib==7.0.0
scipy==1.11.4
six==1.16.0
stomp.py==8.1.0
websocket-client==1.7.0
Werkzeug==3.0.1

cfsb-backend/routes.py Normal file

@ -0,0 +1,242 @@
from flask import Blueprint, request, jsonify, render_template, session
from User_Functions import *
from API_Functions import *
import data_types as attr_data_types
from Evaluation import perform_evaluation
from data_types import get_attr_data_type
import db.db_functions as db_functions
import os
import time
import activemq
# from activemq import connector_handler
import traceback
main_routes = Blueprint('main', __name__)
# List of items with Ordinal Data
Ordinal_Variables = ['attr-reputation', 'attr-assurance']
NoData_Variables = ['attr-security', 'attr-performance-capacity', 'attr-performance-suitability']
Cont_Variables = ['attr-performance', 'attr-financial', 'attr-performance-capacity-memory',
'attr-performance-capacity-memory-speed']
#Used in HomePage.vue to save app_id and user_id
# @main_routes.route('/save_ids', methods=['POST'])
# def save_ids():
# data = request.json
# app_id = data.get('app_id')
# user_id = data.get('user_id')
# print("user_id:", user_id)
# # Respond back with a success message
# return jsonify({"message": "IDs received successfully.", "app_id": app_id, "user_id": user_id})
#Used in CriteriaSelection.vue
@main_routes.route('/get_hierarchical_category_list')
def get_hierarchical_category_list():
# TODO order by title in every level
items_list = file.get_level_1_items() # Assume this function returns the list correctly
if items_list is not None:
# Return the list as a JSON response
return jsonify(items_list)
else:
# Return an empty list or an error message if items_list is None
return jsonify([]), 404 # or return jsonify({"error": "No items found"}), 404
# Used in DataGrid.vue
@main_routes.route('/process_selected_criteria', methods=['POST'])
def process_selected_criteria():
try:
data = request.json
# Selected Criteria by the User from the List
selected_criteria = data.get('selectedItems', [])
# Extract app_id, user_id
application_id = data.get('app_id') # Take it from local storage from frontend
# application_id = 'd535cf554ea66fbebfc415ac837a5828' #dummy application_id_optimizer
user_id = data.get('user_id') # Take it from local storage from frontend
print("user_id:", user_id)
print("application_id:", application_id)
        ## Prepare the message to be sent to SAL
message_for_SAL = [ # User side so ask SAL for every available node
{
"type": "NodeTypeRequirement",
"nodeTypes": ["IAAS", "PAAS", "FAAS", "BYON", "EDGE", "SIMULATION"]
# "jobIdForEDGE": "FCRnewLight0"
}
]
# Convert the body data to a JSON string
body_json_string = json.dumps(message_for_SAL)
RequestToSal = { # Dictionary
"metaData": {"user": "admin"}, # key [String "metaData"] value [dictionary]
"body": body_json_string # key [String "body"] value [JSON String]
}
print("RequestToSal:", RequestToSal)
# print("Is RequestToSal a valid dictionary:", isinstance(RequestToSal, dict))
# print("Is the 'body' string in RequestToSal a valid JSON string:", is_json(RequestToSal["body"]))
## Request the node candidates from SAL
# sal_reply = activemq.context.publishers['SAL-GET'].send_sync(RequestToSal)
## Process SAL's Reply
# extracted_data, number_of_nodes, node_ids, node_names = extract_SAL_node_candidate_data(sal_reply)
# extracted_data, number_of_nodes, node_names = extract_node_candidate_data('dummy_data_node_candidates.json')
extracted_data, number_of_nodes, node_ids, node_names = extract_node_candidate_data('SAL_Response_11EdgeDevs.json')
print("extracted_data:", extracted_data)
# Use the create_criteria_mapping() to get the criteria mappings
field_mapping = create_criteria_mapping(selected_criteria, extracted_data)
grid_data = {name: [] for name in node_names}
# Prepare the data to be sent to DataGrid.vue
# Blank by default for the Selected Criteria not found in mapping
for node_data in extracted_data:
node_name = node_data.get('name') # Using name to match
node_id = node_data.get('id') # Extract the node ID
grid_data[node_name] = {"id": node_id, "criteria": []}
if node_name in grid_data: # Check if node_name exists in grid_data keys
for item in selected_criteria:
criterion_data = {}
criterion_data["data_type"] = get_attr_data_type(item)
item_data_dict = file.get_subject_data(file.SMI_prefix + item)
criterion_data["title"] = item_data_dict["title"]
field_name = field_mapping.get(criterion_data["title"], item)
# Check if the field_name is a direct key or nested inside 'hardware'
if field_name in node_data:
value = node_data[field_name]
elif 'hardware' in node_data and field_name in node_data['hardware']:
value = node_data['hardware'][field_name]
else:
# Generate random or default values for unmapped criteria or missing data
item_data_type_value = criterion_data["data_type"].get('type')
if item_data_type_value == 1:
value = random.choice(["High", "Medium", "Low"])
elif item_data_type_value == 5:
value = random.choice(["True", "False"])
else:
value = round(random.uniform(1, 100), 2)
criterion_data["value"] = value if value != 0 else 0.00001
# grid_data[node_id].append(criterion_data)
# grid_data[node_name].append(criterion_data) # Use node_name as key
grid_data[node_name]["criteria"].append(criterion_data)
# Conversion to list format remains unchanged
# grid_data_with_names = [{'name': name, 'criteria': data} for name, data in grid_data.items()]
grid_data_with_names = [{'name': name, 'id': data["id"], 'criteria': data["criteria"]} for name, data in grid_data.items()]
print("grid_data_with_names:", grid_data_with_names)
# Send the comprehensive grid_data_with_names to the frontend
return jsonify({
'success': True,
'gridData': grid_data_with_names,
'NodeNames': node_names
})
except Exception as e:
print(f"Error processing selected items: {e}")
traceback.print_exc()
return jsonify({'success': False, 'error': str(e)}), 500
# Used in WR.vue
@main_routes.route('/process-evaluation-data', methods=['POST'])
def process_evaluation_data():
try:
data = request.get_json()
if data is None:
raise ValueError("Received data is not in JSON format or 'Content-Type' header is not set to 'application/json'")
print("JSON data:", data)
# Transform grid data to table and get node names directly from the function
data_table, relative_wr_data, immediate_wr_data, node_names, node_ids = transform_grid_data_to_table(data)
# print("data_table:", data_table)
# print("relative_wr_data:", relative_wr_data)
# print("immediate_wr_data:", immediate_wr_data)
# print("node_names:", node_names)
# Run Optimization - Perform evaluation
results = perform_evaluation(data_table, relative_wr_data, immediate_wr_data, node_names, node_ids)
# print(results)
# Return the results
return jsonify({'status': 'success', 'results': results})
except Exception as e:
error_message = str(e)
return jsonify({'status': 'error', 'message': error_message}), 500
#Creates a new user
@main_routes.route('/user', methods=['POST'])
def create_user():
data = request.json
result = db_functions.insert_user(data)
return jsonify(result)
# Used in front end to authenticate the user
@main_routes.route('/login', methods=['POST'])
def select_user():
data = request.json
result = db_functions.get_user(data)
return jsonify(result)
# Returns the user's apps
@main_routes.route('/apps', methods=['POST'])
def select_user_apps():
data = request.json
result = db_functions.get_user_apps(data)
return jsonify(result)
# Creates a new app in db
@main_routes.route('/app/create', methods=['POST'])
def create_app():
data = request.json
result = db_functions.insert_app(data)
return jsonify(result)
# Checks if app exists or inserts it in db
@main_routes.route('/app', methods=['POST'])
def check_for_app():
data = request.json
result = db_functions.get_app(data)
if not result:
data['title'] = "Demo App"
data['description'] = "Demo App description"
result = db_functions.insert_app(data)
return jsonify(result)
# Get app from db
@main_routes.route('/app/get', methods=['POST'])
def get_app():
data = request.json
result = db_functions.get_app(data)
return jsonify(result)
# Called by save project in .VUE
@main_routes.route('/app/save', methods=['POST'])
def save_app():
data = request.get_json()
result = save_app_data(data)
return result
# Emulate ActiveMQ functionality
@main_routes.route('/test_sender', methods=['POST'])
def send():
data = request.get_json()
body = data['body']
application_id = data['application_id']
correlation_id = data['correlation_id']
key = data['key']
sender = activemq.test_send(data)
return data

File diff suppressed because it is too large