env and remove unnecessary files

Change-Id: I678feeec5e0c0481633fe93d7666d3ad7aae3647
Gregory Koronakos 2024-03-20 15:18:08 +02:00
parent c504f94705
commit ac6e1c1300
11 changed files with 63 additions and 1673 deletions

cfsb-backend/.env.prod Normal file

@@ -0,0 +1,9 @@
NEBULOUS_BROKER_URL=158.37.63.86
NEBULOUS_BROKER_PORT=31609
NEBULOUS_BROKER_USERNAME=admin
NEBULOUS_BROKER_PASSWORD=admin
POSTGRES_DB_HOST=localhost
POSTGRES_DB_NAME=fog_broker
POSTGRES_DB_PORT=5432
POSTGRES_DB_USER=dbuser
POSTGRES_DB_PASS=pass123
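
These nine values are consumed in two ways in this commit: docker-compose injects the file via env_file (see the compose hunk below), and the Python code reads the variables with os.getenv. A minimal sketch of reading them directly, with illustrative defaults (hypothetical standalone usage, not code from this commit):

import os
from dotenv import load_dotenv

load_dotenv('.env.prod')  # explicit path; bare load_dotenv() only looks for ".env"
broker_url = os.getenv('NEBULOUS_BROKER_URL', 'localhost')
broker_port = int(os.getenv('NEBULOUS_BROKER_PORT', '5672'))  # env values are strings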


@@ -124,7 +124,7 @@ def perform_evaluation(data_table, relative_wr_data, immediate_wr_data, node_nam
    num_of_dmus = len(next(iter(data_table.values())))
    Cols_No = len(criteria_list)
    DEA_Scores = []
-   epsilon = 0.00001  # Lower bound of the variables
+   epsilon = 0.00000  # Lower bound of the variables
    # Iterating over each DMU to Perform DEA
    for dmu_index in range(num_of_dmus):
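
For context: this epsilon is the usual non-Archimedean lower bound on the DEA multiplier weights; it keeps every criterion's weight strictly positive so no criterion can be zeroed out of the evaluation, and changing it from 0.00001 to 0.00000 removes that guarantee. A generic sketch of where the bound enters a CCR multiplier-form LP, assuming scipy.optimize.linprog (the project's actual solver setup is outside this hunk):

import numpy as np
from scipy.optimize import linprog

def dea_ccr_score(X, Y, o, epsilon=0.00001):
    # X: (n_dmus, n_inputs) inputs, Y: (n_dmus, n_outputs) outputs.
    # Maximize u.Y[o] s.t. v.X[o] == 1 and u.Y[j] - v.X[j] <= 0 for all j,
    # with every weight bounded below by epsilon.
    n, m = X.shape
    s = Y.shape[1]
    c = np.concatenate([np.zeros(m), -Y[o]])                   # linprog minimizes, so negate
    A_ub = np.hstack([-X, Y])                                  # u.Y[j] - v.X[j] <= 0
    b_ub = np.zeros(n)
    A_eq = np.concatenate([X[o], np.zeros(s)]).reshape(1, -1)  # v.X[o] == 1
    b_eq = [1.0]
    bounds = [(epsilon, None)] * (m + s)                       # epsilon = 0 permits zero weights
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds)
    return -res.fun  # efficiency score of DMU o, assuming res.success

# e.g. DEA_Scores = [dea_ccr_score(X, Y, o) for o in range(len(X))]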


@@ -340,145 +340,67 @@ def create_data_table(selected_criteria, extracted_data, field_mapping):
return data_table
# Used to Append "Score" and "Rank" for each node in SAL's response JSON
# def append_evaluation_results(SALs_JSON_filename, raw_evaluation_results):
# # Load the JSON content from the file
# with open(SALs_JSON_filename, 'r') as file:
# SALs_JSON = json.load(file)
#
# # Check if raw_evaluation_results is a string and parse it, otherwise use it directly
# if isinstance(raw_evaluation_results, str):
# try:
# evaluation_results = json.loads(raw_evaluation_results)
# except json.JSONDecodeError as e:
# print(f"An error occurred while decoding the JSON data: {e}")
# return
# else:
# evaluation_results = raw_evaluation_results
#
# eval_results_dict = {result['Id']: (result['DEA Score'], result['Rank']) for result in evaluation_results}
#
# for node in SALs_JSON:
# node_id = node.get("id")
# if node_id in eval_results_dict:
# score, rank = eval_results_dict[node_id]
# node["Score"] = score
# node["Rank"] = rank
#
# return SALs_JSON
# # # Write the updated SALs_JSON to a new JSON file
# # with open('updated_SALs_JSON.json', 'w') as file:
# # json.dump(SALs_JSON, file, indent=4)
# def append_evaluation_results(sal_reply_body, scores_and_ranks):
# # Create a dictionary mapping Ids to scores and ranks
# eval_results_dict = {result['Id']: (result['DEA Score'], result['Rank'])
# for result in scores_and_ranks}
#
# # Iterate over each node in sal_reply_body and append Score and Rank
# for node in sal_reply_body:
# node_id = node.get('id') # Assuming the ID is directly under the node
# if node_id in eval_results_dict:
# score, rank = eval_results_dict[node_id]
# node["Score"] = score
# node["Rank"] = rank
#
# return sal_reply_body
import random
# def append_evaluation_results(sal_reply_body, scores_and_ranks):
# # Check if sal_reply_body is a string and convert it to a Python object
# if isinstance(sal_reply_body, str):
# sal_reply_body = json.loads(sal_reply_body)
#
# # Create a dictionary mapping Ids to scores and ranks
# eval_results_dict = {result['Id']: (result['DEA Score'], result['Rank'])
# for result in scores_and_ranks}
# if scores_and_ranks:
# # Create a dictionary mapping Ids to scores and ranks
# eval_results_dict = {result['Id']: (result['DEA Score'], result['Rank'])
# for result in scores_and_ranks}
#
# # Iterate over each node in sal_reply_body and append Score and Rank
# for node in sal_reply_body:
# node_id = node.get('id') # Assuming the ID is directly under the node
# if node_id in eval_results_dict:
# score, rank = eval_results_dict[node_id]
# node["score"] = score
# # Iterate over each node in sal_reply_body and append Score and Rank
# for node in sal_reply_body:
# node_id = node.get('id') # Assuming the ID is directly under the node
# if node_id in eval_results_dict:
# score, rank = eval_results_dict[node_id]
# node["score"] = score
# node["rank"] = rank
# else:
# # If scores_and_ranks is empty
# for index, node in enumerate(sal_reply_body):
# if index == 0:
# # First node gets a score of 1 and rank of 1
# node["score"] = 1
# node["rank"] = 1
# else:
# # Assign random scores between 0.33 and 0.93 to the rest
# node["score"] = random.uniform(0.33, 0.93)
#
# # Sort nodes by score in descending order to calculate ranks
# sorted_nodes = sorted(sal_reply_body[1:], key=lambda x: x["score"], reverse=True)
#
# # Assign ranks based on sorted order, starting from 2 since the first node is ranked 1
# for rank, node in enumerate(sorted_nodes, start=2):
# node["rank"] = rank
#
# # Combine the first node with the rest
# sal_reply_body = [sal_reply_body[0]] + sorted_nodes
#
# return sal_reply_body
# Used to parse Patini's JSON
def parse_device_info_from_file(file_path):
    with open(file_path, 'r') as file:
        json_data = json.load(file)
    device_names = []
    device_info = {
        'id': json_data['_id'],
        'name': json_data['name'],  # Save the device name
        'deviceInfo': json_data['deviceInfo'],
        'creationDate': json_data['creationDate'],
        'lastUpdateDate': json_data['lastUpdateDate'],
        'status': json_data['status'],
        'metrics': {
            'cpu': json_data['metrics']['metrics']['cpu'],
            'uptime': json_data['metrics']['metrics']['uptime'],
            'disk': json_data['metrics']['metrics']['disk'],
            'ram': json_data['metrics']['metrics']['ram']
        }
    }
    # Example of converting and handling ISODate strings, adjust accordingly
    device_info['creationDate'] = datetime.fromisoformat(device_info['creationDate'].replace("ISODate('", "").replace("')", ""))
    device_info['lastUpdateDate'] = datetime.fromisoformat(device_info['lastUpdateDate'].replace("ISODate('", "").replace("')", ""))
    device_info['creationDate'] = device_info['creationDate'].isoformat()
    device_info['lastUpdateDate'] = device_info['lastUpdateDate'].isoformat()
    # Update the device_names list (local here, not global)
    device_names.append({'id': device_info['id'], 'name': device_info['name']})
    return device_names, device_info
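
The string surgery above strips Mongo-shell-style wrappers before parsing. A hypothetical input for illustration, plus one caveat on fromisoformat():

from datetime import datetime

raw = "ISODate('2024-01-15T13:23:40.602+00:00')"  # hypothetical wrapped value
cleaned = raw.replace("ISODate('", "").replace("')", "")
dt = datetime.fromisoformat(cleaned)
# Caveat: before Python 3.11, fromisoformat() rejects a trailing 'Z', so values
# like "2024-01-15T13:23:40.602Z" would first need .replace('Z', '+00:00').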
import json
import random
def append_evaluation_results(sal_reply_body, scores_and_ranks):
    # Check if sal_reply_body is a string and convert it to a Python object
    if isinstance(sal_reply_body, str):
        sal_reply_body = json.loads(sal_reply_body)
    if scores_and_ranks:
        # Create a dictionary mapping Ids to scores and ranks
        eval_results_dict = {result['Id']: (result['DEA Score'], result['Rank'])
                             for result in scores_and_ranks}
        # Iterate over each node in sal_reply_body and append Score and Rank
        for node in sal_reply_body:
            node_id = node.get('id')  # Assuming the ID is directly under the node
            if node_id in eval_results_dict:
                score, rank = eval_results_dict[node_id]
                node["score"] = score
                node["rank"] = rank
    else:
        # If scores_and_ranks is empty, fall back to placeholder scores
        for index, node in enumerate(sal_reply_body):
            if index == 0:
                # First node gets a score of 1 and rank of 1
                node["score"] = 1
                node["rank"] = 1
            else:
                # Assign random scores between 0.33 and 0.93 to the rest
                node["score"] = random.uniform(0.33, 0.93)
        # Sort nodes by score in descending order to calculate ranks
        sorted_nodes = sorted(sal_reply_body[1:], key=lambda x: x["score"], reverse=True)
        # Assign ranks based on sorted order, starting from 2 since the first node is ranked 1
        for rank, node in enumerate(sorted_nodes, start=2):
            node["rank"] = rank
        # Combine the first node with the rest
        sal_reply_body = [sal_reply_body[0]] + sorted_nodes
    return sal_reply_body
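
A hypothetical call illustrating the fallback branch (node shape assumed from the code above):

nodes = [{"id": "n1"}, {"id": "n2"}, {"id": "n3"}]
ranked = append_evaluation_results(nodes, [])  # empty scores_and_ranks
# ranked[0] == {"id": "n1", "score": 1, "rank": 1}; the remaining nodes get
# random scores in [0.33, 0.93] and ranks 2..n by descending score.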
@@ -495,7 +417,6 @@ def append_evaluation_results(sal_reply_body, scores_and_ranks):
# Example usage
# extracted_data, NUMBER_OF_FOG_NODES, node_names = extract_node_candidate_data('dummy_data_node_candidates.json')
# print(NUMBER_OF_FOG_NODES)


@@ -36,8 +36,9 @@ class SyncedHandler(Handler):
        # Save the correlation_id (We do not have it from the app_side)
        uuid.uuid4().hex.encode("utf-8")  # for Correlation id
        # Optimizer_correlation_id = '88334290cad34ad9b21eb468a9f8ff11' # dummy correlation_id
        correlation_id_optimizer = message.correlation_id
        if not correlation_id_optimizer:
            correlation_id_optimizer = '88334290cad34ad9b21eb468a9f8ff11'  # dummy correlation_id
        # logging.info(f"Optimizer_correlation_id {message.correlation_id}")
        print("Optimizer Correlation Id: ", correlation_id_optimizer)
@@ -89,85 +90,6 @@ class SyncedHandler(Handler):
# }
# ]
# "jobIdForByon": "null",
# "jobIdForEDGE": "null"
# body_sent_from_optimizer =[
# {
# "type": "NodeTypeRequirement",
# "nodeTypes": ["IAAS"]
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "image",
# "requirementAttribute": "operatingSystem.family",
# "requirementOperator": "IN","value":"UBUNTU"},
# {
# "type":"AttributeRequirement",
# "requirementClass":"hardware",
# "requirementAttribute":"ram",
# "requirementOperator":"GEQ",
# "value":"4096"
# },
# {"type":"AttributeRequirement","requirementClass":"hardware","requirementAttribute":"cores",
# "requirementOperator":"GEQ","value":"4"}
# ]
# body_sent_from_optimizer = [
# {
# "type": "NodeTypeRequirement",
# "nodeTypes": ["IAAS"]
# }
# ]
# "nodeTypes": ["EDGE"]
# "nodeTypes": ["IAAS", "PAAS", "FAAS", "BYON", "EDGE", "SIMULATION"]
# "jobIdForEDGE": "FCRnewLight0"
# "jobIdForByon":"dummy-app-id",
# "jobIdForEDGE":"dummy-app-id"
# body_sent_from_optimizer = [
# {
# "type": "NodeTypeRequirement",
# "nodeTypes": ["IAAS"]
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "hardware",
# "requirementAttribute": "cores",
# "requirementOperator": "EQ",
# "value": "2"
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "hardware",
# "requirementAttribute": "ram",
# "requirementOperator": "EQ",
# "value": "4096"
# }
# ]
# body_sent_from_optimizer =[
# {
# "type": "NodeTypeRequirement",
# "nodeTypes": ["IAAS"],
# "jobIdForByon": "dummy-app-id",
# "jobIdForEDGE": "dummy-app-id"
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "hardware",
# "requirementAttribute": "cores",
# "requirementOperator": "EQ",
# "value": "2"
# },
# {
# "type": "AttributeRequirement",
# "requirementClass": "hardware",
# "requirementAttribute": "ram",
# "requirementOperator": "EQ",
# "value": "4096"
# }
# ]
# logging.info(body_sent_from_optimizer)
# print("Extracted body from Optimizer Message:", body_sent_from_optimizer)
@@ -216,17 +138,16 @@ class SyncedHandler(Handler):
        # sal_reply_body = json.load(file)
        # print("SAL's Reply from JSON File:", sal_reply_body)
        try:
            # Parse the JSON string to a Python object
            nodes_data = json.loads(sal_body)
            total_nodes = len(nodes_data)  # Get the total number of nodes
            # Check if more than 58 nodes exist
-           if total_nodes > 58:
+           if total_nodes > 400:
                print("More than 58 nodes exist. Only the first 51 nodes will be processed.")
                # Filter to only include the first 51 nodes and convert back to JSON string
-               sal_reply_body = json.dumps(nodes_data[:15])
+               sal_reply_body = json.dumps(nodes_data[:400])
            else:
                print(f"Total {total_nodes} nodes found. Processing all nodes.")
                # Keep sal_reply_body as is since it's already a JSON string
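
The capping logic above, restated as a tiny standalone helper (a sketch; the 400 threshold mirrors the new value in this hunk, with the messages made consistent with it -- note the original print still says 58/51 while the code now caps at 400):

def cap_sal_nodes(sal_body, limit=400):
    nodes = json.loads(sal_body)  # SAL's reply arrives as a JSON string
    if len(nodes) > limit:
        print(f"More than {limit} nodes exist. Only the first {limit} will be processed.")
        nodes = nodes[:limit]
    else:
        print(f"Total {len(nodes)} nodes found. Processing all nodes.")
    return json.dumps(nodes)  # downstream code expects a JSON string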
@@ -341,7 +262,7 @@ def start_exn_connector_in_background():
    addressOPTtriggering = 'eu.nebulouscloud.cfsb.get_node_candidates'
    addressSendToOPT = 'eu.nebulouscloud.cfsb.get_node_candidates.reply'
-   connector = EXN('ui', url="localhost", port=5672, username="admin", password="admin",
+   connector = EXN('ui', url=os.getenv('NEBULOUS_BROKER_URL'), port=os.getenv('NEBULOUS_BROKER_PORT'), username=os.getenv('NEBULOUS_BROKER_USERNAME'), password=os.getenv('NEBULOUS_BROKER_PASSWORD'),
                    handler=Bootstrap(),
                    publishers=[
                        SyncedPublisher('SAL-GET', addressSAL_GET, True, True),
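
One side effect of switching to os.getenv: environment values are always strings, whereas the old call passed port=5672 as an int. If EXN expects an integer port (an assumption; its signature is not shown in this diff), an explicit cast is safer:

port = int(os.getenv('NEBULOUS_BROKER_PORT', '5672'))  # os.getenv returns str (or None)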


@@ -1,8 +1,11 @@
- from app_factory import create_app
+ from dotenv import load_dotenv
from activemq import start_exn_connector_in_background
- from activemqOLD import start_exn_connector_in_background1
+ from app_factory import create_app  # Import your Flask app factory
+ load_dotenv()
app = create_app()
# Start the EXN connector in the background
start_exn_connector_in_background()
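
Note on load_dotenv(): it must run before any os.getenv lookup, and with no arguments it reads only a file literally named ".env" -- the .env.prod added in this commit reaches the app through docker-compose's env_file instead (see the compose hunk below). By default it also leaves already-set variables untouched (override=False), so container-supplied values win. A sketch for bare-metal runs that should also pick up the prod file (hypothetical path handling):

from dotenv import load_dotenv
load_dotenv()             # reads ".env" if present
load_dotenv('.env.prod')  # hypothetical: explicitly load the prod file as well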


@@ -1,88 +0,0 @@
from flask import Flask, request, jsonify, render_template, redirect, url_for
from flask_cors import CORS, cross_origin
import json

app = Flask(__name__, template_folder='templates')
#app = Flask(__name__)
CORS(app)
#CORS(app, resources={r"/get_hierarchical_category_list": {"origins": "http://localhost:8080"}})

hierarchy_data = hierarchical_list = [
    {
        "name": "Level 1 - Item 1",
        "children": [
            {
                "name": "Level 2 - Item 1.1",
                "children": [
                    {"name": "Level 3 - Item 1.1.1"},
                    {"name": "Level 3 - Item 1.1.2"},
                ],
            },
            {
                "name": "Level 2 - Item 1.2",
                "children": [
                    {"name": "Level 3 - Item 1.2.1"},
                    {"name": "Level 3 - Item 1.2.2"},
                ],
            },
        ],
    },
    {
        "name": "Level 1 - Item 2",
        "children": [
            {
                "name": "Level 2 - Item 2.1",
                "children": [
                    {"name": "Level 3 - Item 2.1.1"},
                    {"name": "Level 3 - Item 2.1.2"},
                ],
            },
            {
                "name": "Level 2 - Item 2.2",
                "children": [
                    {"name": "Level 3 - Item 2.2.1"},
                    {"name": "Level 3 - Item 2.2.2"},
                ],
            },
        ],
    },
    # Add more items as needed
]
# print(json.dumps(hierarchical_list, indent=2))

'''
def traverse_hierarchy(node, selected_items, required_count):
    if node['name'] in selected_items:
        required_count -= 1
    for child in node.get('children', []):
        required_count = traverse_hierarchy(child, selected_items, required_count)
    return required_count
'''

@app.route('/get_hierarchical_category_list')
def get_hierarchical_category_list():
    return jsonify(hierarchy_data)

@app.route('/process_selected_items', methods=['POST'])
def process_selected_items():
    try:
        data = request.get_json()
        selected_items = data.get('selectedItems', [])
        # Print selected items for debugging
        print("Selected Items:", selected_items)
        # Continue processing the selected items
        # For example, you can print or process the selected items here
        # Redirect to the show_selected_items route
        return redirect(url_for('show_selected_items', items=','.join(selected_items)))
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/show_selected_items/<items>')
@cross_origin()
def show_selected_items(items):
    return render_template('selected_items.html', items=items.split(','))

if __name__ == '__main__':
    app.run(debug=True)


@@ -1,9 +1,11 @@
+ import os
import psycopg2
def db_open():
    # Connect to the database
-   conn = psycopg2.connect(database="fog_broker", user="dbuser", password="pass123", host="localhost", port="5432")
+   conn = psycopg2.connect(database=os.getenv('POSTGRES_DB_NAME'), user=os.getenv('POSTGRES_DB_USER'), password=os.getenv('POSTGRES_DB_PASS'), host=os.getenv('POSTGRES_DB_HOST'), port=os.getenv('POSTGRES_DB_PORT'))
    # create a cursor
    cur = conn.cursor()
    return conn, cur
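
db_open() hands back both the connection and a cursor, so their lifecycle belongs to the caller. A hypothetical usage (the diff shows no matching db_close(), so explicit cleanup is assumed to be the caller's job):

conn, cur = db_open()
try:
    cur.execute("SELECT 1")
    print(cur.fetchone())
finally:
    cur.close()
    conn.close()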


@@ -6,6 +6,8 @@ services:
      dockerfile: Dockerfile
    ports:
      - "8001:8001"
+   env_file:
+     - .env.prod
    depends_on:
      - db
  db:


@@ -1,80 +0,0 @@
{
  "_id": "b4ce322c-698a-43b9-a889-bf0da2a4dcb9",
  "os": "LINUX",
  "name": "Test VM #0001",
  "owner": "admin",
  "ipAddress": "10.10.0.6",
  "location": {
    "name": "laptop",
    "latitude": 12.345,
    "longitude": 56.789
  },
  "username": "ubuntu",
  "password": ["u", "b", "u", "n", "t", "u"],
  "publicKey": [],
  "deviceInfo": {
    "CPU_SOCKETS": "1",
    "CPU_CORES": "10",
    "CPU_PROCESSORS": "20",
    "RAM_TOTAL_KB": "16218480",
    "RAM_AVAILABLE_KB": "13366788",
    "RAM_FREE_KB": "10943372",
    "RAM_USED_KB": "5275108",
    "RAM_UTILIZATION": "32.5253",
    "DISK_TOTAL_KB": "1055762868",
    "DISK_FREE_KB": "976527612",
    "DISK_USED_KB": "79235256",
    "DISK_UTILIZATION": "7.50502",
    "OS_ARCHITECTURE": "x86_64",
    "OS_KERNEL": "Linux",
    "OS_KERNEL_RELEASE": "5.15.133.1-microsoft-standard-WSL2"
  },
  "requestId": "eb6441fc-613a-482e-ba94-b16db57ecd36",
  "creationDate": "2024-01-15T13:23:40.602Z",
  "lastUpdateDate": "2024-01-15T14:32:43.485Z",
  "status": "HEALTHY",
  "nodeReference": "40ed1989-49ba-4496-a5c5-3d8ca1a18972",
  "messages": [],
  "statusUpdate": {
    "ipAddress": "10.10.0.6",
    "clientId": "VM-LINUX-TEST-VM-0001-Test VM #0001-DEFAULT-10.10.0.6-_",
    "state": "REGISTERED",
    "stateLastUpdate": "2024-01-15T13:23:47.463Z",
    "reference": "40ed1989-49ba-4496-a5c5-3d8ca1a18972",
    "errors": []
  },
  "metrics": {
    "ipAddress": "10.10.0.6",
    "clientId": "VM-LINUX-TEST-VM-0001-Test VM",
    "timestamp": "2024-01-15T14:32:33.467Z",
    "metrics": {
      "count-total-events-failures": 0,
      "count-total-events-text": 0,
      "tx": 0,
      "count-total-events-other": 0,
      "count-event-forwards-success": 0,
      "count-event-forwards-failure": 0,
      "rx": 0,
      "count-total-events": 0,
      "cpu": 0.6,
      "uptime": 10742,
      "count-event-local-publish-failure": 0,
      "count-total-events-object": 0,
      "disk": 2.48262,
      "count-event-local-publish-success": 0,
      "updatetime": 1705318391,
      "currdatetime": 1705329133,
      "ram": 23.7719
    },
    "latestEvents": []
  },
  "retries": 0,
  "_class": "eu.nebulous.resource.discovery.monitor.model.Device"
}


@@ -1,44 +0,0 @@
https://158.39.75.54/projects/nebulous-collaboration-hub/wiki/accessing-the-demo-activemq-broker
https://openproject.nebulouscloud.eu/projects/nebulous-collaboration-hub/wiki/accessing-the-nebulous-dev-k8s
activemq
terminal in Documents/projects/koronakos/nebulous_certs
export KUBECONFIG=kubeconfig.yaml
kubectl -n nebulous port-forward service/nebulous-activemq 5672:5672 8161:8161 61616:61616
OPTIMIZER waits for a reply on eu.nebulouscloud.cfsb.get_node_candidates.reply,
the reply body will be the list of node candidates as you got it from SAL, but augmented with ranking information.
The optimiser will use the highest-ranked node candidates.
the app name comes from message.subject
use the above instead of "application_id": "app_id" on line 74
passwords
gregkoron
Olympi@kos789
-------------
SAL
SAL POSTMAN: https://158.39.75.54/projects/nebulous-collaboration-hub/wiki/deployment-manager-sal-1
Port Forwarding to enable POSTMAN for SAL:
kubectl port-forward -n nebulous service/sal 8080:8080
Use this in POSTMAN Request: https://localhost:8080/sal/pagateway/connect
---
kubectl port-forward -n nebulous service/sal1 8080:8080
------If problems occur, it works with this-----
kubectl -n nebulous get pod | findstr sal
kubectl -n nebulous delete pod sal-all-6b8bbc54cf-hxsj6
-----------
----------FLUX--------------
kubectl scale -n flux-system --replicas=0 deploy/sal-all-flux
kubectl scale -n flux-system --replicas=1 deploy/sal-all-flux
kubectl -n flux-system port-forward service/sal-flux <YOUR_FAV_PORT>:8080
kubectl scale -n flux-system --replicas=0 deploy/sal-all-flux
kubectl scale -n flux-system --replicas=1 deploy/sal-all-flux

File diff suppressed because it is too large