Initial check-in: application skeleton

No functionality yet, except:

- Try to parse the sample yaml app files from T2.5

- Have a dockerfile, unit tests etc. available

Change-Id: If15c1197f61a53db3d7ab51328c24100b1641732
Rudi Schlatte 2023-10-19 17:10:44 +02:00
parent 5cc0595207
commit e579773feb
20 changed files with 1501 additions and 0 deletions

9
.gitattributes vendored Normal file

@@ -0,0 +1,9 @@
#
# https://help.github.com/articles/dealing-with-line-endings/
#
# Linux start script should use lf
/gradlew text eol=lf
# These are Windows script files and should use crlf
*.bat text eol=crlf

15
.gitignore vendored

@@ -1,2 +1,17 @@
__pycache__/
.nox/
# Gradle project-specific cache directory
.gradle
# Gradle build output directory
/optimiser-controller/build/
# generated artefact directory
/optimiser-controller/dist/
# jdtls (Java LSP server) and/or eclipse data files
.classpath
.project
.settings/
/optimiser-controller/bin/
# IntelliJ IDEA configuration files
/.idea/

1
.java-version Normal file

@@ -0,0 +1 @@
17.0

43
README.md Normal file

@@ -0,0 +1,43 @@
# The NebulOuS optimizer controller
This module serves two purposes:
- When a new NebulOuS application is started, set up the initial optimizer
infrastructure, parse the application structure and metric model, and pass
an initial resource configuration (“Optimized Service Graph”) to the
Deployment Manager.
- When an application is running, monitor the application-specific metrics
coming in via ActiveMQ and invoke the optimizer as needed, thereby possibly
triggering application reconfigurations.
# Building
To compile, install a JDK (Java Development Kit) version 17 or greater on the build machine.
```sh
# Compile:
./gradlew assemble
# Compile and test:
./gradlew build
```
# Building the container
A container can be built and run with the following commands:
```sh
docker build -t optimiser-controller -f optimiser-controller/Dockerfile .
docker run --rm optimiser-controller
```
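
Arguments after the image name are appended to the `ENTRYPOINT` and thus passed
to the jar; for example, to print the version instead of the default `--help`
(an illustrative invocation; picocli's standard help options are enabled in `Main`):
```sh
docker run --rm optimiser-controller --version
```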
# Running
To run, install a JRE (Java Runtime Environment) version 17 or greater.
A successful build creates the self-contained jar file
`optimiser-controller/dist/optimiser-controller-all.jar`, which can be
executed via the following command:
```sh
java -jar optimiser-controller/dist/optimiser-controller-all.jar
```
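
For testing, the jar also accepts the command-line options defined in `Main`
(the file names below are illustrative; any readable YAML files will do, e.g.
the samples under `optimiser-controller/src/test/resources/`):
```sh
java -jar optimiser-controller/dist/optimiser-controller-all.jar \
  --kubevela-file vela-deployment.yaml \
  --kubevela-parameterized-file vela-deployment-parameterized.yaml \
  --resource-model-file surveillance_app_SAMPLE_metric_model.yml
```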

BIN
gradle/wrapper/gradle-wrapper.jar vendored Normal file

Binary file not shown.

7
gradle/wrapper/gradle-wrapper.properties vendored Normal file

@@ -0,0 +1,7 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-bin.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists

249
gradlew vendored Executable file

@@ -0,0 +1,249 @@
#!/bin/sh
#
# Copyright © 2015-2021 the original authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
#
# Gradle start up script for POSIX generated by Gradle.
#
# Important for running:
#
# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
# noncompliant, but you have some other compliant shell such as ksh or
# bash, then to run this script, type that shell name before the whole
# command line, like:
#
# ksh Gradle
#
# Busybox and similar reduced shells will NOT work, because this script
# requires all of these POSIX shell features:
# * functions;
# * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
# «${var#prefix}», «${var%suffix}», and «$( cmd )»;
# * compound commands having a testable exit status, especially «case»;
# * various built-in commands including «command», «set», and «ulimit».
#
# Important for patching:
#
# (2) This script targets any POSIX shell, so it avoids extensions provided
# by Bash, Ksh, etc; in particular arrays are avoided.
#
# The "traditional" practice of packing multiple parameters into a
# space-separated string is a well documented source of bugs and security
# problems, so this is (mostly) avoided, by progressively accumulating
# options in "$@", and eventually passing that to Java.
#
# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
# see the in-line comments for details.
#
# There are tweaks for specific operating systems such as AIX, CygWin,
# Darwin, MinGW, and NonStop.
#
# (3) This script is generated from the Groovy template
# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
# within the Gradle project.
#
# You can find Gradle at https://github.com/gradle/gradle/.
#
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
app_path=$0
# Need this for daisy-chained symlinks.
while
APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path
[ -h "$app_path" ]
do
ls=$( ls -ld "$app_path" )
link=${ls#*' -> '}
case $link in #(
/*) app_path=$link ;; #(
*) app_path=$APP_HOME$link ;;
esac
done
# This is normally unused
# shellcheck disable=SC2034
APP_BASE_NAME=${0##*/}
# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036)
APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD=maximum
warn () {
echo "$*"
} >&2
die () {
echo
echo "$*"
echo
exit 1
} >&2
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "$( uname )" in #(
CYGWIN* ) cygwin=true ;; #(
Darwin* ) darwin=true ;; #(
MSYS* | MINGW* ) msys=true ;; #(
NONSTOP* ) nonstop=true ;;
esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD=$JAVA_HOME/jre/sh/java
else
JAVACMD=$JAVA_HOME/bin/java
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD=java
if ! command -v java >/dev/null 2>&1
then
die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
fi
# Increase the maximum file descriptors if we can.
if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
case $MAX_FD in #(
max*)
# In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked.
# shellcheck disable=SC2039,SC3045
MAX_FD=$( ulimit -H -n ) ||
warn "Could not query maximum file descriptor limit"
esac
case $MAX_FD in #(
'' | soft) :;; #(
*)
# In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked.
# shellcheck disable=SC2039,SC3045
ulimit -n "$MAX_FD" ||
warn "Could not set maximum file descriptor limit to $MAX_FD"
esac
fi
# Collect all arguments for the java command, stacking in reverse order:
# * args from the command line
# * the main class name
# * -classpath
# * -D...appname settings
# * --module-path (only if needed)
# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.
# For Cygwin or MSYS, switch paths to Windows format before running java
if "$cygwin" || "$msys" ; then
APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" )
JAVACMD=$( cygpath --unix "$JAVACMD" )
# Now convert the arguments - kludge to limit ourselves to /bin/sh
for arg do
if
case $arg in #(
-*) false ;; # don't mess with options #(
/?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath
[ -e "$t" ] ;; #(
*) false ;;
esac
then
arg=$( cygpath --path --ignore --mixed "$arg" )
fi
# Roll the args list around exactly as many times as the number of
# args, so each arg winds up back in the position where it started, but
# possibly modified.
#
# NB: a `for` loop captures its iteration list before it begins, so
# changing the positional parameters here affects neither the number of
# iterations, nor the values presented in `arg`.
shift # remove old arg
set -- "$@" "$arg" # push replacement arg
done
fi
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
# Collect all arguments for the java command:
# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
# and any embedded shellness will be escaped.
# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
# treated as '${Hostname}' itself on the command line.
set -- \
"-Dorg.gradle.appname=$APP_BASE_NAME" \
-classpath "$CLASSPATH" \
org.gradle.wrapper.GradleWrapperMain \
"$@"
# Stop when "xargs" is not available.
if ! command -v xargs >/dev/null 2>&1
then
die "xargs is not available"
fi
# Use "xargs" to parse quoted args.
#
# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
#
# In Bash we could simply go:
#
# readarray ARGS < <( xargs -n1 <<<"$var" ) &&
# set -- "${ARGS[@]}" "$@"
#
# but POSIX shell has neither arrays nor command substitution, so instead we
# post-process each arg (as a line of input to sed) to backslash-escape any
# character that might be a shell metacharacter, then use eval to reverse
# that process (while maintaining the separation between arguments), and wrap
# the whole thing up as a single "set" statement.
#
# This will of course break if any of these variables contains a newline or
# an unmatched quote.
#
eval "set -- $(
printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
xargs -n1 |
sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
tr '\n' ' '
)" '"$@"'
exec "$JAVACMD" "$@"

92
gradlew.bat vendored Normal file

@@ -0,0 +1,92 @@
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@if "%DEBUG%"=="" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%"=="" set DIRNAME=.
@rem This is normally unused
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Resolve any "." and ".." in APP_HOME to make it shorter.
for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if %ERRORLEVEL% equ 0 goto execute
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto execute
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
:end
@rem End local scope for the variables with windows NT shell
if %ERRORLEVEL% equ 0 goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
set EXIT_CODE=%ERRORLEVEL%
if %EXIT_CODE% equ 0 set EXIT_CODE=1
if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
exit /b %EXIT_CODE%
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega

17
optimiser-controller/Dockerfile Normal file

@@ -0,0 +1,17 @@
#
# Build stage
#
FROM docker.io/library/gradle:8-jdk17-alpine AS build
COPY . /home/optimiser-controller
WORKDIR /home/optimiser-controller
RUN gradle --no-daemon clean assemble
#
# Package stage
#
FROM docker.io/library/eclipse-temurin:17-jre
COPY --from=build /home/optimiser-controller/optimiser-controller/dist/optimiser-controller-all.jar /usr/local/lib/optimiser-controller-all.jar
# When running the image without arguments, print help for now. Adjust this
# once we start listening to MQTT messages from the NebulOuS GUI.
CMD ["--help"]
ENTRYPOINT ["java","-jar","/usr/local/lib/optimiser-controller-all.jar"]

86
optimiser-controller/build.gradle Normal file

@@ -0,0 +1,86 @@
/*
 * This file was generated by the Gradle 'init' task.
 *
 * This generated file contains a sample Java application project to get you started.
 * For more details on building Java & JVM projects, please refer to https://docs.gradle.org/8.4/userguide/building_java_projects.html in the Gradle documentation.
 */

plugins {
    // Apply the application plugin to add support for building a CLI application in Java.
    id 'application'

    // This plugin creates a self-contained jar file including all dependencies
    id 'com.github.johnrengelman.shadow' version '8.1.1'

    // Use this to check for newer versions of dependency libraries via
    // ./gradlew dependencyUpdates
    id "com.github.ben-manes.versions" version "0.49.0"
}

repositories {
    // Use Maven Central for resolving dependencies.
    mavenCentral()

    // 7bulls, activeeon maven repositories for SAL
    maven {
        url 'http://repository.activeeon.com/content/groups/proactive/'
        allowInsecureProtocol = true
    }
}

dependencies {
    // YAML parsing: https://github.com/decorators-squad/eo-yaml/tree/master
    implementation 'com.amihaiemil.web:eo-yaml:7.0.9'

    // Command-line parsing: https://picocli.info
    implementation 'info.picocli:picocli:4.7.5'

    // SAL client library
    implementation 'org.ow2.proactive:sal-common:13.1.0-SNAPSHOT'

    // Logging: https://logging.apache.org/log4j/2.x/
    implementation 'org.apache.logging.log4j:log4j-core:2.21.0'

    // Use JUnit Jupiter for testing.
    testImplementation 'org.junit.jupiter:junit-jupiter:5.10.0'
    testRuntimeOnly 'org.junit.platform:junit-platform-launcher'
}

// Apply a specific Java toolchain to ease working on different environments.
java {
    toolchain {
        languageVersion = JavaLanguageVersion.of(17)
    }
}

application {
    // Define the main class for the application.
    mainClass = 'eu.nebulous.optimiser.controller.Main'
}

jar {
    archiveBaseName = 'optimiser-controller'
    manifest {
        attributes 'Implementation-Title': 'optimiser-controller',
                   'Bundle-Version': '0.1'
    }
}

shadowJar {
    // Creates a fat jar, including all dependencies
}

task distJar(type: Copy, dependsOn: shadowJar) {
    description 'Copies the fat jar into its documented location.'
    group 'build'
    from shadowJar.archiveFile
    into 'dist/'
}

clean {
    delete 'dist/'
}

assemble.dependsOn distJar

tasks.named('test') {
    // Use JUnit Platform for unit tests.
    useJUnitPlatform()
}

61
optimiser-controller/src/main/java/eu/nebulous/optimiser/controller/AppParser.java Normal file

@@ -0,0 +1,61 @@
package eu.nebulous.optimiser.controller;

import com.amihaiemil.eoyaml.Yaml;
import com.amihaiemil.eoyaml.YamlMapping;

import java.io.File;
import java.io.IOException;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class AppParser {

    private static final Logger log = LogManager.getLogger(AppParser.class.getName());

    /**
     * Parse a KubeVela file. The file should be deployable as-is.
     *
     * @param kubevela_file the file to be parsed
     * @return true if `kubevela_file` could be parsed
     */
    public boolean parseKubevela(File kubevela_file) {
        try {
            YamlMapping m = Yaml.createYamlInput(kubevela_file).readYamlMapping();
        } catch (IOException e) {
            log.error("Could not parse " + kubevela_file + ": ", e);
            return false;
        }
        return true;
    }

    /**
     * Parse a parameterized KubeVela file. Such a file is not directly
     * deployable, since it contains ranges for various parameters.
     *
     * @param kubevela_param_file the file to be parsed
     * @return true if `kubevela_param_file` could be parsed
     */
    public boolean parseParameterizedKubevela(File kubevela_param_file) {
        try {
            YamlMapping m = Yaml.createYamlInput(kubevela_param_file).readYamlMapping();
        } catch (IOException e) {
            log.error("Could not parse " + kubevela_param_file + ": ", e);
            return false;
        }
        return true;
    }

    /**
     * Parse a metric model. This file contains the metric model for a
     * parameterized KubeVela file.
     */
    public boolean parseMetricModel(File metricmodel_file) {
        try {
            YamlMapping m = Yaml.createYamlInput(metricmodel_file).readYamlMapping();
        } catch (IOException e) {
            log.error("Could not parse " + metricmodel_file + ": ", e);
            return false;
        }
        return true;
    }
}

62
optimiser-controller/src/main/java/eu/nebulous/optimiser/controller/Main.java Normal file

@@ -0,0 +1,62 @@
package eu.nebulous.optimiser.controller;

import java.io.File;
import java.util.concurrent.Callable;

import picocli.CommandLine;
import static picocli.CommandLine.Command;
import static picocli.CommandLine.Option;

/**
 * The main class of the optimizer controller.
 */
@Command(name = "nebulous-optimizer-controller",
    version = "0.1", // TODO read this from Bundle-Version in the jar MANIFEST.MF
    mixinStandardHelpOptions = true,
    description = "Receive app creation messages from the UI and start up the optimizer infrastructure.")
public class Main implements Callable<Integer> {

    @Option(names = {"-k", "--kubevela-file"},
        description = "The name of a KubeVela yaml file to process (mostly for testing purposes)")
    private File kubevela_file;

    @Option(names = {"-p", "--kubevela-parameterized-file"},
        description = "The name of a parameterized KubeVela yaml file to process (mostly for testing purposes)")
    private File kubevela_parameterized_file;

    @Option(names = {"-m", "--resource-model-file"},
        description = "The name of a resource model to process (mostly for testing purposes)")
    private File resourcemodel_file;

    /**
     * The main method of the main class.
     *
     * @return 0 if no error during execution, otherwise greater than 0
     */
    @Override
    public Integer call() {
        int success = 0;
        AppParser p = new AppParser();
        if (kubevela_file != null) {
            success = p.parseKubevela(kubevela_file) ? success : 1;
        }
        if (kubevela_parameterized_file != null) {
            success = p.parseParameterizedKubevela(kubevela_parameterized_file) ? success : 2;
        }
        if (resourcemodel_file != null) {
            success = p.parseMetricModel(resourcemodel_file) ? success : 3;
        }
        return success;
    }

    /**
     * External entry point for the main class. Parses command-line
     * parameters and invokes the `call` method.
     *
     * @param args the command-line parameters as passed by the user
     */
    public static void main(String[] args) {
        int exitCode = new CommandLine(new Main()).execute(args);
        System.exit(exitCode);
    }
}

37
optimiser-controller/src/test/java/eu/nebulous/optimiser/controller/AppParserTest.java Normal file

@@ -0,0 +1,37 @@
package eu.nebulous.optimiser.controller;

import org.junit.jupiter.api.Test;

import java.io.File;
import java.net.URISyntaxException;
import java.net.URL;

import static org.junit.jupiter.api.Assertions.*;

public class AppParserTest {

    private File getResourceFile(String name) throws URISyntaxException {
        URL resourceUrl = getClass().getClassLoader().getResource(name);
        return new File(resourceUrl.toURI());
    }

    @Test
    void readKubevelaFile() throws URISyntaxException {
        File file = getResourceFile("vela-deployment.yaml");
        AppParser p = new AppParser();
        assertTrue(p.parseKubevela(file));
    }

    @Test
    void readParameterizedKubevelaFile() throws URISyntaxException {
        File file = getResourceFile("vela-deployment-parameterized.yaml");
        AppParser p = new AppParser();
        assertTrue(p.parseParameterizedKubevela(file));
    }

    @Test
    void readResourceModel() throws URISyntaxException {
        File file = getResourceFile("surveillance_app_SAMPLE_metric_model.yml");
        AppParser p = new AppParser();
        assertTrue(p.parseMetricModel(file));
    }
}

14
optimiser-controller/src/test/java/eu/nebulous/optimiser/controller/MainTest.java Normal file

@@ -0,0 +1,14 @@
/*
 * This Java source file was generated by the Gradle 'init' task.
 */
package eu.nebulous.optimiser.controller;

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.*;

class MainTest {
    @Test void noArgRunSucceeds() {
        Main main = new Main();
        assertTrue(main.call() == 0);
    }
}


@@ -0,0 +1,121 @@
# Illustrative model for the 'face-detection' component
#
# Component resource requirements
# Values in meaningful units
var faceDetection.edge.cpu in interval [1.2, 3.0];
var faceDetection.edge.memory in integer [250, 1000];
var faceDetection.cloud.cpu in interval [3.0, 6.0];
var faceDetection.cloud.memory in integer [1000, 4000];
#
# Cloud and edge providers
#
set CloudProviders := AWS, Google, Azure;
set EdgeProviders := TID, Orange, Vodaphone, Swisscom;
#
# Number of workers to deploy and at different locations
#
var faceDetection.cloudWorkers.count in integer [2, 10];
var faceDetection.edgeWorkers.count in integer [0, 5];
var faceDetection.cloudWorkers.location{p in CloudProviders} in integer [0, faceDetection.cloudWorkers.count];
var faceDetection.edgeWorkers.location{p in EdgeProviders} in integer [0, faceDetection.edgeWorkers.count];
#
# Making sure to deploy correct number of workers over all locations
#
subject to CloudWorkerLimit :
  sum{ p in CloudProviders } faceDetection.cloudWorkers.location[p] <= faceDetection.cloudWorkers.count;
subject to EdgeWorkerLimit :
  sum{ p in EdgeProviders } faceDetection.edgeWorkers.location[p] <= faceDetection.edgeWorkers.count;
#
# Label the nodes at each provider. The range is set so that there are as many nodes as
# there are workers at each provider, to accommodate the case where there is only one worker per node.
#
param CloudNodeIDs{p in CloudProviders, 1..faceDetection.cloudWorkers.location[p]};
param EdgeNodeIDs{p in EdgeProviders, 1..faceDetection.edgeWorkers.location[p]};
#
# Specific deployment decision variables, with the constraint that the number of worker
# instances summed over the nodes of each provider matches that provider's worker count
#
var faceDetection.cloudWorkers.cloud.node.instances{p in CloudProviders, 1..faceDetection.cloudWorkers.location[p]}
  in integer [0, faceDetection.cloudWorkers.location[p]];
var faceDetection.edgeWorkers.edge.node.instances{p in EdgeProviders, 1..faceDetection.edgeWorkers.location[p]}
  in integer [0, faceDetection.edgeWorkers.location[p]];

subject to CloudNodeWorkerLimit:
  sum{ p in CloudProviders, id in integer [1, faceDetection.cloudWorkers.location[p]] }
    faceDetection.cloudWorkers.cloud.node.instances[p, id] == faceDetection.cloudWorkers.location[p];
subject to EdgeNodeWorkerLimit:
  sum{ p in EdgeProviders, id in integer [1, faceDetection.edgeWorkers.location[p]] }
    faceDetection.edgeWorkers.edge.node.instances[p, id] == faceDetection.edgeWorkers.location[p];
#
# Cost parameters to be set for the available node candidates
# Values in some currency
#
param CloudNodeCost{ id in CloudNodeIDs };
param EdgeNodeCost{ id in EdgeNodeIDs };
#
# Then calculate the total deployment cost for Cloud and Edge
#
param TotalProviderCloudCost{ p in CloudProviders }
  = sum{ n in faceDetection.cloudWorkers.location[p] :
         faceDetection.cloudWorkers.cloud.node.instances[ p, n ] > 0 } ( CloudNodeCost[ CloudNodeIDs[p, n] ] );
param TotalProviderEdgeCost{ p in EdgeProviders }
  = sum{ n in faceDetection.edgeWorkers.location[p] :
         faceDetection.edgeWorkers.edge.node.instances[ p, n ] > 0 } ( EdgeNodeCost[ EdgeNodeIDs[p, n] ] );
#
# Cost constraint on the number of workers
#
param DeploymentBudget;
param TotalCloudCost = sum{ p in CloudProviders } TotalProviderCloudCost[p];
param TotalEdgeCost = sum{ p in EdgeProviders } TotalProviderEdgeCost[p];
subject to DeploymentCostConstraint :
  TotalCloudCost + TotalEdgeCost <= DeploymentBudget;
# =======================================================================================================================================================
# Utility calculation
#
# There will be two objectives for this deployment.
# The first objective aims at minimising the total cost of the deployment.
minimize Cost:
  TotalCloudCost + TotalEdgeCost;
#
# The second objective aims to provide enough facial detection components to be able to process the average number of images. It is assumed that the
# metric model contains two measurements indicating the number of images to be processed over the next time interval, and the statistical observation
# of the upper quantile of the observed image processing time for the completed images. This allows the computation of the number of images per
# facial detection component. One may always ensure that all images will be processed by overprovisioning facial
# detection components, but this will contradict minimising cost. At the same time, too few facial detection components will make the queue grow
# unbounded. The idea is therefore to use a performance utility which will be maximal when the expected flow of images is served.
param ImagesToProcess;
param UpperQuantileImagesProcessingTime;
param TimeIntervalLength = 60; # seconds
param UpperQuantileNoImagesPerComponent = TimeIntervalLength / UpperQuantileImagesProcessingTime;

maximize Performance:
  1/exp( (ImagesToProcess - UpperQuantileNoImagesPerComponent * (faceDetection.cloudWorkers.count + faceDetection.edgeWorkers.count) )^2 );
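#
# Note: since 1/exp(x^2) = exp(-x^2), the Performance utility is maximal (= 1)
# exactly when ImagesToProcess = UpperQuantileNoImagesPerComponent *
# (faceDetection.cloudWorkers.count + faceDetection.edgeWorkers.count),
# i.e. when the deployed components exactly serve the expected image flow,
# and it decays towards 0 as the deployment over- or under-provisions.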


@@ -0,0 +1,6 @@
These files were downloaded from [NEBULOUS - T2.5 - All
Documents](https://eurecatcloud.sharepoint.com/sites/NEBULOUS/Shared%20Documents/Forms/AllItems.aspx?csf=1&web=1&e=zIdzwG&cid=1097a5a6%2D9550%2D4c18%2Db602%2D2881bc3adbed&RootFolder=%2Fsites%2FNEBULOUS%2FShared%20Documents%2FWork%20Package%202%20%2D%20Requirements%20Analysis%2C%20Architectu%2FT2%2E5&FolderCTID=0x0120000596D9CCF02B4A45AB088935B35B3D54)
on 2023-10-24.
The canonical `vela-deployment.yaml` file is probably at
https://gitlab.ubitech.eu/nebulous/use-cases/surveillance-dsl-demo/

272
optimiser-controller/src/test/resources/surveillance_app_SAMPLE_metric_model.yml Normal file

@@ -0,0 +1,272 @@
#
# Copyright (C) 2023 Institute of Communication and Computer Systems (imu.iccs.gr)
#
# This Source Code Form is subject to the terms of the Mozilla Public License, v2.0.
# If a copy of the MPL was not distributed with this file, you can obtain one at
# https://www.mozilla.org/en-US/MPL/2.0/
#

# !!! IMPORTANT !!!
# Sample metric model for discussion with consortium
# Based on surveillance app demo

# Header. MUST remain as-is
apiVersion: nebulous/v11
kind: MetricModel

# Optional. Currently, only 'metadata.name' is used (if present)
metadata:
  name: face-detection-deployment
  labels:
    app: surveillance-demo-app

# Not part of metric model!!
# The following section can include anchors to values or blocks
# that will be reused (multiple times) in the model
common:
  a_constant: &A_CONST 'a_constant value'  # Subsequent occurrences of '*A_CONST' will be replaced with 'a_constant value'
  percentage_template: &prct_tpl  # Occurrences of '*prct_tpl' will be replaced with the following keys
    id: 'prct'
    type: double
    range: [0, 100]
    unit: none

# The main part of the metric model. MANDATORY
# It can only contain 'components' and 'scopes' subsections
spec:
  # Optional. Lists components (i.e. containers in K8S files) for which
  # we need to specify component-specific metrics or requirements
  components:
    # Each item in the 'components' list corresponds to a K8S container, and
    # MUST have the SAME NAME.
    # Each item MUST HAVE a 'name' field, which MUST BE non-blank (!=null && .trim()!='') and UNIQUE
    # (both in components and scopes sections)
    # Each item CAN HAVE a 'requirements', and/or a 'metrics' section
    - name: face-detection  # MUST MATCH a container name in the K8S file
      requirements:  # Lists requirements specific to this component
        # Each item in the list is a component-specific requirement
        # Currently, requirements can only be Service-Level Objectives (SLOs)
        # Each requirement MUST HAVE a name, which MUST BE non-blank and UNIQUE
        # Each requirement MUST HAVE a type, whose value MUST BE 'slo' (in the future more values might be added)
        # Each requirement MIGHT have additional fields/sections, specific to 'type'
        # For 'slo' type the 'constraint' field MUST BE provided and MUST BE non-blank
        - name: cpu_slo
          type: slo
          # SLO 'constraint' can be specified in two forms (shorthand, or detailed)
          #
          # The detailed form requires providing a section that MUST include a 'type' field,
          # which specifies the constraint type; each type further requires its type-specific fields.
          # Currently three constraint types are supported:
          # - Metric constraints, which take the form 'METRIC_EXPRESSION COMPARISON_OPERATOR THRESHOLD'
          # - Logical constraints, which take the form 'CONSTRAINT [AND/OR] CONSTRAINT...'
          # - Conditional constraints, which take the form 'IF CONSTRAINT THEN CONSTRAINT ELSE CONSTRAINT'
          # where METRIC_EXPRESSION can be any numeric expression following the mathXParser syntax,
          # including metrics (specified in the 'metrics' section). COMPARISON_OPERATOR can be < > <= >= = <>
          # and THRESHOLD is a double number. CONSTRAINT can be any type of constraint.
          constraint:
            type: metric
            metric: cpu_util_prct
            operator: '>'
            threshold: 80
          # Alternatively the shorthand representation can be used
          # Currently, the shorthand form supports only metric constraints
          # It requires providing a string value with a specific syntax:
          # '<METRIC_EXPRESSION> <COMPARISON_OPERATOR> <THRESHOLD>'
          # constraint: 'cpu_util_prct > 80'
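          # Illustrative logical and conditional constraints in the grammars
          # described above (hypothetical values, shown as comments only, since
          # the shorthand form currently supports only metric constraints):
          #   logical:     'cpu_util_prct > 80 AND ram_util_prct > 80'
          #   conditional: 'IF cpu_util_prct > 80 THEN ram_util_prct > 80 ELSE latency > 25'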
        - name: ram_slo
          type: slo
          constraint: 'ram_util_prct > 80'
        - name: bandwidth_slo
          type: slo
          constraint: '100 * bandwidth / PROVIDER_BANDWIDTH > 85'
        - name: latency_slo
          type: slo
          constraint: 'latency > 25'
        - name: fps_slo
          type: slo
          constraint: 'fps - 30 * vc_instance_number < 0'
      #--------------------------
      metrics:
        # Each item in the 'metrics' list is a component-specific metric
        # Each metric MUST HAVE a name, which MUST BE non-blank and UNIQUE
        # Each metric MUST HAVE a type, whose value MUST BE 'raw', 'composite', 'constant', or 'ref'
        # Each metric MIGHT HAVE a 'template' specifying its value range and unit.
        # If omitted, the metric can have any double value
        # Each metric has additional fields/sections, specific to its 'type'
        #
        # For 'composite' type the 'formula' field MUST BE provided and MUST BE non-blank
        # The 'formula' value is a metric expression and MUST follow the mathXParser syntax.
        # Composite metrics can additionally have a 'window', an 'output' section/field,
        # and a 'level'
        - name: cpu_util_prct
          type: composite  # Optional. Can be inferred from the existence of the 'formula' field
          template: *prct_tpl  # This is a pointer to the section at the '&prct_tpl' anchor (see the 'common' section)
          formula: 'mean(cpu_util_instance)'  # Expression follows mathXParser syntax (see https://mathparser.org/)
          level: global  # Level specifies where the metric calculation must be executed.
                         # Valid values: global (at EMS server), per_cloud, per_region, per_zone,
                         # per_host and per_instance (at each application node (VM/device/etc))
          # Window can be specified either in detailed or shorthand form
          # Windows in detailed form can also include 'processing' entries (in shorthand form this is not possible)
          # See the 'vc_instance_number' metric for an example with processings
          window:
            type: sliding  # Window type. Can be 'sliding' or 'batch'
            # Window 'size' can be specified either in detailed or shorthand form
            size:
              value: 5  # Window size value; a positive integer
              unit: min  # Window size unit; can be 'event' (for length-based windows)
                         # or 'ms', 'sec', 'min', 'hour', 'day' (for time-based windows)
              # The shorthand form of size MUST be a non-blank string
              # following the syntax '<SIZE> <UNIT>'
              # size: '5 min'
            #
            # The shorthand form of window MUST be a non-blank string
            # following the syntax '<WINDOW_TYPE> <SIZE> <UNIT>'
            # window: 'sliding 5 min'
          #
          # Output can be specified either in detailed or shorthand form
          output:
            type: all  # Type specifies how many and which events will be retained
                       # Valid values are: 'all', 'first', 'last'
            schedule:  # Schedule specifies the period at which events are calculated and sent
                       # It can also be in shorthand form with syntax '<VALUE> <UNIT>'
              value: 30  # The period value
              unit: sec  # The period value unit; ms, sec, min, hour, day
            # The shorthand representation of output MUST be a non-blank string
            # following the syntax '<TYPE> <VALUE> <UNIT>'
            # output: 'all 30 sec'
        #
        # For 'raw' type the 'sensor' field/section MUST BE provided
        # Raw metrics can additionally also have an 'output' section/field (like in composite metrics)
        - name: cpu_util_instance
          type: raw  # Optional. Can be inferred from the existence of the 'sensor' field
          template: *prct_tpl
          # The sensor section MUST include a 'type' field that MUST HAVE a non-blank string value
          # The 'netdata' type has special meaning and requires collecting metric values using the Netdata agent.
          # In this case only the Netdata group/chart/dimension is required, in the Netdata collector's format.
          # This value MUST BE specified in the 'affinity' field
          # Other type values refer to application-specific sensors. In this case 'config' and 'install' sections
          # can be specified for providing sensor configuration and/or installation instructions (TO BE IMPLEMENTED)
          # Note: a 'config' section can also be included for 'netdata' sensors in order to override defaults.
          sensor:
            type: netdata
            affinity: netdata__system__cpu__total
          # The shorthand representation of sensor MUST be a non-blank string
          # following the syntax '<SENSOR_TYPE> <SENSOR_SPECIFIC_CONFIG_STRING>'
          # For 'netdata' sensors only the Netdata group/chart/dimension is required, in the Netdata collector's format
          # sensor: 'netdata netdata__system__cpu__total'
          output: 'all 30 sec'
        - name: ram_util_prct
          type: composite
          template: *prct_tpl
          formula: 'mean(ram_util_instance)'
          window: 'sliding 5 min'
          output: 'all 30 sec'
        - name: ram_util_instance
          type: raw
          template: *prct_tpl
          sensor: 'netdata netdata__system__cpu__total'
          output: 'all 30 sec'
        - name: bandwidth
          type: composite
          formula: 'mean(bandwidth_instance)'
          window: 'sliding 5 min'
          output: 'all 30 sec'
        - name: bandwidth_instance
          type: raw
          sensor: 'netdata netdata__system__cpu__total'
          output: 'all 30 sec'
        #
        # The 'constant' metric can be used to specify initial values for constants used
        # in metric expressions. The values are set in the 'initial' field and MUST BE of the
        # type expected in metric expressions (i.e. double, long, int)
        - name: PROVIDER_BANDWIDTH
          type: constant  # Mandatory, in case of constants.
          initial: 1  # Used to set initial values for constants provided by the solver
        #---
        - name: latency
          # type: composite
          formula: 'mean(latency_instance)'
          window: 'sliding 5 min'
          output: 'all 30 sec'
        - name: latency_instance
          type: raw
          sensor:
            type: 'my_latency_sensor'  # Custom / application sensor (default is Netdata, used to collect system metrics)
            config: { ping_address: '1.2.3.4', proto: 'icmp', schedule: '1 min' }  # Sensor-specific configuration (as key-value pairs)
          output: 'all 30 sec'
        #---
        - name: fps
          type: composite
          formula: 'mean(fps_instance)'
          window: 'sliding 1 min'
          output: 'all 10 sec'
        - name: fps_instance
          type: raw
          sensor:
            type: 'my_fps_sensor'  # Custom / application sensor, without configuration
          output: 'all 1 sec'
        #
        # The 'ref' metric can be used to copy the specification of another 'raw' or 'composite' metric
        # into this component's scope, using a different name.
        # It is an alternative to the YAML anchor/alias feature, but it allows referencing metrics that will
        # be specified later in the file.
        # The 'ref' field MUST BE a non-blank string with the name of the referenced metric, following the
        # syntax: '[COMPONENT_NAME or SCOPE_NAME].[METRIC_NAME]'
        # If the referenced metric is in the same component/scope then only the METRIC_NAME part is needed.
        - name: vc_instance_number
          ref: '[video-capture].[vc_instance_number]'  # Replicates [video-capture].[vc_instance_number] under the vc_instance_number name
    #--------------------------
    - name: video-capture
      metrics:
        #---
        - name: vc_instance_number
          formula: 'add(vc_instance_number_raw)'
          level: global
          window:
            type: sliding
            size: '1 sec'
            # Processings introduce extra processing tasks on the data retained in the window
            # The 'processing' section lists all window processings. NOTE: Grouping processings
            # are executed first, then the Sorting and Ranking processings
            processing:
              - type: grouping  # A window GROUPING processing.
                                # Valid values: 'grouping', 'sorting', 'ranking'
                function: 'unique'  # applies an EPL function (UNIQUE in this case)
                criteria: [ PER_INSTANCE ]  # specifies the grouping criteria used
                # I.e., groups window events per instance (IP address) and applies the UNIQUE function per group
                # Result: the latest event per group/instance is effectively retained and used in subsequent calculations
          output: 'all 1 sec'
        - name: vc_instance_number_raw
          sensor:
            type: 'my_vc_sensor'
            config: {}
            install:
              script: 'wget http://..... && chmod +x ..... && ./run.sh &'
  #--------------------------
  # Optional. Lists scopes (i.e. groups of components) for which
  # we need to specify metrics or requirements in common
  scopes:
    # Each item in the 'scopes' list specifies a new scope. Scopes are used to specify requirements and metrics
    # common to many or all components. These requirements and metrics apply in addition to the component-specific ones.
    #
    # Each item MUST HAVE a 'name' field, which MUST BE non-blank and UNIQUE (both in scopes and components sections)
    # Each item CAN HAVE a 'components' list, naming the components participating in the scope.
    # If omitted, all components participate in the scope (i.e. it is an application-wide scope)
    # Each item CAN HAVE a 'requirements', and/or a 'metrics' section
    - name: app-wide-scope
      components: [ 'face-detection', 'video-capture' ]
      requirements:
        - name: sample_slo_combining_data_across_components
          type: slo
          constraint: 'sample_metric_combining_data_across_components > 10'
      metrics:
        - name: sample_metric_combining_data_across_components
          formula: 'vc_instance_number * [face-detection].[latency]'
          level: global
          output: 'all 1 min'
        - name: vc_instance_number
          ref: '[video-capture].[vc_instance_number]'
        #---
        - name: VideoCaptureCardinality
          ref: '[video-capture].[instances]'

197
optimiser-controller/src/test/resources/vela-deployment-parameterized.yaml Normal file

@@ -0,0 +1,197 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: surveillance-demo
  namespace: default
spec:
  components:
    - name: kafka-server
      type: webservice
      properties:
        image: confluentinc/cp-kafka:7.2.1
        hostname: kafka-server
        ports:
          - port: 9092
            expose: true
          - port: 9093
            expose: true
          - port: 29092
            expose: true
        cpu: "1"
        memory: "2000Mi"
        cmd: [ "/bin/bash", "/tmp/run_workaround.sh" ]
        env:
          - name: KAFKA_NODE_ID
            value: "1"
          - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
            value: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
          - name: KAFKA_LISTENERS
            value: "PLAINTEXT://0.0.0.0:9092,PLAINTEXT_HOST://0.0.0.0:29092,CONTROLLER://0.0.0.0:9093"
          - name: KAFKA_ADVERTISED_LISTENERS
            value: "PLAINTEXT://kafka-server:9092,PLAINTEXT_HOST://212.101.173.161:29092"
          - name: KAFKA_CONTROLLER_LISTENER_NAMES
            value: "CONTROLLER"
          - name: KAFKA_CONTROLLER_QUORUM_VOTERS
            value: "1@0.0.0.0:9093"
          - name: KAFKA_PROCESS_ROLES
            value: "broker,controller"
        # volumeMounts:
        #   configMap:
        #     - name: configmap-example-1
        #       mountPath: /tmp
        #       cmName: configmap-example-1
        #       defaultMod: 777
      traits:
        - type: storage
          properties:
            configMap:
              - name: kafka-init
                mountPath: /tmp
                data:
                  run_workaround.sh: |-
                    #!/bin/sh
                    sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure
                    sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure
                    echo "kafka-storage format --ignore-formatted -t NqnEdODVKkiLTfJvqd1uqQ== -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure
                    /etc/confluent/docker/run
    - name: kafka-ui
      type: webservice
      properties:
        image: provectuslabs/kafka-ui:cd9bc43d2e91ef43201494c4424c54347136d9c0
        exposeType: NodePort
        ports:
          - port: 8080
            expose: true
            nodePort: 30001
        cpu: "0.3"
        memory: "512Mi"
        env:
          - name: KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS
            value: "kafka-server:9092"
    - name: video-capture
      type: webservice
      properties:
        image: registry.ubitech.eu/nebulous/use-cases/surveillance-dsl-demo/video-capture:1.1.0
        cpu: "0.2"
        memory: "100Mi"
        env:
          - name: KAFKA_URL
            value: "kafka-server:9092"
          - name: KAFKA_DETECTION_TOPIC
            value: "surveillance"
          - name: CAPTURE_VIDEO
            value: "False"
          - name: CAPTURE_DEVICE
            value: "/dev/video0"
          - name: DEBUG
            value: "True"
          - name: HOSTNAME
            value: "docker-capture"
        volumeMounts:
          hostPath:
            - name: video
              mountPath: "/dev/video1"
              path: "/dev/video0"
      traits:
        - type: affinity
          properties:
            nodeAffinity:
              required:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: "kubernetes.io/hostname"
                        operator: "In"
                        values: ["nebulousk8s-worker-1"]
        # devices:
        #   - /dev/video0:/dev/video0
    - name: face-detection
      type: webservice
      properties:
        image: registry.ubitech.eu/nebulous/use-cases/surveillance-dsl-demo/face-detection:1.2.0
        edge:
          cpu: {faceDetection.edge.cpu in {1.2..3.0} }
          memory: {faceDetection.edge.memory in {250..1000} }
          env:
            - name: KAFKA_URL
              value: "kafka-server:9092"
            - name: KAFKA_DETECTION_TOPIC
              value: "surveillance"
            - name: THREADS_COUNT
              value: "1"
            - name: STORE_METRIC
              value: "False"
            - name: DEBUG
              value: "True"
        cloud:
          cpu: {faceDetection.cloud.cpu in {3.0..6.0} }
          memory: {faceDetection.cloud.memory in {1000..4000} }
          env:
            - name: KAFKA_URL
              value: "kafka-server:9092"
            - name: KAFKA_DETECTION_TOPIC
              value: "surveillance"
            - name: THREADS_COUNT
              value: "1"
            - name: STORE_METRIC
              value: "False"
            - name: DEBUG
              value: "True"
      traits:
        - type: affinity
          properties:
            podAntiAffinity:
              required:
                - labelSelector:
                    matchExpressions:
                      - key: "app.oam.dev/component"
                        operator: "In"
                        values: [ "video-capture" ]
                  topologyKey: "test"
        - type: nodePlacement
          properties:
            cloudWorkers:
              count: {faceDetection.cloudWorkers.count in {2..10} }
              nodeSelector:
                - name: {faceDetection.cloudWorkers.cloud.node.label[ faceDetection.cloudWorkers.cloud.node.count ];
                         faceDetection.cloudWorkers.cloud.node.count in {1..3} }
                  value: {faceDetection.cloudWorkers.cloud.node.instances[i] in {0..faceDetection.cloudWorkers.count};
                          i in {1..faceDetection.cloudWorkers.cloud.node.count} }
            edgeWorkers:
              count: {faceDetection.edgeWorkers.count in {0..5} }
              nodeSelector:
                - name: {faceDetection.edgeWorkers.edge.node.label[ faceDetection.edgeWorkers.edge.node.count ];
                         faceDetection.edgeWorkers.edge.node.count in {1..3} }
                  value: {faceDetection.edgeWorkers.edge.node.instances[i] in {0..faceDetection.edgeWorkers.count};
                          i in {1..faceDetection.edgeWorkers.edge.node.count} }
        - type: geoLocation
          properties:
            affinity:
              required:
                - labelSelector:
                    - key: "continent"
                      operator: "In"
                      values: ["Europe"]
    - name: video-player
      type: webservice
      properties:
        image: registry.ubitech.eu/nebulous/use-cases/surveillance-dsl-demo/video-player:1.1.0
        exposeType: NodePort
        env:
          - name: KAFKA_URL
            value: "kafka-server:9092"
          - name: DEBUG
            value: "True"
          - name: SERVER_PORT
            value: "8081"
        ports:
          - port: 8081
            expose: true
            nodePort: 30002

197
optimiser-controller/src/test/resources/vela-deployment.yaml Normal file

@@ -0,0 +1,197 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: surveillance-demo
  namespace: default
spec:
  components:
    - name: kafka-server
      type: webservice
      properties:
        image: confluentinc/cp-kafka:7.2.1
        hostname: kafka-server
        ports:
          - port: 9092
            expose: true
          - port: 9093
            expose: true
          - port: 29092
            expose: true
        cpu: "1"
        memory: "2000Mi"
        cmd: [ "/bin/bash", "/tmp/run_workaround.sh" ]
        env:
          - name: KAFKA_NODE_ID
            value: "1"
          - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
            value: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
          - name: KAFKA_LISTENERS
            value: "PLAINTEXT://0.0.0.0:9092,PLAINTEXT_HOST://0.0.0.0:29092,CONTROLLER://0.0.0.0:9093"
          - name: KAFKA_ADVERTISED_LISTENERS
            value: "PLAINTEXT://kafka-server:9092,PLAINTEXT_HOST://212.101.173.161:29092"
          - name: KAFKA_CONTROLLER_LISTENER_NAMES
            value: "CONTROLLER"
          - name: KAFKA_CONTROLLER_QUORUM_VOTERS
            value: "1@0.0.0.0:9093"
          - name: KAFKA_PROCESS_ROLES
            value: "broker,controller"
        # volumeMounts:
        #   configMap:
        #     - name: configmap-example-1
        #       mountPath: /tmp
        #       cmName: configmap-example-1
        #       defaultMod: 777
      traits:
        - type: storage
          properties:
            configMap:
              - name: kafka-init
                mountPath: /tmp
                data:
                  run_workaround.sh: |-
                    #!/bin/sh
                    sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure
                    sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure
                    echo "kafka-storage format --ignore-formatted -t NqnEdODVKkiLTfJvqd1uqQ== -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure
                    /etc/confluent/docker/run
    - name: kafka-ui
      type: webservice
      properties:
        image: provectuslabs/kafka-ui:cd9bc43d2e91ef43201494c4424c54347136d9c0
        exposeType: NodePort
        ports:
          - port: 8080
            expose: true
            nodePort: 30001
        cpu: "0.3"
        memory: "512Mi"
        env:
          - name: KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS
            value: "kafka-server:9092"
    - name: video-capture
      type: webservice
      properties:
        image: registry.ubitech.eu/nebulous/use-cases/surveillance-dsl-demo/video-capture:1.1.0
        cpu: "0.2"
        memory: "100Mi"
        env:
          - name: KAFKA_URL
            value: "kafka-server:9092"
          - name: KAFKA_DETECTION_TOPIC
            value: "surveillance"
          - name: CAPTURE_VIDEO
            value: "False"
          - name: CAPTURE_DEVICE
            value: "/dev/video0"
          - name: DEBUG
            value: "True"
          - name: HOSTNAME
            value: "docker-capture"
        volumeMounts:
          hostPath:
            - name: video
              mountPath: "/dev/video1"
              path: "/dev/video0"
      traits:
        - type: affinity
          properties:
            nodeAffinity:
              required:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: "kubernetes.io/hostname"
                        operator: "In"
                        values: ["nebulousk8s-worker-1"]
        # devices:
        #   - /dev/video0:/dev/video0
    - name: face-detection
      type: webservice
      properties:
        image: registry.ubitech.eu/nebulous/use-cases/surveillance-dsl-demo/face-detection:1.2.0
        edge:
          cpu: "1.2"
          memory: "512Mi"
          env:
            - name: KAFKA_URL
              value: "kafka-server:9092"
            - name: KAFKA_DETECTION_TOPIC
              value: "surveillance"
            - name: THREADS_COUNT
              value: "1"
            - name: STORE_METRIC
              value: "False"
            - name: DEBUG
              value: "True"
        cloud:
          cpu: "1.2"
          memory: "512Mi"
          env:
            - name: KAFKA_URL
              value: "kafka-server:9092"
            - name: KAFKA_DETECTION_TOPIC
              value: "surveillance"
            - name: THREADS_COUNT
              value: "1"
            - name: STORE_METRIC
              value: "False"
            - name: DEBUG
              value: "True"
      traits:
        - type: affinity
          properties:
            podAntiAffinity:
              required:
                - labelSelector:
                    matchExpressions:
                      - key: "app.oam.dev/component"
                        operator: "In"
                        values: [ "video-capture" ]
                  topologyKey: "test"
        - type: nodePlacement
          properties:
            cloudWorkers:
              count: 6
              nodeSelector:
                - name: node1
                  value: 2
                - name: node2
                  value: 1
                - name: node3
                  value: 3
            edgeWorkers:
              count: 3
              nodeSelector:
                - name: node4
                  value: 2
                - name: node5
                  value: 1
        - type: geoLocation
          properties:
            affinity:
              required:
                - labelSelector:
                    - key: "continent"
                      operator: "In"
                      values: ["Europe"]
    - name: video-player
      type: webservice
      properties:
        image: registry.ubitech.eu/nebulous/use-cases/surveillance-dsl-demo/video-player:1.1.0
        exposeType: NodePort
        env:
          - name: KAFKA_URL
            value: "kafka-server:9092"
          - name: DEBUG
            value: "True"
          - name: SERVER_PORT
            value: "8081"
        ports:
          - port: 8081
            expose: true
            nodePort: 30002

15
settings.gradle Normal file

@@ -0,0 +1,15 @@
/*
 * This file was generated by the Gradle 'init' task.
 *
 * The settings file is used to specify which projects to include in your build.
 * For more detailed information on multi-project builds, please refer to https://docs.gradle.org/8.4/userguide/multi_project_builds.html in the Gradle documentation.
 */

plugins {
    // Apply the foojay-resolver plugin to allow automatic download of JDKs
    id 'org.gradle.toolchains.foojay-resolver-convention' version '0.7.0'
}

rootProject.name = 'optimiser-controller'
include('optimiser-controller')