Deployment fixes

- Better logging during app deployment phase.
- Minimize use of "quotes" in generated YAML.
- Use `component=yes` instead of `component=true` when labeling nodes.
- Introduce a flag controlling whether to include nebulous-specific node requirements, or only those expressed in the KubeVela file itself.

Change-Id: Id0ac97538942819f36d324da764a7b7f2c7f944d
parent dfe6479a43
commit cf552733b6
@@ -90,7 +90,8 @@ public class KubevelaAnalyzer {
      *
      * Notes:<p>
      *
-     * - We add the requirement that OS family == Ubuntu.<p>
+     * - When asked to, we add the requirement that OS family == Ubuntu and
+     *   memory >= 2GB.<p>
      *
      * - For the first version, we specify all requirements as "greater or
      *   equal", i.e., we might not find precisely the node candidates that
@@ -103,18 +104,24 @@ public class KubevelaAnalyzer {
      * provided by cloud providers. <p>
      *
      * @param kubevela the parsed KubeVela file.
+     * @param includeNebulousRequirements if true, include requirements for
+     *   minimum memory size, Ubuntu OS. These requirements ensure that the
+     *   node candidate can run the Nebulous software.
      * @return a map of component name to (potentially empty, except for OS
      *   family) list of requirements for that component. No requirements mean
      *   any node will suffice.
      */
-    public static Map<String, List<Requirement>> getRequirements(JsonNode kubevela) {
+    public static Map<String, List<Requirement>> getRequirements(JsonNode kubevela, boolean includeNebulousRequirements) {
         Map<String, List<Requirement>> result = new HashMap<>();
         ArrayNode components = kubevela.withArray("/spec/components");
         for (final JsonNode c : components) {
             String componentName = c.get("name").asText();
             ArrayList<Requirement> reqs = new ArrayList<>();
+            if (includeNebulousRequirements) {
                 reqs.add(new AttributeRequirement("image", "operatingSystem.family",
                     RequirementOperator.IN, OperatingSystemFamily.UBUNTU.toString()));
+                reqs.add(new AttributeRequirement("hardware", "ram", RequirementOperator.GEQ, "2048"));
+            }
             JsonNode cpu = c.at("/properties/cpu");
             if (cpu.isMissingNode()) cpu = c.at("/properties/resources/requests/cpu");
             if (!cpu.isMissingNode()) {
@@ -125,8 +132,7 @@ public class KubevelaAnalyzer {
                 try {
                     kubevela_cpu = Double.parseDouble(cpu.asText());
                 } catch (NumberFormatException e) {
-                    log.warn("CPU spec in {} is not a number, value seen is {}",
-                        componentName, cpu.asText());
+                    log.warn("CPU spec in " + componentName + " is not a number, value seen is " + cpu.asText());
                 }
                 long sal_cores = Math.round(Math.ceil(kubevela_cpu));
                 if (sal_cores > 0) {
@@ -134,22 +140,19 @@ public class KubevelaAnalyzer {
                         RequirementOperator.GEQ, Long.toString(sal_cores)));
                 } else {
                     // floatValue returns 0.0 if node is not numeric
-                    log.warn("CPU of component {} is 0 or not a number, value seen is {}",
-                        componentName, cpu.asText());
+                    log.warn("CPU spec in " + componentName + " is not a number, value seen is " + cpu.asText());
                 }
             }
             JsonNode memory = c.at("/properties/memory");
             if (memory.isMissingNode()) cpu = c.at("/properties/resources/requests/memory");
-            if (!memory.isMissingNode()) {;
+            if (!memory.isMissingNode()) {
                 String sal_memory = memory.asText();
                 if (sal_memory.endsWith("Mi")) {
                     sal_memory = sal_memory.substring(0, sal_memory.length() - 2);
                 } else if (sal_memory.endsWith("Gi")) {
                     sal_memory = String.valueOf(Integer.parseInt(sal_memory.substring(0, sal_memory.length() - 2)) * 1024);
                 } else if (!memory.isNumber()) {
-                    log.warn("Unsupported memory specification in component {} :{} (wanted 'Mi' or 'Gi') ",
-                        componentName,
-                        memory.asText());
+                    log.warn("Unsupported memory specification in component " + componentName + " : " + memory.asText() + " (wanted 'Mi' or 'Gi') ");
                     sal_memory = null;
                 }
                 // Fall-through: we rewrote the KubeVela file and didn't add
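For reference, a minimal standalone sketch of the Mi/Gi handling above (a hypothetical helper, not part of the commit): values such as "512Mi" pass through as "512", "2Gi" becomes "2048", and anything else is assumed to already be a plain number of megabytes.

// Sketch only; mirrors the suffix handling in the hunk above.
static String toMegabytes(String memory) {
    if (memory.endsWith("Mi")) {
        return memory.substring(0, memory.length() - 2);   // "512Mi" -> "512"
    } else if (memory.endsWith("Gi")) {
        // "2Gi" -> "2048"
        return String.valueOf(Integer.parseInt(memory.substring(0, memory.length() - 2)) * 1024);
    }
    return memory;  // assume the value is already a plain number of MB
}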
@@ -169,6 +172,17 @@ public class KubevelaAnalyzer {
         return result;
     }
 
+    /**
+     * Get node requirements for app components, including nebulous-specific
+     * requirements. This method calls {@link #getRequirements(JsonNode,
+     * boolean)} with second parameter {@code true}.
+     *
+     * @see #getRequirements(JsonNode, boolean)
+     */
+    public static Map<String, List<Requirement>> getRequirements(JsonNode kubevela) {
+        return getRequirements(kubevela, true);
+    }
+
     /**
      * Extract node requirements from a KubeVela file.
      *
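A rough usage sketch of the two getRequirements overloads (hypothetical caller code; the file path is an assumption, and the project classes KubevelaAnalyzer and Requirement are taken to be on the classpath):

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;

// Sketch of a hypothetical caller (not part of the commit).
static void printRequirements(Path kubevelaFile) throws Exception {
    ObjectMapper yaml = new ObjectMapper(new YAMLFactory());
    JsonNode kubevela = yaml.readTree(Files.readString(kubevelaFile));

    // One-argument overload: includes the NebulOuS baseline (Ubuntu, >= 2048 MB RAM).
    Map<String, List<Requirement>> full = KubevelaAnalyzer.getRequirements(kubevela);

    // Two-argument overload with false: only the requirements expressed in the KubeVela file itself.
    Map<String, List<Requirement>> kubevelaOnly = KubevelaAnalyzer.getRequirements(kubevela, false);

    System.out.println(full);
    System.out.println(kubevelaOnly);
}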
@@ -512,7 +512,7 @@ public class ExnConnector {
                 "body", mapper.writeValueAsString(body));
         } catch (JsonProcessingException e) {
             log.error("Could not convert JSON to string (this should never happen)",
-                keyValue("appId", appID), e);
+                keyValue("appId", appID), keyValue("clusterName", clusterName), e);
             return -1;
         }
         Map<String, Object> response = deployApplication.sendSync(msg, appID, null, false);
@@ -21,6 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.node.ArrayNode;
 import com.fasterxml.jackson.databind.node.ObjectNode;
 import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import com.fasterxml.jackson.dataformat.yaml.YAMLGenerator;
 
 import lombok.extern.slf4j.Slf4j;
 import static net.logstash.logback.argument.StructuredArguments.keyValue;
@@ -33,7 +34,8 @@ import static net.logstash.logback.argument.StructuredArguments.keyValue;
 @Slf4j
 public class NebulousAppDeployer {
 
-    private static final ObjectMapper yamlMapper = new ObjectMapper(new YAMLFactory());
+    private static final ObjectMapper yamlMapper
+        = new ObjectMapper(YAMLFactory.builder().enable(YAMLGenerator.Feature.MINIMIZE_QUOTES).build());
     private static final ObjectMapper mapper = new ObjectMapper();
 
     /**
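To illustrate the MINIMIZE_QUOTES change, a small self-contained sketch (not part of the commit): by default Jackson's YAML backend double-quotes every string scalar, while the enabled feature leaves plain scalars unquoted.

import java.util.LinkedHashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.fasterxml.jackson.dataformat.yaml.YAMLGenerator;

public class MinimizeQuotesDemo {
    public static void main(String[] args) throws Exception {
        Map<String, Object> doc = new LinkedHashMap<>();
        doc.put("apiVersion", "core.oam.dev/v1beta1");
        doc.put("kind", "Application");

        ObjectMapper quoted = new ObjectMapper(new YAMLFactory());
        ObjectMapper minimized = new ObjectMapper(
            YAMLFactory.builder().enable(YAMLGenerator.Feature.MINIMIZE_QUOTES).build());

        System.out.println(quoted.writeValueAsString(doc));
        // prints roughly:
        // ---
        // apiVersion: "core.oam.dev/v1beta1"
        // kind: "Application"

        System.out.println(minimized.writeValueAsString(doc));
        // prints roughly:
        // ---
        // apiVersion: core.oam.dev/v1beta1
        // kind: Application
    }
}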
@@ -53,7 +55,7 @@ public class NebulousAppDeployer {
      * Produce a fresh KubeVela specification with added node affinity traits.
      *
      * During deployment and redeployment, we label all nodes with {@code
-     * nebulouscloud.eu/<componentname>=true}. (Note that with this scheme, a
+     * nebulouscloud.eu/<componentname>=yes}. (Note that with this scheme, a
      * node can have labels for multiple components if desired.) We add the
      * following trait to all components:
      *
@@ -67,7 +69,7 @@ public class NebulousAppDeployer {
      *       - matchExpressions:
      *         - key: "nebulouscloud.eu/<componentname>"
      *           operator: In
-     *           values: "true"
+     *           values: "yes"
      * }</pre>
      *
      * @param kubevela the KubeVela specification to modify. This parameter is
@@ -86,7 +88,7 @@ public class NebulousAppDeployer {
             ObjectNode term = matchExpressions.addObject();
             term.put("key", "nebulouscloud.eu/" + name)
                 .put("operator", "In")
-                .withArray("values").add("true");
+                .withArray("values").add("yes");
         }
         return result;
     }
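For illustration, a self-contained sketch of what the chained Jackson calls above produce once serialized ("my-component" is a hypothetical component name, not from the commit):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class AffinityTermDemo {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        ArrayNode matchExpressions = mapper.createArrayNode();

        // Same chained calls as in the hunk above, with a hypothetical component name.
        ObjectNode term = matchExpressions.addObject();
        term.put("key", "nebulouscloud.eu/" + "my-component")
            .put("operator", "In")
            .withArray("values").add("yes");

        System.out.println(mapper.writeValueAsString(matchExpressions));
        // [{"key":"nebulouscloud.eu/my-component","operator":"In","values":["yes"]}]
    }
}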
@@ -252,7 +254,7 @@ public class NebulousAppDeployer {
                 app.getNodeEdgeCandidates().put(nodeName, candidate);
             }
             clusterNodes.put(nodeName, candidate);
-            nodeLabels.addObject().put(nodeName, "nebulouscloud.eu/" + componentName + "=true");
+            nodeLabels.addObject().put(nodeName, "nebulouscloud.eu/" + componentName + "=yes");
             nodeNames.add(nodeName);
         }
         app.getComponentNodeNames().put(componentName, nodeNames);
@@ -315,6 +317,11 @@ public class NebulousAppDeployer {
             } catch (InterruptedException e1) {
                 // ignore
             }
+            // TODO: distinguish between clusterState==null because SAL hasn't
+            // set up its datastructures yet, and clusterState==null because
+            // the call to getCluster failed. In the latter case we want to
+            // abort (because someone has deleted the cluster), in the former
+            // case we want to continue.
             clusterState = conn.getCluster(clusterName);
         }
 
@@ -342,6 +349,8 @@ public class NebulousAppDeployer {
 
         log.info("Calling deployApplication", keyValue("appId", appUUID), keyValue("clusterName", clusterName));
         long proActiveJobID = conn.deployApplication(appUUID, clusterName, app.getName(), rewritten_kubevela);
+        log.info("deployApplication returned ProActive Job ID {}", proActiveJobID,
+            keyValue("appId", appUUID), keyValue("clusterName", clusterName));
         if (proActiveJobID == 0) {
             // 0 means conversion from long has failed (because of an invalid
             // response), OR a ProActive job id of 0.
@@ -358,6 +367,8 @@ public class NebulousAppDeployer {
         app.setComponentRequirements(componentRequirements);
         app.setComponentReplicaCounts(nodeCounts);
         app.setDeployedKubevela(rewritten);
+        log.info("App deployment finished.",
+            keyValue("appId", appUUID), keyValue("clusterName", clusterName));
     }
 
     /**