Fix SNMP trap (coldStart, warmStart, and other Fault Management alarm traps) behaviour in IPv6 environments.

Modifications have been made to the containers of the snmp-armada-app and to the deployment and values files of the helm chart package. The following changes are made:
1. The bootstrap.sh scripts of the snmp trap-subagent and sub-agent were modified to wait until the master agent container is up before initializing them.
2. Add an environment variable to set the maximum number of tries before raising a communication error and restarting the pod.
3. Add other environment variables for testing purposes that can be modified with a helm override, such as the master agent address and port (an override example is sketched below, after the values hunk).
4. Add an init_container to the snmp pod that applies a delay when the pod starts, to solve missing coldStart traps (a sketch follows the deployment hunks below).

Closes-Bug: 1943612
Signed-off-by: Jorge Saffe <jorge.saffe@windriver.com>
Change-Id: I2cd4a6309e99ba73257253077dc13b3b725043dd
parent c6c16c0682
commit f7de093305
@@ -15,8 +15,34 @@ if [ "$OPTIONDEBUG" != "" ]; then
     esac
 fi

+# Waiting master agent initialization
+if [ "$MASTER_AGENT_HOST" == "" ]; then
+    MASTER_AGENT_HOST="localhost";
+fi
+
+if [ "$MASTER_AGENT_PORT" == "" ]; then
+    MASTER_AGENT_PORT=705;
+fi
+
+if [ "$MASTER_AGENT_CONNECTION_RETRIES" == "" ]; then
+    MASTER_AGENT_CONNECTION_RETRIES=20;
+fi
+
+HOST=$MASTER_AGENT_HOST
+PORT=$MASTER_AGENT_PORT
+RETRIES=$MASTER_AGENT_CONNECTION_RETRIES
+
+echo "Waiting master agent initialization ($HOST:$PORT) [MaxRetries:$RETRIES]"
+counter=0
+until </dev/tcp/$HOST/$PORT; do
+    sleep 1;
+    [[ counter -eq $RETRIES ]] && exit 1;
+    echo "Trying again, try #$counter";
+    $((counter++));
+done
+
 # Internal params
-RUN_CMD="./snmpSubAgent ${OPTIONDEBUG} -f -x tcp:localhost:705"
+RUN_CMD="./snmpSubAgent ${OPTIONDEBUG} -f -x tcp:${HOST}:$PORT"

 # Launch
 $RUN_CMD
@@ -15,8 +15,34 @@ if [ "$OPTIONDEBUG" != "" ]; then
     esac
 fi

+# Waiting master agent initialization
+if [ "$MASTER_AGENT_HOST" == "" ]; then
+    MASTER_AGENT_HOST="localhost";
+fi
+
+if [ "$MASTER_AGENT_PORT" == "" ]; then
+    MASTER_AGENT_PORT=705;
+fi
+
+if [ "$MASTER_AGENT_CONNECTION_RETRIES" == "" ]; then
+    MASTER_AGENT_CONNECTION_RETRIES=20;
+fi
+
+HOST=$MASTER_AGENT_HOST
+PORT=$MASTER_AGENT_PORT
+RETRIES=$MASTER_AGENT_CONNECTION_RETRIES
+
+echo "Waiting master agent initialization ($HOST:$PORT) [MaxRetries:$RETRIES]"
+counter=0
+until </dev/tcp/$HOST/$PORT; do
+    sleep 1;
+    [[ counter -eq $RETRIES ]] && exit 1;
+    echo "Trying again, try #$counter";
+    $((counter++));
+done
+
 # Internal params
-RUN_CMD="./wrsAlarmMIB ${OPTIONDEBUG} -f -x tcp:localhost:705"
+RUN_CMD="./wrsAlarmMIB ${OPTIONDEBUG} -f -x tcp:${HOST}:$PORT"

 # Launch
 $RUN_CMD
@@ -68,6 +68,12 @@ spec:
           env:
             - name: "OPTIONDEBUG"
               value: {{ .Values.image.debug }}
+            - name: "MASTER_AGENT_HOST"
+              value: {{ .Values.deployment.master_agent_host | quote}}
+            - name: "MASTER_AGENT_PORT"
+              value: {{ .Values.deployment.master_agent_port | quote}}
+            - name: "MASTER_AGENT_CONNECTION_RETRIES"
+              value: {{ .Values.deployment.master_agent_connection_retries | quote}}
           # Fail on liveness and readiness as here: https://bit.ly/3nVRQrL
           # K8s issue related: https://github.com/kubernetes/kubernetes/issues/81713
           #
@@ -104,6 +110,12 @@ spec:
           env:
             - name: "OPTIONDEBUG"
               value: {{ .Values.image.debug }}
+            - name: "MASTER_AGENT_HOST"
+              value: {{ .Values.deployment.master_agent_host | quote}}
+            - name: "MASTER_AGENT_PORT"
+              value: {{ .Values.deployment.master_agent_port | quote}}
+            - name: "MASTER_AGENT_CONNECTION_RETRIES"
+              value: {{ .Values.deployment.master_agent_connection_retries | quote}}
           # Fail on liveness and readiness as here: https://bit.ly/3nVRQrL
           # K8s issue related: https://github.com/kubernetes/kubernetes/issues/81713
           #
@@ -123,7 +135,6 @@ spec:
           # timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
           # successThreshold: {{ .Values.readinessProbe.successThreshold }}
           # failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
-
       volumes:
         - name: snmpd-etc-volume
           configMap:
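The init_container described in change 4 is not part of the hunks captured above. As a rough illustration only, not the committed template, a startup-delay init container for the snmp pod could look like the following sketch; the name, image, and delay value are placeholders:

      # Hypothetical sketch -- not taken from this commit's deployment template
      initContainers:
        - name: snmp-startup-delay            # placeholder name
          image: busybox:1.34                 # placeholder image
          command: ["sh", "-c", "sleep 30"]   # placeholder startup delay

An init container like this runs to completion before the pod's main containers start, delaying the SNMP agents' startup by a fixed amount of time.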
@@ -33,6 +33,8 @@ configmap:

 deployment:
+  master_agent_host: localhost
+  master_agent_port: 705
+  master_agent_connection_retries: 20

 service:
   name: snmpd-service
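The deployment values above default to the in-pod master agent (localhost:705, 20 connection retries). As noted in change 3, they can be changed with a helm override for testing; a minimal override sketch, with placeholder values:

# snmp-overrides.yaml -- hypothetical override file, values are placeholders
deployment:
  master_agent_host: snmp-master-agent      # alternate master agent address or hostname
  master_agent_port: 707                    # alternate master agent port
  master_agent_connection_retries: 40       # allow more retries before the pod restarts

Such a file would be applied through a standard helm override mechanism (for example, helm upgrade -f snmp-overrides.yaml ...) so that the MASTER_AGENT_* environment variables in the deployment pick up the new values.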