
Fix ptp-notification status reporting for multiple ptp instances

Fixed two issues:

1. The default context used to initialize each ptp service was
   incorrectly copied by reference. On startup, the initial context for
   each ptp service was set to default values with a copy-by-reference,
   so updates propagated to every instance when they shouldn't.
   Corrected the initialization to copy by value instead. This fixes
   subscription notifications reporting the wrong status, e.g. if one
   ptp4l instance was Freerun, all ptp4l instances were updated to
   Freerun. The fix ensures that only the affected instance is updated.

2. Nodes running as master now leave the Locked state when their ports
   are FAULTY. The original logic did not consider the port state for
   master nodes. Added a check that changes the ptp4l lock status when
   ports enter a fault state.

Test Plan:
PASS: Build and deploy ptp-notification app
PASS: Multiple ptp instance contexts are updated independently and
      notifications now report the correct state.
PASS: Master nodes transition their ptp4l status when ports go faulty.

Story: 2010056
Task: 46417
Signed-off-by: Cole Walker <cole.walker@windriver.com>
Change-Id: I3d2bfa62efeefc5aca282d80eccc5d825d7ef900
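
Illustration of the first fix (a minimal sketch; DEFAULT_CONTEXT and
ptp_contexts are hypothetical names, not the app's actual identifiers):

    import copy

    DEFAULT_CONTEXT = {'sync_state': 'Freerun'}

    # Broken: every instance shares one dict object, so updating the
    # status of one ptp4l instance updates all of them.
    ptp_contexts = {name: DEFAULT_CONTEXT
                    for name in ('ptp4l-1', 'ptp4l-2')}

    # Fixed: copy the defaults by value so each instance owns its context.
    ptp_contexts = {name: copy.deepcopy(DEFAULT_CONTEXT)
                    for name in ('ptp4l-1', 'ptp4l-2')}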
#! /usr/bin/python3
#
# Copyright (c) 2021-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

#
# This script provides the PTP synchronization status
# for a PTP NIC configured as subordinate (slave mode).
# It relies on the Linux ptp4l PMC utility in order to work.
# Sync status is provided as: 'Locked', 'Holdover', or 'Freerun'.
#
#
import os
import os.path
import re
import subprocess
import logging

from trackingfunctionsdk.common.helpers import constants
from trackingfunctionsdk.common.helpers import log_helper

LOG = logging.getLogger(__name__)
log_helper.config_logger(LOG)


# Run a command in the given directory via a subprocess and return its
# stdout, stderr, and exit code. Note: 'ctx' is currently unused and
# 'dir' shadows the builtin of the same name.
def run_shell2(dir, ctx, args):
    cwd = os.getcwd()
    os.chdir(dir)

    process = subprocess.Popen(args, shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    out, err = process.communicate()
    errcode = process.returncode

    os.chdir(cwd)

    return out, err, errcode
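
# Illustrative usage of run_shell2 (the pmc invocation shown is an
# assumption for demonstration, not taken from this file):
#
#   out, err, errcode = run_shell2(
#       '/tmp', None, 'pmc -u -b 0 "GET PORT_DATA_SET"')
#   if errcode == 0:
#       print(out.decode())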


def check_critical_resources(ptp4l_service_name, phc2sys_service_name):
    pmc = False
    ptp4l = False
    phc2sys = False
    ptp4lconf = False

    if os.path.isfile('/usr/sbin/pmc'):
        pmc = True
    if os.path.isfile('/var/run/ptp4l-%s.pid' % ptp4l_service_name):
        ptp4l = True
    if os.path.isfile('/var/run/phc2sys-%s.pid' % phc2sys_service_name):
        phc2sys = True
    if os.path.isfile('/ptp/ptpinstance/ptp4l-%s.conf' % ptp4l_service_name):
        ptp4lconf = True
    return pmc, ptp4l, phc2sys, ptp4lconf
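
# Illustrative usage of check_critical_resources (the instance names
# are assumptions):
#
#   pmc, ptp4l, phc2sys, ptp4lconf = check_critical_resources(
#       'ptp4l-instance-1', 'phc2sys-instance-1')
#   if not all([pmc, ptp4l, phc2sys, ptp4lconf]):
#       LOG.warning('critical resource missing, cannot query sync status')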


def check_results(result, total_ptp_keywords, port_count):
    # Sync state starts as 'Locked' and is overwritten below if any
    # check fails.
    sync_state = constants.LOCKED_PHC_STATE

    local_gm = False

    # Check for a healthy (complete) result set.
    if len(result) != total_ptp_keywords:
        sync_state = constants.FREERUN_PHC_STATE
        LOG.warning('results are not complete, returning FREERUN')
        return sync_state
    # Determine the current sync state.
    if (result[constants.GM_PRESENT].lower() != constants.GM_IS_PRESENT
            and result[constants.GRANDMASTER_IDENTITY] !=
            result[constants.CLOCK_IDENTITY]):
        sync_state = constants.FREERUN_PHC_STATE
    elif result[constants.GRANDMASTER_IDENTITY] == result[constants.CLOCK_IDENTITY]:
        local_gm = True
        LOG.debug("Local node is a GM")
    # At least one slave port (or, on a local GM, a master port) must be
    # present; otherwise the node is not synchronized.
    for port in range(1, port_count + 1):
        if result[constants.PORT.format(port)].lower() == constants.SLAVE_MODE:
            break
        elif (local_gm and
              result[constants.PORT.format(port)].lower() == constants.MASTER_MODE):
            break
    else:
        sync_state = constants.FREERUN_PHC_STATE
    if (result[constants.TIME_TRACEABLE] != constants.TIME_IS_TRACEABLE1
            and result[constants.TIME_TRACEABLE].lower() !=
            constants.TIME_IS_TRACEABLE2):
        sync_state = constants.FREERUN_PHC_STATE
    if (result[constants.GM_CLOCK_CLASS] not in
            [constants.CLOCK_CLASS_VALUE1,
             constants.CLOCK_CLASS_VALUE2,
             constants.CLOCK_CLASS_VALUE3]):
        sync_state = constants.FREERUN_PHC_STATE
    return sync_state
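
# Illustrative shape of the 'result' dict consumed by check_results, as
# parsed from pmc output elsewhere in the app. The keys come from the
# constants module; the values shown (and the keyword/port counts) are
# assumptions for a single-port slave node:
#
#   result = {
#       constants.GM_PRESENT: 'true',
#       constants.GRANDMASTER_IDENTITY: '001122.fffe.334455',
#       constants.CLOCK_IDENTITY: '665544.fffe.332211',
#       constants.TIME_TRACEABLE: '1',
#       constants.GM_CLOCK_CLASS: constants.CLOCK_CLASS_VALUE1,
#       constants.PORT.format(1): 'slave',
#   }
#   sync_state = check_results(result, total_ptp_keywords=6, port_count=1)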


def parse_resource_address(resource_address):
    # The format of a resource address is:
    # /{clusterName}/{siteName}(/optional/hierarchy/..)/{nodeName}/{resource}
    # Assume no optional hierarchy for now.
    clusterName = resource_address.split('/')[1]
    nodeName = resource_address.split('/')[2]
    resource_path = '/' + re.split('[/]', resource_address, 3)[3]
    return clusterName, nodeName, resource_path
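
# Illustrative parse (the address values are assumptions, mirroring the
# '/./{node}' form produced by format_resource_address below):
#
#   parse_resource_address('/./node-1/sync/sync-status/sync-state')
#   # -> ('.', 'node-1', '/sync/sync-status/sync-state')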


def format_resource_address(node_name, resource):
    # Return a resource_address for the given node and resource path.
    resource_address = '/./' + node_name + resource
    return resource_address
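
# Illustrative formatting (node name and resource path values are
# assumptions):
#
#   format_resource_address('node-1', '/sync/sync-status/sync-state')
#   # -> '/./node-1/sync/sync-status/sync-state'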