author     Oleksandr Terentiev <oterenti@cisco.com>    2018-11-28 04:38:52 -0800
committer  Aníbal Limón <anibal.limon@linaro.org>      2018-12-13 15:39:06 -0600
commit     03e82bcd3d5bb0b00bd27805a62ef23568af5f1d
tree       fadf8bf122c32625e8a81d4fe629c7d07464b561
parent     10df68a28a6d5e525f743b0c708ba06d13d191fc
automated/linux/ptest: Analyze each test in package tests
Currently ptest.py analyzes only the exit code of each package's ptest run to decide whether it passed. However, ptest-runner can return a success code even though some of the tests inside the package failed, so the test output has to be parsed and analyzed as well. It is also quite useful to see exactly which tests failed, so a result is now recorded for each individual test, and the lava-test-set feature is used to group the results by package.

Test results: https://validation.linaro.org/results/1900356

automated/utils/send-to-lava.sh: Add support to send lava-test-set results

Change-Id: Id1c2e8190b9cce5cfa9f08e64322667f8e1a0c28
Signed-off-by: Oleksandr Terentiev <oterenti@cisco.com>
Signed-off-by: Aníbal Limón <anibal.limon@linaro.org>
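As an illustration of the new reporting flow (the package and test names below are hypothetical, not taken from a real run), ptest-runner output such as

    PASS: test_foo
    FAIL: test_bar
    SKIP: test_baz

for a package named "openssl" would now end up in result.txt roughly as

    lava-test-set start openssl
    openssl pass
    test_foo pass
    test_bar fail
    test_baz skip
    lava-test-set stop openssl

where the package-level line still reflects the ptest-runner exit code and the per-test lines come from parsing the runner output.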
-rwxr-xr-x  automated/linux/ptest/ptest.py    56
-rw-r--r--  automated/linux/ptest/ptest.yaml   2
-rwxr-xr-x  automated/utils/send-to-lava.sh   14
3 files changed, 62 insertions, 10 deletions
diff --git a/automated/linux/ptest/ptest.py b/automated/linux/ptest/ptest.py
index 13feb4de..386792fc 100755
--- a/automated/linux/ptest/ptest.py
+++ b/automated/linux/ptest/ptest.py
@@ -85,18 +85,56 @@ def filter_ptests(ptests, requested_ptests, exclude):
     return filter_ptests
 
 
-def check_ptest(ptest_dir, ptest_name, output_log):
-    status = 'pass'
+def parse_line(line):
+    test_status_list = {
+        'pass': re.compile("^PASS:(.+)"),
+        'fail': re.compile("^FAIL:(.+)"),
+        'skip': re.compile("^SKIP:(.+)")
+    }
+
+    for test_status, status_regex in test_status_list.items():
+        test_name = status_regex.search(line)
+        if test_name:
+            return [test_name.group(1), test_status]
 
-    try:
-        output = subprocess.check_call('ptest-runner -d %s %s' %
-                                       (ptest_dir, ptest_name), shell=True,
-                                       stderr=subprocess.STDOUT)
-    except subprocess.CalledProcessError:
-        status = 'fail'
+    return None
+
+
+def run_ptest(command):
+    results = []
+    process = subprocess.Popen(command,
+                               shell=True,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.STDOUT)
+    while True:
+        output = process.stdout.readline()
+        try:
+            output = unicode(output, "utf-8").strip()
+        except:
+            output = output.decode("utf-8").strip()
+        if len(output) == 0 and process.poll() is not None:
+            break
+        if output:
+            print(output)
+            result_tuple = parse_line(output)
+            if result_tuple:
+                results.append(result_tuple)
+
+    rc = process.poll()
+    return rc, results
+
+
+def check_ptest(ptest_dir, ptest_name, output_log):
+    log_name = os.path.join(os.getcwd(), '%s.log' % ptest_name)
+    status, results = run_ptest('ptest-runner -d %s %s' % (ptest_dir, ptest_name))
 
     with open(output_log, 'a+') as f:
-        f.write("%s %s\n" % (ptest_name, status))
+        f.write("lava-test-set start %s\n" % ptest_name)
+        f.write("%s %s\n" % (ptest_name, "pass" if status == 0 else "fail"))
+        for test, test_status in results:
+            test = test.encode("ascii", errors="ignore").decode()
+            f.write("%s %s\n" % (re.sub(r'[^\w-]', '', test), test_status))
+        f.write("lava-test-set stop %s\n" % ptest_name)
 
 
 def main():
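A minimal sketch of how the parse_line() helper added above classifies runner output; the sample lines and names are hypothetical, not taken from a real ptest run:

    import re

    # Mirrors the classification table introduced in the patch.
    STATUS_RE = {
        'pass': re.compile("^PASS:(.+)"),
        'fail': re.compile("^FAIL:(.+)"),
        'skip': re.compile("^SKIP:(.+)")
    }

    def parse_line(line):
        # Return [test_name, status] for a recognized line, None otherwise.
        for status, regex in STATUS_RE.items():
            match = regex.search(line)
            if match:
                return [match.group(1), status]
        return None

    for sample in ["PASS: bn_test", "FAIL: ec_test", "START: openssl"]:
        print(parse_line(sample))
    # prints: [' bn_test', 'pass'] / [' ec_test', 'fail'] / None

Note that the captured group keeps the leading space after "PASS:"; check_ptest() strips it (along with any other non-word characters) via re.sub(r'[^\w-]', '', test) before writing the result line. Lines that match none of the PASS/FAIL/SKIP prefixes are ignored, so other runner chatter does not pollute the result file.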
diff --git a/automated/linux/ptest/ptest.yaml b/automated/linux/ptest/ptest.yaml
index 854af0b2..6205c112 100644
--- a/automated/linux/ptest/ptest.yaml
+++ b/automated/linux/ptest/ptest.yaml
@@ -21,5 +21,5 @@ params:
 run:
     steps:
         - cd ./automated/linux/ptest
-        - ./ptest.py -o ./result.txt -t ${TESTS} -e ${EXCLUDE}
+        - PYTHONIOENCODING=UTF-8 ./ptest.py -o ./result.txt -t ${TESTS} -e ${EXCLUDE}
         - ../../utils/send-to-lava.sh ./result.txt
diff --git a/automated/utils/send-to-lava.sh b/automated/utils/send-to-lava.sh
index bf2a4778..fdfdf784 100755
--- a/automated/utils/send-to-lava.sh
+++ b/automated/utils/send-to-lava.sh
@@ -4,6 +4,8 @@ RESULT_FILE="$1"
 which lava-test-case > /dev/null 2>&1
 lava_test_case="$?"
+which lava-test-set > /dev/null 2>&1
+lava_test_set="$?"
 
 if [ -f "${RESULT_FILE}" ]; then
     while read -r line; do
@@ -31,6 +33,18 @@ if [ -f "${RESULT_FILE}" ]; then
             else
                 echo "<TEST_CASE_ID=${test} RESULT=${result} MEASUREMENT=${measurement} UNITS=${units}>"
             fi
+        elif echo "${line}" | egrep -iq "^lava-test-set.*"; then
+            test_set_status="$(echo "${line}" | awk '{print $2}')"
+            test_set_name="$(echo "${line}" | awk '{print $3}')"
+            if [ "${lava_test_set}" -eq 0 ]; then
+                lava-test-set "${test_set_status}" "${test_set_name}"
+            else
+                if [ "${test_set_status}" = "start" ]; then
+                    echo "<LAVA_SIGNAL_TESTSET START ${test_set_name}>"
+                else
+                    echo "<LAVA_SIGNAL_TESTSET STOP>"
+                fi
+            fi
         fi
     done < "${RESULT_FILE}"
 else