summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorClark Laughlin <clark.laughlin@linaro.org>2015-05-08 14:16:16 -0400
committerClark Laughlin <clark.laughlin@linaro.org>2015-05-08 14:16:16 -0400
commit913ad450b41f081b1e96dc53e9cdd0526ef1caa8 (patch)
treece7d9625cbee9ddb1499d23e857c779777990ee5
parentc15192e81ed99c2398d435a4012c30a04fef2419 (diff)
initial versions
-rw-r--r--lava-pull/run4
-rw-r--r--lava-pull/upload_testresults.py186
2 files changed, 190 insertions, 0 deletions
diff --git a/lava-pull/run b/lava-pull/run
new file mode 100644
index 0000000..85a2a86
--- /dev/null
+++ b/lava-pull/run
@@ -0,0 +1,4 @@
+export LAVA_PULL_BUNDLEROOT=~/lava_pull/bundles/
+export LAVA_PULL_LOGROOT=~/lava_pull/logs/
+
+python lava-pull.py
diff --git a/lava-pull/upload_testresults.py b/lava-pull/upload_testresults.py
new file mode 100644
index 0000000..c7edff4
--- /dev/null
+++ b/lava-pull/upload_testresults.py
@@ -0,0 +1,186 @@
+import couchdb
+import csv
+import argparse
+import subprocess
+import string
+import os
+import StringIO
+import re
+from uuid import uuid4
+import datetime
+
# CouchDB credentials used when uploading result documents.
# NOTE(review): previously hard-coded in source; now overridable via the
# environment so the password need not live in the repository. The defaults
# preserve the old behaviour when the variables are unset.
COUCHDB_USERNAME = os.environ.get('COUCHDB_USERNAME', 'data_upload')
COUCHDB_PASSWORD = os.environ.get('COUCHDB_PASSWORD', 'linaro')
+
# Normalise a test id: drop any "[...]" scenario suffix, then replace every
# character outside [-0-9A-Za-z_.] with '-'.
def clean_test_id(test_id):
    bracket = test_id.find('[')
    if bracket != -1:
        test_id = test_id[:bracket]
    return re.sub('[^-0-9A-Za-z_.]', '-', test_id)
+
+
# Derive a dotted "class name" from a test id: clean it, split on '.', and
# join components 1..class_depth-1 (the leading component is dropped).
def get_class_name(test_id, class_depth):
    parts = clean_test_id(test_id).split(".")
    return ".".join(parts[1:class_depth])
+
+
# Parse subunit2csv output (a CSV string) into a list of row dicts, cleaning
# each test id and adding a derived "class" entry to every row.
def csv_to_json(input_csv):
    rows = []
    for record in csv.DictReader(StringIO.StringIO(input_csv)):
        raw_id = record["test"]
        # get rid of the [uuid] part of the test name
        record["test"] = clean_test_id(raw_id)
        record["class"] = get_class_name(raw_id, 3)
        rows.append(record)
    return rows
+
+
# Return every test present in the subunit stream as a list of row dicts.
#
# The original spawned `cat` purely to feed the file into subunit2csv; opening
# the file and handing it to the child as stdin does the same with one fewer
# process, and the `with` block guarantees the file handle is closed.
def get_tests_run(subunit_stream):
    with open(subunit_stream) as stream:
        proc = subprocess.Popen(["subunit2csv", "--no-passthrough"],
                                stdin=stream, stdout=subprocess.PIPE)
        output = proc.communicate()[0]
    return csv_to_json(output)
+
+
# Return the genuine failures from the subunit stream as a list of row dicts.
#
# Replaces the original's useless `cat` stage by opening the stream file
# directly as the filter's stdin; the `with` block closes the file handle.
def get_failing_tests(subunit_stream):
    with open(subunit_stream) as stream:
        filt = subprocess.Popen(
            ["subunit-filter", "--only-genuine-failures", "--no-passthrough"],
            stdin=stream, stdout=subprocess.PIPE)
        to_csv = subprocess.Popen(["subunit2csv", "--no-passthrough"],
                                  stdin=filt.stdout, stdout=subprocess.PIPE)
        # drop our copy of the pipe so to_csv sees EOF when filt exits
        filt.stdout.close()
        output = to_csv.communicate()[0]
    return csv_to_json(output)
+
+
# Return the genuine failures from the subunit stream as a junit XML string.
#
# Replaces the original's useless `cat` stage by opening the stream file
# directly as the filter's stdin; the `with` block closes the file handle.
def get_failing_tests_xml(subunit_stream):
    with open(subunit_stream) as stream:
        filt = subprocess.Popen(
            ["subunit-filter", "--only-genuine-failures", "--passthrough"],
            stdin=stream, stdout=subprocess.PIPE)
        to_xml = subprocess.Popen(["subunit2junitxml"],
                                  stdin=filt.stdout, stdout=subprocess.PIPE)
        # drop our copy of the pipe so to_xml sees EOF when filt exits
        filt.stdout.close()
        return to_xml.communicate()[0]
+
+
# Return the passing tests from the subunit stream as a list of row dicts.
#
# Replaces the original's useless `cat` stage by opening the stream file
# directly as the filter's stdin; the `with` block closes the file handle.
def get_passing_tests(subunit_stream):
    with open(subunit_stream) as stream:
        filt = subprocess.Popen(
            ["subunit-filter", "--no-skip", "--no-failure", "--success", "--no-passthrough"],
            stdin=stream, stdout=subprocess.PIPE)
        to_csv = subprocess.Popen(["subunit2csv", "--no-passthrough"],
                                  stdin=filt.stdout, stdout=subprocess.PIPE)
        # drop our copy of the pipe so to_csv sees EOF when filt exits
        filt.stdout.close()
        output = to_csv.communicate()[0]
    return csv_to_json(output)
+
+
# Return the skipped tests from the subunit stream as a list of row dicts
# (everything that is not an error, failure, success, or xfail).
#
# Replaces the original's useless `cat` stage by opening the stream file
# directly as the filter's stdin; the `with` block closes the file handle.
def get_skipped_tests(subunit_stream):
    with open(subunit_stream) as stream:
        filt = subprocess.Popen(
            ["subunit-filter", "--no-error", "--no-failure", "--no-success", "--no-xfail", "--no-passthrough"],
            stdin=stream, stdout=subprocess.PIPE)
        to_csv = subprocess.Popen(["subunit2csv", "--no-passthrough"],
                                  stdin=filt.stdout, stdout=subprocess.PIPE)
        # drop our copy of the pipe so to_csv sees EOF when filt exits
        filt.stdout.close()
        output = to_csv.communicate()[0]
    return csv_to_json(output)
+
+
# Read the discovered-tests file (one test id per line) and return the
# cleaned ids as a list.
def get_all_tests(all_tests):
    cleaned = []
    with open(all_tests) as listing:
        for line in listing:
            cleaned.append(clean_test_id(line.rstrip('\n')))
    return cleaned
+
+
# Build the fields shared by every uploaded document: run metadata, a fresh
# uuid `_id`, a timestamp, and the base set of search tags.
# NOTE(review): `metadata` is currently unused; per the original comment the
# distro/branch/job values should eventually come from the bundle stream.
def create_base_document(metadata):
    doc = {
        # test data that needs to come from the bundle stream
        'os_distro': 'ubuntu',
        'os_version': 'trusty',
        'devstack_branch': 'master',
        'lava_job_id': '624',
        'networking': 'nova-network',
        'lava_bundle_stream': 'http://openstack.validation.linaro.org/dashboard/permalink/bundle/1bb4bf811506d95b2e9c09d88d05197750672902/',
        'date': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
        # unique document id
        '_id': uuid4().hex,
    }
    # base set of tags used for searching
    doc['tags'] = [doc['os_distro'], doc['os_version'],
                   doc['devstack_branch'], doc['networking']]
    return doc
+
+
+
+def create_test_run_document(metadata, log_files, all_tests, tests_run, skipped_tests, failing_tests, passing_tests):
+ doc = create_base_document(metadata)
+ doc['$type'] = 'test-run'
+
+ summary_stats = dict()
+ summary_stats['total_tests'] = len(all_tests)
+ summary_stats['tests_run'] = len(tests_run)
+ summary_stats['skipped_tests'] = len(skipped_tests)
+ summary_stats['passing_tests'] = len(passing_tests)
+ summary_stats['failing_tests'] = len(failing_tests)
+ doc['summary'] = summary_stats
+ doc['discovered_tests'] = all_tests
+
+ server = couchdb.Server()
+ server.resource.credentials = (COUCHDB_USERNAME, COUCHDB_PASSWORD)
+ doc_id, rev_id = server['tempest_results'].save(doc)
+ print "test run summary document created [%s, %s]" % (doc_id, rev_id)
+
+ return doc_id
+
+
# Build and save one per-test document linked back to the summary document;
# returns the (doc_id, rev_id) pair from CouchDB.
def create_test_document_helper(server, metadata, summary_doc_id, test):
    doc = create_base_document(metadata)
    doc['$type'] = 'test'
    doc['$test_run'] = summary_doc_id
    # make the test's status searchable via tags, then merge in the row data
    doc['tags'].append(test['status'])
    doc.update(test)
    return server['tempest_results'].save(doc)
+
+
+
+def create_test_documents(metadata, summary_doc_id, skipped_tests, failing_tests, passing_tests):
+ server = couchdb.Server()
+ server.resource.credentials = (COUCHDB_USERNAME, COUCHDB_PASSWORD)
+ print "creating passing test documents:"
+ for t in passing_tests:
+ doc_id, rev_id = create_test_document_helper(server, metadata, summary_doc_id, t)
+ print ": [%s, %s]" % (doc_id, rev_id)
+ print "creating failing test documents:"
+ for t in failing_tests:
+ doc_id, rev_id = create_test_document_helper(server, metadata, summary_doc_id, t)
+ print ": [%s, %s]" % (doc_id, rev_id)
+ print "creating skipped test documents:"
+ for t in skipped_tests:
+ doc_id, rev_id = create_test_document_helper(server, metadata, summary_doc_id, t)
+ print ": [%s, %s]" % (doc_id, rev_id)
+
+
+
# parse arguments, then process the data

parser = argparse.ArgumentParser()
parser.add_argument('--stream', help='subunit result stream', required=True)
parser.add_argument('--all_tests', help='text file containing list of all discovered tests', \
                    required=True)
args = parser.parse_args()

print "subunit stream = %s" % args.stream
print "discovered tests list = %s" % args.all_tests


# slice the stream into the result sets; each call re-reads the stream file
failing_tests = get_failing_tests(args.stream)
passing_tests = get_passing_tests(args.stream)
skipped_tests = get_skipped_tests(args.stream)
all_tests = get_all_tests(args.all_tests)
tests_run = get_tests_run(args.stream)

# upload the summary document first, then one document per test linked to it
# (metadata and log_files are not implemented yet, hence the Nones)
doc_id = create_test_run_document(None, None, all_tests, tests_run, skipped_tests, failing_tests, passing_tests)
create_test_documents(None, doc_id, skipped_tests, failing_tests, passing_tests)
+