author     Brian Homerding <homerdin@gmail.com>    2019-04-04 15:12:08 +0000
committer  Brian Homerding <homerdin@gmail.com>    2019-04-04 15:12:08 +0000
commit     ed2fd6e52bbfe4a5f2844300bb99777eab1907c1 (patch)
tree       6fb2e445824923da4e5138f1a4454151d8f1e92e
parent     923d23538052a07d85d9a90f9e9e7fecd03e6e75 (diff)
[test-suite] Update test-suite microbenchmarks to use JSON (fix bug 41327)
Google benchmark is hoping to drop the CSV output format. This updates the
microbenchmark module to use the JSON output. This fixes PR41327.

Reviewers: lebedev.ri

https://reviews.llvm.org/D60205

git-svn-id: https://llvm.org/svn/llvm-project/test-suite/trunk@357704 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--    litsupport/modules/microbenchmark.py    22
1 file changed, 11 insertions, 11 deletions
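For reference, a minimal sketch of the report that --benchmark_format=json emits and of how the cpu_time field is read back out; the benchmark names, timings, and context fields below are illustrative, not taken from this commit:

import json

# Sketch only: a made-up Google Benchmark JSON report of the general shape
# the patched module expects ('benchmarks' entries carrying name/cpu_time).
payload = '''
{
  "context": {"date": "2019-04-04 15:12:08", "num_cpus": 8},
  "benchmarks": [
    {"name": "BM_example/64", "iterations": 1000000,
     "real_time": 12.4, "cpu_time": 12.1, "time_unit": "ns"},
    {"name": "BM_example/512", "iterations": 250000,
     "real_time": 98.7, "cpu_time": 97.9, "time_unit": "ns"}
  ]
}
'''

data = json.loads(payload)
for benchmark in data['benchmarks']:
    # cpu_time is the value the module records as the exec_time metric.
    print(benchmark['name'], benchmark['cpu_time'])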
diff --git a/litsupport/modules/microbenchmark.py b/litsupport/modules/microbenchmark.py
index 5cef7457..c7b9c3d2 100644
--- a/litsupport/modules/microbenchmark.py
+++ b/litsupport/modules/microbenchmark.py
@@ -1,17 +1,17 @@
 '''Test module to collect google benchmark results.'''
 from litsupport import shellcommand
 from litsupport import testplan
-import csv
+import json
 import lit.Test
 
 
 def _mutateCommandLine(context, commandline):
     cmd = shellcommand.parse(commandline)
-    cmd.arguments.append("--benchmark_format=csv")
+    cmd.arguments.append("--benchmark_format=json")
     # We need stdout outself to get the benchmark csv data.
     if cmd.stdout is not None:
         raise Exception("Rerouting stdout not allowed for microbenchmarks")
-    benchfile = context.tmpBase + '.bench.csv'
+    benchfile = context.tmpBase + '.bench.json'
     cmd.stdout = benchfile
     context.microbenchfiles.append(benchfile)
 
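A standalone sketch of the collection logic the next hunk introduces, assuming a captured report like the payload above. The helper name and file path are hypothetical and the litsupport plumbing (context.read_result_file, context.micro_results, module registration) is omitted; the lit.Test calls mirror the patched loop:

import json
import lit.Test

def collect_micro_results(bench_json_path):
    # Hypothetical helper: parse a saved --benchmark_format=json report and
    # build one passing lit result per benchmark entry, keyed by its name.
    with open(bench_json_path) as f:
        data = json.load(f)
    results = {}
    for benchmark in data['benchmarks']:
        micro = lit.Test.Result(lit.Test.PASS)
        micro.addMetric('exec_time',
                        lit.Test.toMetricValue(benchmark['cpu_time']))
        results[benchmark['name']] = micro
    return results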
@@ -25,18 +25,18 @@ def _mutateScript(context, script):
 def _collectMicrobenchmarkTime(context, microbenchfiles):
     for f in microbenchfiles:
         content = context.read_result_file(context, f)
-        lines = csv.reader(content.splitlines())
-        # First line: "name,iterations,real_time,cpu_time,time_unit..."
-        for line in lines:
-            if line[0] == 'name':
-                continue
+        data = json.loads(content)
+
+        # Create a micro_result for each benchmark
+        for benchmark in data['benchmarks']:
             # Name for MicroBenchmark
-            name = line[0]
+            name = benchmark['name']
+
             # Create Result object with PASS
             microBenchmark = lit.Test.Result(lit.Test.PASS)
-            # Index 3 is cpu_time
-            exec_time_metric = lit.Test.toMetricValue(float(line[3]))
+            # Add the exec_time metric for this result
+            exec_time_metric = lit.Test.toMetricValue(benchmark['cpu_time'])
             microBenchmark.addMetric('exec_time', exec_time_metric)
             # Add Micro Result