author    Matthias Braun <matze@braunis.de>  2017-09-20 03:00:03 +0000
committer Matthias Braun <matze@braunis.de>  2017-09-20 03:00:03 +0000
commit    1849eb07b7fe95165bcf79504b0bdd6be73c6a88 (patch)
tree      c1fc94f2d0deaf52a4e02a3f016edfb3562a7595
parent    293105474bcb147dde25457ad42e16b0bcbed0fe (diff)
litsupport: Mark file-local functions as such
Start local functions with an underscore `_` for clarity and so that other
modules cannot (easily) import them.

git-svn-id: https://llvm.org/svn/llvm-project/test-suite/trunk@313709 91177308-0d34-0410-b5e6-96231b3b80d8
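For readers unfamiliar with the convention this commit relies on: a single leading
underscore on a module-level name is Python's signal that the name is internal to
that file. The interpreter does not enforce it, but `from module import *` skips
underscore-prefixed names unless they are listed in `__all__`, and many linters
warn when other modules reach for them. A minimal sketch with made-up names (not
taken from litsupport):

    # underscore_demo.py -- illustrative sketch only; the names below are
    # invented for this example and are not part of litsupport.

    def _mutate_commandline(commandline):
        """Leading underscore: intended as file-local; star-imports skip it."""
        return "time -v " + commandline

    def mutate_plan(plan):
        """Public entry point; the only name other modules are meant to import."""
        return [_mutate_commandline(cmd) for cmd in plan]

    if __name__ == "__main__":
        # In another module, `from underscore_demo import *` would bind
        # mutate_plan but not _mutate_commandline (absent an explicit __all__).
        print(mutate_plan(["./benchmark --size 100"]))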
-rw-r--r--  litsupport/perf.py        4
-rw-r--r--  litsupport/profilegen.py  8
-rw-r--r--  litsupport/remote.py     10
-rw-r--r--  litsupport/run_under.py   4
-rw-r--r--  litsupport/testplan.py   20
-rw-r--r--  litsupport/timeit.py      8
6 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/litsupport/perf.py b/litsupport/perf.py
index a139285d..4913b956 100644
--- a/litsupport/perf.py
+++ b/litsupport/perf.py
@@ -6,7 +6,7 @@ from litsupport import run_under
import lit.Test
-def mutateCommandLine(context, commandline):
+def _mutateCommandLine(context, commandline):
profilefile = context.tmpBase + ".perf_data"
cmd = shellcommand.parse(commandline)
cmd.wrap('perf', [
@@ -31,7 +31,7 @@ def mutatePlan(context, plan):
if context.config.run_under:
script = testplan.mutateScript(context, script,
run_under.mutateCommandLine)
- script = testplan.mutateScript(context, script, mutateCommandLine)
+ script = testplan.mutateScript(context, script, _mutateCommandLine)
plan.profilescript += script
plan.metric_collectors.append(
lambda context: {
diff --git a/litsupport/profilegen.py b/litsupport/profilegen.py
index e76f4f34..6322cae4 100644
--- a/litsupport/profilegen.py
+++ b/litsupport/profilegen.py
@@ -3,7 +3,7 @@ from litsupport import shellcommand
from litsupport import testplan
-def mutateCommandline(context, commandline):
+def _mutateCommandline(context, commandline):
"""Adjust runscript to set a different value to the LLVM_PROFILE_FILE
environment variable for each execution."""
profilefile = context.tmpBase + ".profraw"
@@ -12,14 +12,14 @@ def mutateCommandline(context, commandline):
return prefix + commandline
-def mutateScript(context, script):
- return testplan.mutateScript(context, script, mutateCommandline)
+def _mutateScript(context, script):
+ return testplan.mutateScript(context, script, _mutateCommandline)
def mutatePlan(context, plan):
context.profilefiles = []
# Adjust run steps to set LLVM_PROFILE_FILE
- plan.runscript = mutateScript(context, plan.runscript)
+ plan.runscript = _mutateScript(context, plan.runscript)
# Run profdata merge at the end
profdatafile = context.executable + ".profdata"
args = ['merge', '-output=%s' % profdatafile] + context.profilefiles
diff --git a/litsupport/remote.py b/litsupport/remote.py
index 0d6fc480..887ce0c1 100644
--- a/litsupport/remote.py
+++ b/litsupport/remote.py
@@ -5,7 +5,7 @@ from litsupport import testplan
import logging
-def mutateCommandline(context, commandline, suffix=""):
+def _mutateCommandline(context, commandline, suffix=""):
shfilename = context.tmpBase + suffix + ".sh"
shfile = open(shfilename, "w")
shfile.write(commandline + "\n")
@@ -25,11 +25,11 @@ def mutateCommandline(context, commandline, suffix=""):
return remote_commandline
-def mutateScript(context, script, suffix=""):
- mutate = lambda c, cmd: mutateCommandline(c, cmd, suffix)
+def _mutateScript(context, script, suffix=""):
+ mutate = lambda c, cmd: _mutateCommandline(c, cmd, suffix)
return testplan.mutateScript(context, script, mutate)
def mutatePlan(context, plan):
- plan.preparescript = mutateScript(context, plan.preparescript, "-prepare")
- plan.runscript = mutateScript(context, plan.runscript)
+ plan.preparescript = _mutateScript(context, plan.preparescript, "-prepare")
+ plan.runscript = _mutateScript(context, plan.runscript)
diff --git a/litsupport/run_under.py b/litsupport/run_under.py
index b22c0de6..e4edfcd0 100644
--- a/litsupport/run_under.py
+++ b/litsupport/run_under.py
@@ -4,7 +4,7 @@ from litsupport import shellcommand
from litsupport import testplan
-def mutateCommandLine(context, commandline):
+def _mutateCommandLine(context, commandline):
cmd = shellcommand.parse(commandline)
run_under_cmd = shellcommand.parse(context.config.run_under)
@@ -24,4 +24,4 @@ def mutatePlan(context, plan):
run_under = context.config.run_under
if run_under:
plan.runscript = testplan.mutateScript(context, plan.runscript,
- mutateCommandLine)
+ _mutateCommandLine)
diff --git a/litsupport/testplan.py b/litsupport/testplan.py
index a2aa2ec3..0dc80b19 100644
--- a/litsupport/testplan.py
+++ b/litsupport/testplan.py
@@ -35,7 +35,7 @@ def mutateScript(context, script, mutator):
return mutated_script
-def executeScript(context, script, scriptBaseName, useExternalSh=True):
+def _executeScript(context, script, scriptBaseName, useExternalSh=True):
if len(script) == 0:
return "", "", 0, None
@@ -86,20 +86,20 @@ def check_call(commandline, *aargs, **dargs):
return subprocess.check_call(commandline, *aargs, **dargs)
-def executePlan(context, plan):
+def _executePlan(context, plan):
"""This is the main driver for executing a benchmark."""
# Execute PREPARE: part of the test.
- _, _, exitCode, _ = executeScript(context, plan.preparescript, "prepare")
+ _, _, exitCode, _ = _executeScript(context, plan.preparescript, "prepare")
if exitCode != 0:
return lit.Test.FAIL
# Execute RUN: part of the test.
- _, _, exitCode, _ = executeScript(context, plan.runscript, "run")
+ _, _, exitCode, _ = _executeScript(context, plan.runscript, "run")
if exitCode != 0:
return lit.Test.FAIL
# Execute VERIFY: part of the test.
- _, _, exitCode, _ = executeScript(context, plan.verifyscript, "verify")
+ _, _, exitCode, _ = _executeScript(context, plan.verifyscript, "verify")
if exitCode != 0:
# The question here is whether to still collects metrics if the
# benchmark results are invalid. I choose to avoid getting potentially
@@ -107,7 +107,7 @@ def executePlan(context, plan):
return lit.Test.FAIL
# Execute additional profile gathering actions setup by testing modules.
- _, _, exitCode, _ = executeScript(context, plan.profilescript, "profile")
+ _, _, exitCode, _ = _executeScript(context, plan.profilescript, "profile")
if exitCode != 0:
logging.warning("Profile script '%s' failed", plan.profilescript)
@@ -123,8 +123,8 @@ def executePlan(context, plan):
# Execute the METRIC: part of the test.
for metric, metricscript in plan.metricscripts.items():
- out, err, exitCode, timeoutInfo = executeScript(context, metricscript,
- "metric")
+ out, err, exitCode, timeoutInfo = _executeScript(context, metricscript,
+ "metric")
if exitCode != 0:
logging.warning("Metric script for '%s' failed", metric)
continue
@@ -139,12 +139,12 @@ def executePlan(context, plan):
def executePlanTestResult(context, testplan):
- """Convenience function to invoke executePlan() and construct a
+ """Convenience function to invoke _executePlan() and construct a
lit.test.Result() object for the results."""
context.result_output = ""
context.result_metrics = {}
- result_code = executePlan(context, testplan)
+ result_code = _executePlan(context, testplan)
# Build test result object
result = lit.Test.Result(result_code, context.result_output)
diff --git a/litsupport/timeit.py b/litsupport/timeit.py
index 49816a11..7852dd44 100644
--- a/litsupport/timeit.py
+++ b/litsupport/timeit.py
@@ -4,7 +4,7 @@ import lit.Test
import re
-def mutateCommandLine(context, commandline):
+def _mutateCommandLine(context, commandline):
outfile = context.tmpBase + ".out"
timefile = context.tmpBase + ".time"
config = context.config
@@ -47,10 +47,10 @@ def mutateCommandLine(context, commandline):
return cmd.toCommandline()
-def mutateScript(context, script):
+def _mutateScript(context, script):
if not hasattr(context, "timefiles"):
context.timefiles = []
- return testplan.mutateScript(context, script, mutateCommandLine)
+ return testplan.mutateScript(context, script, _mutateCommandLine)
def _collectTime(context, timefiles, metric_name='exec_time'):
@@ -64,7 +64,7 @@ def mutatePlan(context, plan):
if len(plan.runscript) == 0:
return
context.timefiles = []
- plan.runscript = mutateScript(context, plan.runscript)
+ plan.runscript = _mutateScript(context, plan.runscript)
plan.metric_collectors.append(
lambda context: _collectTime(context, context.timefiles)
)