author    Matthias Braun <matze@braunis.de>  2017-09-20 03:00:05 +0000
committer Matthias Braun <matze@braunis.de>  2017-09-20 03:00:05 +0000
commit    3af4342e5aca73cef4ee1135b5534d22959ab300 (patch)
tree      d8df00172ab959d755dc00010d509908f398cb28
parent    377b64f29431407e4dfe6da6a6c5180b81f73808 (diff)
litsupport: Extend documentation
- Write a paragraph about testing modules and how to enable/disable them.
- Write a paragraph on developing new testing modules.
- Improve API documentation for the testplan object and related functions.

git-svn-id: https://llvm.org/svn/llvm-project/test-suite/trunk@313711 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  litsupport/README.md   | 49
-rw-r--r--  litsupport/testplan.py | 37
2 files changed, 72 insertions, 14 deletions
diff --git a/litsupport/README.md b/litsupport/README.md
index 1347b957..0d43527b 100644
--- a/litsupport/README.md
+++ b/litsupport/README.md
@@ -3,7 +3,7 @@ Introduction
This is the benchmark runner for llvm test-suite. It is only available when the
test-suite was built with cmake. It runs benchmarks, checks the results and
-collects various metrics such as runtime, compiletime or code size.
+collects various metrics such as runtime, compile time or code size.
The runner is implemented as a custom test format for the llvm-lit tool.
@@ -48,3 +48,50 @@ install `lit` from the python package index. Examples:
# Run benchmarks with reduced lit output but save collected metrics to json
# file. This format is used by LNT or viewable by test-suite/utils/compare.py.
$ llvm-lit . -o result.json -s
+
+Testing Modules
+===============
+
+The benchmark runner behaviour is defined and enhanced by testing modules.
+Testing modules can modify the command lines to run the benchmark and register
+callbacks to collect metrics.
+
+The list of modules is defined in the `lit.site.cfg` file
+(`config.test_modules`) which in turn is generated by cmake. The module list is
+influenced by a number of cmake flags. For a complete list consult the cmake
+code; typical examples are:
+
+- `cmake -DTEST_SUITE_RUN_BENCHMARKS=Off` removes the `run` module, so no
+ benchmarks are actually run; this is useful because code size, compile time,
+ or compilation statistics can still be collected.
+- `cmake -DTEST_SUITE_RUN_UNDER=qemu` enables the `run_under` module, which
+ prefixes all benchmark invocations with the specified command (here: `qemu`).
+- `cmake -DTEST_SUITE_REMOTE_HOST=xxx` enables the `remote` module, which uses
+ ssh to run benchmarks on a remote device (assuming shared file systems).
+- `cmake -DTEST_SUITE_PROFILE_GENERATE` compiles the benchmarks with
+ `-fprofile-instr-generate` and enables the `profilegen` module, which runs
+ `llvm-profdata` after running the benchmarks.
+
+Developing New Modules
+----------------------
+
+A testing module is a python module providing a `mutatePlan(context, plan)`
+function. Modules can:
+
+- Modify the scripts used to execute the benchmark (`plan.preparescript`,
+ `plan.runscript`, `plan.verifyscript`). A script is a list of strings with
+ shell commands. Modifying the list is best done via the
+ `testplan.mutateScript` function, which sets `context.tmpBase` to a path
+ unique to the command being modified. The `shellcommand` module helps with
+ analyzing and modifying posix shell command lines.
+
+- Append a function to the `plan.metric_collectors` list. The metric collector
+ functions are executed after the benchmark scripts have run. Metrics must be
+ returned as a dictionary with int, float or string values (anything supported
+ by `lit.Test.toMetricValue()`).
+
+The `context` object is passed to all testing modules and metric collectors
+and may be used to communicate information between them. `context.executable`
+contains the path to the benchmark executable; `context.config` contains the
+lit configuration coming from a combination of the `lit.site.cfg`, `lit.cfg`
+and `lit.local.cfg` files for the benchmark.
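
To make the "Developing New Modules" documentation above more concrete, here is a minimal sketch of a hypothetical testing module. Only `mutatePlan(context, plan)`, `testplan.mutateScript`, `plan.runscript` and `plan.metric_collectors` come from the documentation in this change; the module name, the `nice` wrapper command and the reported metric are made up for illustration.

    # Hypothetical testing module sketch; not part of this commit.
    from litsupport import testplan


    def _wrapCommand(context, commandline):
        # Prefix every benchmark command line; `nice` is only an example.
        return "nice -n 19 " + commandline


    def _collectMetrics(context):
        # Metric collectors run after the scripts have finished and return a
        # dictionary of int, float or string values.
        return {"wrapped_with_nice": 1}


    def mutatePlan(context, plan):
        testplan.mutateScript(context, plan.runscript, _wrapCommand)
        plan.metric_collectors.append(_collectMetrics)

Such a module would additionally have to appear in `config.test_modules` (normally arranged by the cmake configuration) before the runner picks it up.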
diff --git a/litsupport/testplan.py b/litsupport/testplan.py
index 7b066a7f..08919fd0 100644
--- a/litsupport/testplan.py
+++ b/litsupport/testplan.py
@@ -10,6 +10,12 @@ import subprocess
class TestPlan(object):
+    """Describes how to execute a benchmark and how to collect metrics.
+    A script is a list of strings containing shell commands. The available
+    scripts are preparescript, runscript, verifyscript, profilescript and
+    metricscripts; they are executed in this order.
+    metric_collectors contains a list of functions that are executed after
+    the scripts have finished."""
    def __init__(self):
        self.runscript = []
        self.verifyscript = []
@@ -20,6 +26,10 @@ class TestPlan(object):
def mutateScript(context, script, mutator):
+    """Apply the `mutator` function to every command in the `script` array of
+ strings. The mutator function is called with `context` and the string to
+ be mutated and must return the modified string. Sets `context.tmpBase`
+ to a path unique to every command."""
    previous_tmpbase = context.tmpBase
    i = 0
    mutated_script = []
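
As a hedged illustration of the mutateScript contract documented above: a mutator receives the per-command `context.tmpBase` and can use it to derive unique file names. The `/usr/bin/time` wrapper and the `.time` suffix below are assumptions for illustration, not part of this change.

    def _wrapWithTime(context, commandline):
        # context.tmpBase is unique for every command, so each command gets
        # its own .time output file; /usr/bin/time is just an example wrapper.
        return "/usr/bin/time -o %s.time %s" % (context.tmpBase, commandline)

    # A testing module would apply it from mutatePlan(), e.g.:
    #   testplan.mutateScript(context, plan.runscript, _wrapWithTime)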
@@ -36,6 +46,7 @@ def mutateScript(context, script, mutator):
def _executeScript(context, script, scriptBaseName, useExternalSh=True):
+    """Execute a script (an array of strings containing shell commands)."""
    if len(script) == 0:
        return "", "", 0, None
@@ -74,20 +85,8 @@ def _executeScript(context, script, scriptBaseName, useExternalSh=True):
return (out, err, exitCode, timeoutInfo)
-def check_output(commandline, *aargs, **dargs):
- """Wrapper around subprocess.check_output that logs the command."""
- logging.info(" ".join(commandline))
- return subprocess.check_output(commandline, *aargs, **dargs)
-
-
-def check_call(commandline, *aargs, **dargs):
- """Wrapper around subprocess.check_call that logs the command."""
- logging.info(" ".join(commandline))
- return subprocess.check_call(commandline, *aargs, **dargs)
-
-
def _executePlan(context, plan):
- """This is the main driver for executing a benchmark."""
+ """Executes a test plan (a TestPlan object)."""
    # Execute PREPARE: part of the test.
    _, _, exitCode, _ = _executeScript(context, plan.preparescript, "prepare")
    if exitCode != 0:
@@ -152,3 +151,15 @@ def executePlanTestResult(context, testplan):
    for key, value in context.result_metrics.items():
        result.addMetric(key, value)
    return result
+
+
+def check_output(commandline, *aargs, **dargs):
+ """Wrapper around subprocess.check_output that logs the command."""
+ logging.info(" ".join(commandline))
+ return subprocess.check_output(commandline, *aargs, **dargs)
+
+
+def check_call(commandline, *aargs, **dargs):
+ """Wrapper around subprocess.check_call that logs the command."""
+ logging.info(" ".join(commandline))
+ return subprocess.check_call(commandline, *aargs, **dargs)
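
As a hypothetical example of how the relocated `check_output` wrapper might be used from a metric collector: the use of `llvm-size` and the parsing of its berkeley output format below are assumptions, not part of this change.

    def _collectTextSize(context):
        # Run llvm-size on the benchmark executable through the logging
        # wrapper above (testplan.check_output when called from a module).
        out = check_output(["llvm-size", context.executable]).decode()
        # Berkeley format: a header line, then "text data bss dec hex filename".
        return {"size.text": int(out.splitlines()[1].split()[0])}

Appending `_collectTextSize` to `plan.metric_collectors` from a testing module would then report the value alongside the other collected metrics.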