path: root/litsupport/testplan.py
"""
Datastructures for test plans; Parsing of .test files; Executing test plans.
"""
from litsupport import shellcommand
import lit.Test
import lit.TestRunner
import logging
import os
import subprocess


class TestPlan(object):
    def __init__(self):
        self.runscript = []
        self.verifyscript = []
        self.metricscripts = {}
        self.metric_collectors = []
        self.preparescript = []
        self.profilescript = []
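
# Illustrative sketch (not part of the original module): the fields above map
# onto the sections of a .test file.  Assuming the usual section keywords, a
# test file such as
#
#   PREPARE: rm -f output.txt
#   RUN: ./benchmark --out output.txt
#   VERIFY: diff reference.txt output.txt
#   METRIC: compile_time: cat compile_time.txt
#
# would fill preparescript, runscript and verifyscript with those command
# lines and metricscripts with a {"compile_time": [...]} entry.  The exact
# metric-name syntax is an assumption here, since the .test parsing itself
# lives outside this file.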


def mutateScript(context, script, mutator):
    """Apply `mutator` to every line of `script` and return the list of
    mutated lines.  Each line gets its own `context.tmpBase` (suffixed with
    the line index when the script has more than one line) so per-line output
    files do not collide."""
    previous_tmpbase = context.tmpBase
    i = 0
    mutated_script = []
    for line in script:
        number = ""
        if len(script) > 1:
            number = "-%s" % (i,)
            i += 1
        context.tmpBase = previous_tmpbase + number

        mutated_line = mutator(context, line)
        mutated_script.append(mutated_line)
    return mutated_script
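
# Illustrative sketch (not part of the original module): a hypothetical mutator
# in the style used by the litsupport testing modules.  mutateScript() calls it
# as mutator(context, line) for every script line; the timing command and file
# name below are invented for the example.
def _prepend_timing_example(context, commandline):
    # Write timing data next to the per-line tmpBase set up by mutateScript().
    return "/usr/bin/time -o %s.time %s" % (context.tmpBase, commandline)
# A module would then rewrite a plan's RUN lines like so:
#   plan.runscript = mutateScript(context, plan.runscript,
#                                 _prepend_timing_example)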


def _executeScript(context, script, scriptBaseName, useExternalSh=True):
    """Run `script` (a list of shell command lines) through lit's TestRunner,
    log the script and its output, and return the usual
    (out, err, exitCode, timeoutInfo) tuple."""
    if len(script) == 0:
        return "", "", 0, None

    if useExternalSh:
        execdir = None
        executeFunc = lit.TestRunner.executeScript
    else:
        execdir = os.getcwd()
        executeFunc = lit.TestRunner.executeScriptInternal

    logging.info("\n".join(script))
    res = executeFunc(context.test, context.litConfig,
                      context.tmpBase + "_" + scriptBaseName, script, execdir)
    # The executeScript() functions return a lit.Test.Result in some error
    # conditions instead of the normal tuple. Normalize that case back to the
    # usual tuple so callers only have to deal with one return type.
    if isinstance(res, lit.Test.Result):
        out = ""
        err = res.output
        exitCode = 1
        timeoutInfo = None
    else:
        (out, err, exitCode, timeoutInfo) = res

    # Log script in test output
    context.result_output += "\n" + "\n".join(script)
    # In case of an exitCode != 0 also log stdout/stderr
    if exitCode != 0:
        context.result_output += "\n" + out
        context.result_output += "\n" + err

    logging.info(out)
    logging.info(err)
    if exitCode != 0:
        logging.info("ExitCode: %s" % exitCode)
    return (out, err, exitCode, timeoutInfo)


def check_output(commandline, *aargs, **dargs):
    """Wrapper around subprocess.check_output that logs the command."""
    logging.info(" ".join(commandline))
    return subprocess.check_output(commandline, *aargs, **dargs)


def check_call(commandline, *aargs, **dargs):
    """Wrapper around subprocess.check_call that logs the command."""
    logging.info(" ".join(commandline))
    return subprocess.check_call(commandline, *aargs, **dargs)


def _executePlan(context, plan):
    """This is the main driver for executing a benchmark."""
    # Execute PREPARE: part of the test.
    _, _, exitCode, _ = _executeScript(context, plan.preparescript, "prepare")
    if exitCode != 0:
        return lit.Test.FAIL

    # Execute RUN: part of the test.
    _, _, exitCode, _ = _executeScript(context, plan.runscript, "run")
    if exitCode != 0:
        return lit.Test.FAIL

    # Execute VERIFY: part of the test.
    _, _, exitCode, _ = _executeScript(context, plan.verifyscript, "verify")
    if exitCode != 0:
        # The question here is whether to still collect metrics if the
        # benchmark results are invalid. We choose not to, so a broken test
        # does not also report potentially broken metric values.
        return lit.Test.FAIL

    # Execute additional profile gathering actions setup by testing modules.
    _, _, exitCode, _ = _executeScript(context, plan.profilescript, "profile")
    if exitCode != 0:
        logging.warning("Profile script '%s' failed", plan.profilescript)

    # Perform various metric extraction steps setup by testing modules.
    for metric_collector in plan.metric_collectors:
        try:
            additional_metrics = metric_collector(context)
            for metric, value in additional_metrics.items():
                context.result_metrics[metric] = value
        except Exception as e:
            logging.error("Could not collect metric with %s", metric_collector,
                          exc_info=e)

    # Execute the METRIC: part of the test.
    for metric, metricscript in plan.metricscripts.items():
        out, err, exitCode, timeoutInfo = _executeScript(context, metricscript,
                                                         "metric")
        if exitCode != 0:
            logging.warning("Metric script for '%s' failed", metric)
            continue
        try:
            value = lit.Test.toMetricValue(float(out))
            context.result_metrics[metric] = value
        except ValueError:
            logging.warning("Metric reported for '%s' is not a float: '%s'",
                            metric, out)

    return lit.Test.PASS


def executePlanTestResult(context, testplan):
    """Convenience function to invoke _executePlan() and construct a
    lit.test.Result() object for the results."""
    context.result_output = ""
    context.result_metrics = {}

    result_code = _executePlan(context, testplan)

    # Build test result object
    result = lit.Test.Result(result_code, context.result_output)
    for key, value in context.result_metrics.items():
        result.addMetric(key, value)
    return result
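
# Illustrative sketch (not part of the original module): assembling a minimal
# TestPlan by hand and turning it into a lit result.  The `context` argument is
# assumed to be the same test-context object used throughout this file
# (providing .test, .litConfig, .tmpBase, ...); the command strings are
# invented for the example.
def _example_usage(context):
    plan = TestPlan()
    plan.preparescript = ["rm -f %s.out" % context.tmpBase]
    plan.runscript = ["./benchmark > %s.out" % context.tmpBase]
    plan.verifyscript = ["diff reference.out %s.out" % context.tmpBase]
    # Returns a lit.Test.Result carrying the PASS/FAIL code plus any metrics
    # gathered by the metric collectors or METRIC: scripts.
    return executePlanTestResult(context, plan)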