aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPetre-Ionut Tudor <petre-ionut.tudor@linaro.org>2017-12-01 16:35:43 +0000
committerPetre-Ionut Tudor <petre-ionut.tudor@linaro.org>2018-02-15 18:06:37 +0000
commit3530b755c413ad5593b4832f4fd6f5f5d5206636 (patch)
tree51c47facf4ca56bb89c33005c41a14c22124c2ad
parentf5ac3ea0ddcd0ed5364a6b00d8e322bd50851b8b (diff)
Perf tools: Rewrite the old perf scripts.
Rewrite the old perf scripts to abide by the Google style rules for bash scripts and to offer support for running under a --android-root configuration. Test: manual for now. Change-Id: I620d87144d96eed41801b9adc55fe6964b140950
-rwxr-xr-xbenchmarks/benchmarks_run_target.sh11
-rwxr-xr-xbenchmarks/perf_profile_benchmarks_target.sh270
-rw-r--r--utils/utils_benchmarks_build.sh237
-rw-r--r--utils/utils_perf.sh181
4 files changed, 691 insertions, 8 deletions
diff --git a/benchmarks/benchmarks_run_target.sh b/benchmarks/benchmarks_run_target.sh
index a099f83..ed569e5 100755
--- a/benchmarks/benchmarks_run_target.sh
+++ b/benchmarks/benchmarks_run_target.sh
@@ -20,6 +20,7 @@ source "${local_path}/../utils/utils.sh"
source "${local_path}/../utils/utils_test.sh"
source "${local_path}/../utils/utils_android.sh"
source "${local_path}/../utils/utils_android_root.sh"
+source "${local_path}/../utils/utils_benchmarks_build.sh"
source "${local_path}/../devices/cpu_freq_utils.sh"
readonly timer_name="Target Benchmarks"
@@ -46,15 +47,9 @@ set_default_options() {
validate_options() {
local -r mode="${options["mode"]}"
- if [[ "$mode" != "all" && "$mode" != "32" && "$mode" != "64" ]]; then
- log E "Invalid mode option: $mode"
- exit 1
- fi
+ validate_mode_option "${mode}"
local -r cpu="${options["cpu"]}"
- if [[ "$cpu" != "all" && "$cpu" != "big" && "$cpu" != "little" && "$cpu" != "default" ]]; then
- log E "Invalid CPU option: $cpu"
- exit 1
- fi
+ validate_cpu_option "${cpu}"
local -r iterations="${options["iterations"]}"
if [[ ! ${iterations} =~ ^[0-9]+$ ]]; then
log E "Invalid number of iterations: ${iterations}"
diff --git a/benchmarks/perf_profile_benchmarks_target.sh b/benchmarks/perf_profile_benchmarks_target.sh
new file mode 100755
index 0000000..48b113c
--- /dev/null
+++ b/benchmarks/perf_profile_benchmarks_target.sh
@@ -0,0 +1,270 @@
+#!/bin/bash
+#
+# Copyright (c) 2017, Linaro Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is an executable script for gathering benchmark performance data on Android devices using
+# perf and adb. The performance data is gathered on the device using perf record and analyzed
+# locally in order to generate annotations of the hotspots in the perf data.
+
+readonly local_path=$(dirname "$0")
+source "${local_path}/../utils/utils.sh"
+source "${local_path}/../utils/utils_test.sh"
+source "${local_path}/../utils/utils_android.sh"
+source "${local_path}/../utils/utils_android_root.sh"
+source "${local_path}/../devices/cpu_freq_utils.sh"
+source "${local_path}/../utils/utils_perf.sh"
+source "${local_path}/../utils/utils_benchmarks_build.sh"
+
+readonly timer_name="Perf Target Benchmarks"
+readonly LOG_DIRECTORY="$(get_workspace)"
+readonly perf_out="./benchmarks/tools/perf/perf-out"
+readonly symbols_folder="${perf_out}/symbols"
+readonly cfg_folder="${perf_out}/cfg"
+readonly structured_sources_folder="${perf_out}/structured_src"
+
+# shellcheck disable=SC2034
+declare -A options_format=(
+ ["help"]="p:usage()"
+ ["h"]="r:&help"
+ ["verbose"]="p:enable_verbose()"
+ ["v"]="r:&verbose"
+ ["cpu"]="all"
+ ["mode"]="all"
+ ["calibration"]="false"
+ ["keep-logs"]="false"
+ ["single-benchmark"]=""
+ ["single-event"]=""
+ ["dalvik-flags"]=""
+ ["custom-counters"]=""
+)
+declare -A options=()
+
+validate_options() {
+ local -r mode="${options[mode]}"
+ validate_mode_option "${mode}"
+
+ local -r cpu="${options[cpu]}"
+ validate_cpu_option "${cpu}"
+
+ local -r single_benchmark="${options[single-benchmark]}"
+ if [[ -n "${single_benchmark}" ]]; then
+ # Check that the benchmark file exists and has the right extension.
+ local -r bench_file="./benchmarks/benchmarks/${single_benchmark}"
+ if [[ ! -f "${bench_file}" || "${bench_file##*.}" != "java" ]]; then
+ log E "File does not exist or has wrong extension: ${single_benchmark}"
+ log E "The file must exist and have a .java extension"
+ exit 1
+ fi
+ fi
+
+ local -r pmu_counters="${options[custom-counters]}"
+ if [[ -n "${pmu_counters}" && ! -f "${pmu_counters}" ]]; then
+ log E "File does not exist: ${pmu_counters}"
+ exit 1
+ fi
+}
+
+usage() {
+ log I "$0"
+ log I "Use this script to gather and analyze performance data for benchmarks."
+ log I "This script must be run from the root directory of the android source tree."
+ log I "This script expects a device to be connected."
+ log I " -h|--help - help"
+ log I " -v|--verbose - verbose"
+ log I "-------------------------------------------"
+ log I " --mode <all|32|64> - Run benchmarks for the specified mode(s)."
+ log I " (default: all)"
+ log I " --cpu <all|big|little|default> - CPU mode."
+ log I " \"big\": Run with only big cores and pin their"
+ log I " frequency."
+ log I " \"little\": Run with only little cores and pin their"
+ log I " frequency."
+ log I " \"all\": With big.LITTLE devices: Run consecutively"
+ log I " with only little cores enabled and pinned, and then"
+ log I " with only big cores."
+ log I " For devices without big.LITTLE, all cores are enabled"
+ log I " and pinned."
+ log I " \"default\": Run with unaltered default CPU"
+ log I " configuration (no pinning)."
+ log I " (default: all)"
+ log I " --calibration - Run benchmarks in calibration mode."
+ log I " (default: don't use calibration)"
+ log I " --keep-logs - Keep the dalvik command lines generated during previous"
+ log I " runs on the target device, for future use."
+ log I " (default: don't keep logs)"
+ log I " --single-benchmark <benchmark> - Run only the specified benchmark."
+ log I " Format: <package>/<benchmark>"
+ log I " Example: micro/intrinsics/Intrinsics.java"
+ log I " (default: run all benchmarks)"
+ log I " --single-event <pmu_event> - Record only the specified event."
+ log I " (default: record all events)"
+ log I " --dalvik-flags <dalvikvm flags> - Use this to pass extra flags to dalvikvm, besides the"
+ log I " flags that are normally used when compiling/running"
+ log I " benchmarks. If your flags contain spaces, use quoting."
+ log I " (default: Use usual benchmark dalvik flags)"
+ log I " --custom-counters <file> - Use PMU counters from a custom file, in addition to"
+ log I " the generic events."
+ log I " (default: Use generic events)"
+ log I "-------------------------------------------"
+ exit 0
+}
+
+# Record performance data for each of the CPUs specified by the user.
+record_for_each_cpu() {
+ local -r bench_list="$1"
+ local -r event_list="$2"
+ local -r cpu="${options[cpu]}"
+ local -r calibration="${options[calibration]}"
+ local -r target_device=$(safe adb_shell getprop ro.product.device)
+ local -r dalvikvm=$(basename "$(get_dalvikvm "${bits}")")
+ local -r cmdline=$(get_dalvik_cmdline "${bits}" "/data/local/tmp" "${dalvik_flags}")
+
+ if [[ "${cpu}" == "default" ]]; then
+ get_benchmarks_performance_data "${bench_list}" "${event_list}" "${cmdline}" \
+ "${perf_out}" "${symbols_folder}" "default-cpu" "${dalvikvm}" "${calibration}"
+ return
+ fi
+
+ exit_on_failure get_device_settings "${target_device}" "${local_path}/../devices/config"
+
+ # Create an associative array to store the names of the CPUs on which we want to record.
+ declare -A cpu_names
+ if ${DEVICE_IS_BIG_LITTLE}; then
+ if [[ "$cpu" == "little" || "$cpu" == "all" ]]; then
+ cpu_names["little"]="${LITTLE_CPUS_NAME}"
+ fi
+ if [[ "$cpu" == "big" || "$cpu" == "all" ]]; then
+ cpu_names["big"]="${BIG_CPUS_NAME}"
+ fi
+ else
+ if [[ "$cpu" == "big" || "$cpu" == "little" ]]; then
+ log E "The \`--cpu big\` and \`--cpu little\` options are only valid for big.LITTLE devices."
+ exit 1
+ fi
+ cpu_names["all"]="${CPUS_NAME}"
+ fi
+
+ # Iterate over the elements, pinning CPU and frequency before recording the benchmarks.
+ local proc
+ for proc in "${!cpu_names[@]}"; do
+ safe "${local_path}/../devices/set_cpu_freq.sh" --"${proc}" --pin-freq
+ get_benchmarks_performance_data "${bench_list}" "${event_list}" "${cmdline}" \
+ "${perf_out}" "${symbols_folder}" "${cpu_names[${proc}]}" "${dalvikvm}" "${calibration}"
+ done
+}
+
+# Pin CPU and frequency and gather performance data for multiple events.
+profile_benchmarks() {
+ local -r single_bench="${options[single-benchmark]}"
+ local -r single_event="${options[single-event]}"
+ local -r custom_pmu_counters="${options[custom-counters]}"
+
+ # Construct the list of benchmarks that will be recorded.
+ local bench_list
+ if [[ -n "${single_bench}" ]]; then
+ bench_list="${single_bench}"
+ else
+ # Searching for all the benchmarks sources and removing the directory name gives us the list
+ # of benchmarks that the `get_benchmarks_performance_data` function expects.
+ bench_list=$(find benchmarks/benchmarks -mindepth 1 -iname "*.java" -printf '%P ')
+ fi
+
+ # Construct the list of events that will be recorded.
+ local event_list
+ if [[ -n "${single_event}" ]]; then
+ event_list="${single_event}"
+ else
+ event_list=$(get_pmu_events_from_file ./benchmarks/tools/perf/config/events-generic.js)
+ if [[ "${custom_pmu_counters}" != "" ]]; then
+ event_list="${event_list} $(get_pmu_events_from_file "${custom_pmu_counters}")"
+ fi
+ fi
+
+ record_for_each_cpu "${bench_list}" "${event_list}"
+}
+
+main() {
+ exit_on_failure arguments_parser options_format options -- "$@"
+ readonly options
+ validate_options
+ dump_options
+
+ local -r mode="${options[mode]}"
+ local -r dalvik_flags="${options[dalvik-flags]}"
+ local -r single_bench="${options[single-benchmark]}"
+ local -r keep_logs="${options[keep-logs]}"
+
+ if android_build_already_setup; then
+ log E "This test does not support environment targets. Please re-run in a clean environment."
+ exit 1
+ fi
+
+ start_test "${timer_name}"
+
+ # TODO: Use unique temporary folders to store the output of each run.
+ safe rm -rf "${perf_out}"
+ local already_setup_sources_folder="false"
+
+ local bits
+ for bits in 32 64; do
+ if [[ "${mode}" != "all" && "${mode}" != "${bits}" ]]; then
+ log I "Skipping ${bits}bit benchmarks."
+ continue
+ fi
+ log I "Starting ${bits}bit benchmarks."
+
+ # Set environment variables.
+ set_environment_target
+
+ build_target "${bits}"
+
+ # Setup & sync device.
+ device_setup "${bits}"
+ if [[ "${keep_logs}" == "true" ]]; then
+ device_cleanup_keep_logs "${bits}"
+ else
+ device_cleanup "${bits}"
+ fi
+ sync_target "${bits}"
+ # Setup a folder which mimics the directory structure that perf expects when reporting.
+ setup_symbols_folder "${symbols_folder}"
+
+ build_benchmarks "${single_bench}"
+
+ # Trigger AOT compilation before running the benchmarks. This makes use of the dalvik-cache
+ # in order to avoid the overhead of compiling the benchmarks when recording them.
+ trigger_aot_compilation "${cfg_folder}" "${bits}" "/data/local/tmp" "${dalvik_flags}" "true"
+
+ # This only needs to be executed once, but it requires environment targets, hence it being
+ # located inside this loop.
+ if [[ "${already_setup_sources_folder}" == "false" ]]; then
+ setup_sources_folder "${structured_sources_folder}"
+ already_setup_sources_folder="true"
+ fi
+
+ # Pull the compiled benchmark so we have debug symbols for it.
+ pull_compiled_benchmark "${bits}" "${symbols_folder}"
+
+ profile_benchmarks
+ done
+
+ # Return the device to its default configuration.
+ device_restore
+
+ end_test "${timer_name}"
+}
+
+main "$@"
diff --git a/utils/utils_benchmarks_build.sh b/utils/utils_benchmarks_build.sh
new file mode 100644
index 0000000..437e7d1
--- /dev/null
+++ b/utils/utils_benchmarks_build.sh
@@ -0,0 +1,237 @@
+#!/bin/bash
+#
+# Copyright (c) 2017, Linaro Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [[ ! -v JCPU_COUNT ]]; then
+ log E "utils_android.sh must be sourced before utils_benchmarks_build.sh"
+ abort
+fi
+
+# Build benchmarks and push them to the device.
+# $1: Name of a benchmark to build. Empty string causes all benchmarks to be built.
+build_benchmarks() {
+ local -r single_bench="$1"
+ if [[ -n "${single_bench}" ]]; then
+ safe ./benchmarks/build.sh -t -b "./benchmarks/benchmarks/${single_bench}"
+ else
+ safe ./benchmarks/build.sh -t
+ fi
+ # Push benchmarks to the device.
+ safe adb push ./benchmarks/out/build/bench.apk /data/local/tmp
+}
+
+# Return if an art disassembler is available on the device.
+# $1: number of bits.
+has_art_disassembler() {
+ local -r bits=$1
+
+ local lib
+ if [[ "${bits}" == 32 ]]; then
+ lib="lib"
+ else
+ lib="lib64"
+ fi
+
+ adb_shell "ls ${ART_TEST_ANDROID_ROOT}/${lib}/libart-disassembler.so &>/dev/null"
+}
+
+# Prepare the target device for running under android-root.
+# $1: number of bits the target was built for.
+sync_target() {
+ local -r bits="$1"
+ start_adb_section "sync_target_${bits}"
+ safe adb push "${OUT}/system" "${ART_TEST_ANDROID_ROOT}"
+ safe adb push "${OUT}/data/art-test" /data/art-test
+ end_adb_section "sync_target_${bits}" "$?"
+}
+
+# Clear all vestiges of the previous run, but keep other files such as logs.
+device_cleanup_keep_logs() {
+ start_section "device_cleanup_keep_logs_$1"
+ safe adb shell "rm -rf /data/local/tmp/{system,dalvik-cache,oat} /data/art-test /data/native-test"
+ end_section "device_cleanup_keep_logs_$1" "$?"
+}
+
+# Find out what Android version has been last pushed to the device, since dalvik stores compile
+# output in different places depending on it, and return the location of the compiled benchmarks.
+# $1: number of bits the benchmark was built for.
+# $2: build log file - Full path to the log file for the most recent device synchronization for
+# running benchmarks under android-root.
+get_compiled_benchmarks_location() {
+ local -r bits="$1"
+ local -r log_file="$2"
+ if [[ ! -f "${log_file}" ]]; then
+ log E "Log file must exist!"
+ log E "Please synchronize with the target device before using this function."
+ return 1
+ fi
+
+ # Get the last entry of PLATFORM_VERSION in the log file, corresponding to the last time
+ # android-root files were pushed to the target.
+ local -r platform_version=$(grep -oP "PLATFORM_VERSION=\K.*" "${log_file}" | tail -n 1)
+
+ # Determine which architecture corresponds to the supplied number of bits.
+ local arch
+ if [[ "${bits}" == 32 ]]; then
+ arch="arm"
+ else
+ arch="arm64"
+ fi
+
+ case "${platform_version}" in
+ 6\.*|M|7\.*|N)
+ echo "/data/local/tmp/dalvik-cache/${arch}/data@local@tmp@bench.apk@classes.dex"
+ ;;
+ 8\.*|O|9\.*|P)
+ echo "/data/local/tmp/oat/${arch}/bench.odex"
+ ;;
+ *)
+ echo "/data/local/tmp/oat/${arch}/bench.odex"
+ ;;
+ esac
+}
+
+# Pull the compiled benchmark(s) to the folder where perf will look for symbols.
+# $1: architecture.
+# $2: perf symbols folder - full path to the folder on the host machine where perf report will look
+# for symbols.
+pull_compiled_benchmark() {
+ local -r bits="$1"
+ local -r perf_symbols_folder="$2"
+
+ local -r compiled_benchmark=$(safe get_compiled_benchmarks_location "${bits}" \
+ "${ANDROID_BUILD_TOP}/log_build_sync_target_${bits}.txt")
+ local -r bench_remote_dir=$(safe dirname "${compiled_benchmark}")
+
+ safe mkdir -p "${perf_symbols_folder}${bench_remote_dir}"
+ adb pull "${compiled_benchmark}" "${perf_symbols_folder}${bench_remote_dir}"
+}
+
+# Return the absolute path to dalvikvm under android root.
+# $1: number of bits - which variant of dalvikvm is required.
+get_dalvikvm() {
+ local -r bits="$1"
+ echo "${ART_TEST_ANDROID_ROOT}/bin/dalvikvm${bits}"
+}
+
+# Return the command line for using dalvik to compile and run benchmarks.
+# $1: number of bits.
+# $2: apk folder - full path to the directory on the target device where bench.apk is located.
+# $3: custom dalvik flags - extra flags to pass to dalvik.
+get_dalvik_cmdline() {
+ local -r bits="$1"
+ local -r apk_folder="$2"
+ local -r vm_flags="$3"
+
+ local lib="${ART_TEST_ANDROID_ROOT}"
+ if [[ "${bits}" == 32 ]]; then
+ lib+="/lib"
+ else
+ lib+="/lib64"
+ fi
+
+ # Set environment variables for dalvik.
+ local env="ANDROID_DATA=${apk_folder} DEX_LOCATION=${apk_folder}"
+ env="ANDROID_ROOT=${ART_TEST_ANDROID_ROOT} LD_LIBRARY_PATH=${lib} ${env}"
+
+ # Set up dalvik options.
+ local runtime="-Ximage:/data/art-test/core.art"
+ runtime+=" -Xbootclasspath:${ART_TEST_ANDROID_ROOT}/framework/core-libart.jar"
+
+ # See if we can dump the CFG for the benchmarks.
+ local cfg
+ if has_art_disassembler "${bits}"; then
+ local -r vm="dalvikvm${bits}"
+ cfg="-Xcompiler-option -j1 -Xcompiler-option --dump-cfg=${apk_folder}/bench.${vm}.cfg"
+ else
+ cfg=""
+ fi
+
+ # Set up dex2oat options.
+ local -r filter="--compiler-filter=speed"
+ local comp_ops="-Xcompiler-option -g ${cfg} -Xcompiler-option ${filter}"
+ comp_ops+=" -Ximage-compiler-option ${filter}"
+
+ # Put everything together.
+ local -r dalvikvm=$(get_dalvikvm "${bits}")
+ local cmd="env ${env} ${dalvikvm} ${runtime} ${vm_flags} ${comp_ops} "
+ cmd+="-cp ${apk_folder}/bench.apk"
+
+ echo "${cmd}"
+}
+
+# Trigger Ahead Of Time compilation for the specified benchmark(s).
+# $1: cfg folder - full path to the folder on the target device where the CFG for the benchmark
+# should be dumped by dex2oat.
+# $2: number of bits - For how many bits the benchmark(s) should be compiled.
+# $3: apk folder - full path to the directory on the target device where bench.apk is located.
+# $4: custom dalvik flags - extra flags to pass to dalvik.
+# $5: assertion for dalvik cache - true|false. Enable checking that compile output is cached
+# correctly and that the benchmarks will not be compiled again on a subsequent run of dalvik.
+trigger_aot_compilation() {
+ local -r bench_cfg_folder="$1"
+ local -r bits="$2"
+ local -r apk_folder="$3"
+ local -r dalvikvm_flags="$4"
+ local -r check_caching="$5"
+ local -r dalvik_cmdline=$(get_dalvik_cmdline "${bits}" "${apk_folder}" "${dalvikvm_flags}")
+
+ start_adb_section "compile_benchmarks_${bits}"
+ # Use the bench runner with --help option to trigger compilation.
+ safe adb_shell "cd ${apk_folder} && ${dalvik_cmdline} org.linaro.bench.RunBench --help" \
+ > /dev/null
+ safe mkdir -p "${bench_cfg_folder}"
+ if has_art_disassembler "${bits}"; then
+ safe adb pull "${apk_folder}/bench.dalvikvm${bits}.cfg" "${bench_cfg_folder}"
+ else
+ safe touch "${bench_cfg_folder}/bench.dalvikvm${bits}.cfg"
+ fi
+
+ if [[ "${check_caching}" == "true" ]] && has_art_disassembler "${bits}"; then
+ # Remove the cfg file that was generated during compilation step.
+ safe adb_shell "rm ${apk_folder}/bench.dalvikvm${bits}.cfg"
+ # Run the dalvik command again.
+ safe adb_shell "cd ${apk_folder} && ${dalvik_cmdline} org.linaro.bench.RunBench --help" \
+ > /dev/null
+ # Compilation should not have happened again, therefore no cfg file should be present.
+ if adb_shell "[ -f ${apk_folder}/bench.dalvikvm${bits}.cfg ]"; then
+ log E "Dex2oat compile output is not being cached properly."
+ log E "This causes the benchmarks to be recompiled on a subsequent dalvik run."
+ exit 1
+ fi
+ fi
+ end_adb_section "compile_benchmarks_${bits}" "$?"
+}
+
+# Assert that the cpu option is valid.
+validate_cpu_option() {
+ local -r cpu="$1"
+
+ if [[ "${cpu}" != @(all|big|little|default) ]]; then
+ log E "Invalid CPU option: ${cpu}"
+ exit 1
+ fi
+}
+
+# Assert that the mode option is valid.
+validate_mode_option() {
+ local -r mode="$1"
+
+ if [[ "${mode}" != @(all|32|64) ]]; then
+ log E "Invalid mode option: ${mode}"
+ exit 1
+ fi
+}
diff --git a/utils/utils_perf.sh b/utils/utils_perf.sh
new file mode 100644
index 0000000..f5a18f9
--- /dev/null
+++ b/utils/utils_perf.sh
@@ -0,0 +1,181 @@
+#!/bin/bash
+#
+# Copyright (c) 2017, Linaro Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [[ ! -v JCPU_COUNT ]]; then
+ log E "utils_android.sh must be sourced before utils_perf.sh"
+ abort
+fi
+
+# Perf versions 3.14-3.19 have trouble with the --input|-i option and require that a file named
+# perf.data exists in the current directory. This function returns whether a workaround for this
+# issue is required for the host perf.
+requires_perf_workaround() {
+ local -r host_perf_version="$(safe perf version | grep -Eo '[0-9]+\.[0-9]+')"
+ [[ $(bc <<< "3.14 <= ${host_perf_version} && ${host_perf_version} <= 3.19") -eq 1 ]]
+}
+
+# Return a list of events extracted from a js file of PMU events.
+# $1: PMU events file - path to a file on the host machine containing PMU events.
+get_pmu_events_from_file() {
+ local -r events_file="$1"
+ # Look for the lines with the EventCode field, keeping only the event code. Also, convert
+ # '0x' if found at the beginning of the event code into 'r'.
+ local -r events="$(grep -Po '"EventCode":\s*"\K([[:alnum:]]+-*)+' "${events_file}" \
+ | sed -e 's/^0x/r/')"
+ echo "${events}"
+}
+
+# Use perf to record performance data for a single event for a command supplied by the user.
+# This can be any command, so not just a benchmarks command line.
+# Both the command and perf record run on the target device.
+# $1: command - the command that should be recorded.
+# $2: output folder - full path to the folder on the host machine where the perf data will be
+# pulled to.
+# $3: event - PMU event that perf record should be run for.
+# $4: remote perf.data file - full path to the file on the target device that perf record will
+# write to.
+# $5: command log folder - full path to a folder on the target device for logging the command.
+record_single_event() {
+ local -r cmd="$1"
+ local -r output_folder="$2"
+ local -r event="$3"
+ local -r remote_perf_data="$4"
+ local -r log_folder="$5"
+
+ safe mkdir -p "${output_folder}"
+
+ local perf_cmdline="cd /data/local/tmp && perf record "
+ perf_cmdline+="-g -e ${event} -o ${remote_perf_data} ${cmd}"
+ log I "Record commandline: adb shell \"${perf_cmdline}\""
+
+ # Record the command and pull perf.data to the host machine.
+ safe adb_shell "${perf_cmdline}"
+ safe adb pull "${remote_perf_data}" "${output_folder}/${event}.perf.data"
+
+ # Log the command for future reference/use.
+ # TODO: Consider storing perf.data files here as well as the command line.
+ safe adb_shell "mkdir -p ${log_folder}"
+ safe adb_shell "echo \"${perf_cmdline}\" > ${log_folder}/${event}.txt"
+}
+
+# Record benchmarks running for multiple events and use perf report to extract the information
+# from the perf.data files.
+# $1: list of benchmarks - This is a bash "list", i.e. a string of space-separated elements.
+# $2: list of PMU events to record - bash "list".
+# $3: dalvik command line - dalvik command line to be used for running each benchmark, but without
+# the name of the benchmark.
+# $4: perf output folder - full path to the folder on the host machine where the perf.data and
+# benchmark logs for each benchmark will be put.
+# $5: symbols folder - full path to the folder on the host machine which has all the non-stripped
+# binaries that perf will be looking in for symbols. Folder must exist.
+# $6: cpu - which of the CPUs available on the device will be active when the command is recorded.
+# This option only influences the name(s) of the output subfolder(s) that will be created for
+# each benchmark's data.
+# $7: dalvikvm - Which dalvikvm the data was recorded for: dalvikvm32 or dalvikvm64. Serves the
+# same purpose as cpu option.
+# $8: calibration - Run benchmarks with or without calibration: true|false.
+get_benchmarks_performance_data() {
+ local -r bench_list="$1"
+ local -r events="$2"
+ local -r dalvik_cmdline="$3"
+ local -r perf_out_folder="$4"
+ local -r perf_symbols_folder="$5"
+ local -r cpu="$6"
+ local -r dalvikvm="$7"
+ local -r calibration="$8"
+
+ local aarch64_objdump="${ANDROID_BUILD_TOP}/prebuilts/gcc/linux-x86/aarch64"
+ aarch64_objdump+="/aarch64-linux-android-4.9/bin/aarch64-linux-android-objdump"
+ start_adb_section "get_performance_data_${dalvikvm}_${cpu}"
+
+ local bench
+ for bench in ${bench_list}; do
+ # Remove the .java extension.
+ local benchmark_name="${bench%.java}"
+ # Convert slashes to dots, since ${benchmark_name} will be used to construct the name of the
+ # subfolder where all the data is logged for each benchmark.
+ local converted_benchmark_name="benchmarks.${benchmark_name//\//.}"
+ # Create name for local folder for storing perf data and report logs.
+ local bench_local_folder="${perf_out_folder}/${converted_benchmark_name}_${dalvikvm}_${cpu}"
+ # Create name for remote folder for logging the commands used to run the benchmarks.
+ # TODO: Create a unique folder where the command line should go.
+ local bench_log_folder="/data/local/tmp/${converted_benchmark_name}_${dalvikvm}_${cpu}"
+
+ # Construct the full command to be recorded with perf.
+ local cmd
+ if [[ "${calibration}" == "true" ]]; then
+ cmd="${dalvik_cmdline} org.linaro.bench.RunBench ${benchmark_name}"
+ else
+ cmd="${dalvik_cmdline} ${converted_benchmark_name}"
+ fi
+
+ local event
+ for event in ${events}; do
+ safe record_single_event "${cmd}" "${bench_local_folder}" "${event}" \
+ "/data/local/tmp/perf.data" "${bench_log_folder}"
+ # Construct the string command line for perf report.
+ local perf_cmd=(perf report --show-total-period -i ${bench_local_folder}/${event}.perf.data
+ --objdump=${aarch64_objdump} --symfs ${perf_symbols_folder})
+ # Show the command for future use on the host machine.
+ log I "Report command: ${perf_cmd[*]}"
+ safe "${perf_cmd[@]}" > "${bench_local_folder}/${event}.perf.report"
+ done
+ done
+ end_adb_section "get_performance_data_${dalvikvm}_${cpu}" "$?"
+}
+
+# Set up a folder with binaries on the host machine where perf can look for debug symbols.
+# This function will create a folder structure which matches the one encountered by perf on the
+# device, since perf on host will later expect to find files with the exact paths listed in
+# perf.data.
+# $1: symbols folder - full path to the folder on the host machine which has all the non-stripped
+# binaries that perf will be looking in for symbols.
+setup_symbols_folder() {
+ local -r perf_symbols_folder="$1"
+ local -r aosp_symbols_folder="${ANDROID_PRODUCT_OUT}/symbols"
+
+ start_section "setup_symbols_folder"
+ safe mkdir -p "${perf_symbols_folder}${ART_TEST_ANDROID_ROOT}"
+ safe ls "${perf_symbols_folder}${ART_TEST_ANDROID_ROOT}"
+ log I "ART_TEST_ANDROID_ROOT:${ART_TEST_ANDROID_ROOT}"
+ safe cp -r "${aosp_symbols_folder}"/system/* "${perf_symbols_folder}${ART_TEST_ANDROID_ROOT}"
+ safe cp -r "${OUT}/data/art-test" "${perf_symbols_folder}/data/"
+ end_section "setup_symbols_folder" "$?"
+}
+
+# Copy files or make symbolic links into a folder structure which matches the debug information.
+# It might be a bit ugly to do so. But there are issues with '--prefix' option of
+# aarch64-linux-android-objdump. Note: Steps to identify the issue:
+# 1. Take a look at how debug information is encoded in the symbol files.
+# 2. Try to dump the content with disassembly and source lines.
+# No source code intermixed.
+# 3. Try the host version.
+# Source code intermixed perfectly.
+# $1: structured sources folder - full path to the root folder on the host machine where the folder
+# structure should be created. It is not a requirement that this folder exists before this
+# function is called.
+# TODO: Investigate if this is still required.
+setup_sources_folder() {
+ local -r structured_src_folder="$1"
+
+ start_section "setup_sources_folder"
+ safe mkdir -p "${structured_src_folder}"
+ safe cp -rt "${structured_src_folder}" ./benchmarks/benchmarks ./benchmarks/framework/*
+ safe find "${ANDROID_BUILD_TOP}" -maxdepth 1 -mindepth 1 ! -iname "benchmarks" \
+ -exec ln -s {} "${structured_src_folder}" \;
+ end_section "setup_sources_folder" "$?"
+}