author    Eizan Miyamoto <eizan.miyamoto@gmail.com>    2017-11-17 00:16:06 +0000
committer Eizan Miyamoto <eizan.miyamoto@gmail.com>    2017-11-17 00:16:06 +0000
commit    74debdfdd744cd4285d9e6d68b81312ee2fbd71d (patch)
tree      c535f1ecf64349eebd5cf926c50baaef93b27e70 /MicroBenchmarks
parent    4034660caf4eed93c23b3fdd214bdfaba4b58aa5 (diff)
[XRay][test-suite] Upgrade Google benchmark library
Summary: This change allows us to use an updated idiom for defining
microbenchmarks. Version being upgraded from 1.2.0 to 1.3.0.

Reviewers: dberris

Reviewed By: dberris

Subscribers: dschuff, mgorny, javed.absar, krytarowski, llvm-commits, fedor.sergeev

Differential Revision: https://reviews.llvm.org/D40154

git-svn-id: https://llvm.org/svn/llvm-project/test-suite/trunk@318476 91177308-0d34-0410-b5e6-96231b3b80d8
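For reference, the "updated idiom" is the C++11 range-based benchmark loop that 1.3.0 documents in place of the explicit `KeepRunning()` loop (see the README and benchmark.h changes below). A minimal, illustrative sketch of the two styles, not part of this patch (the benchmark names here are invented for the example):

```c++
#include <string>
#include <benchmark/benchmark.h>

// Pre-1.3.0 idiom: explicit KeepRunning() loop.
static void BM_StringCreationOld(benchmark::State& state) {
  while (state.KeepRunning())
    std::string empty_string;
}
BENCHMARK(BM_StringCreationOld);

// 1.3.0 idiom: range-based for over the State object; the iteration
// count can stay in a register, so per-iteration loop overhead is lower.
static void BM_StringCreationNew(benchmark::State& state) {
  for (auto _ : state)
    std::string empty_string;
}
BENCHMARK(BM_StringCreationNew);

BENCHMARK_MAIN();
```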
Diffstat (limited to 'MicroBenchmarks')
-rw-r--r-- MicroBenchmarks/libs/CMakeLists.txt | 2
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h | 310
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/.clang-format (renamed from MicroBenchmarks/libs/benchmark-1.2.0/.clang-format) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/.gitignore (renamed from MicroBenchmarks/libs/benchmark-1.2.0/.gitignore) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/.travis-libcxx-setup.sh (renamed from MicroBenchmarks/libs/benchmark-1.2.0/.travis-libcxx-setup.sh) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/.travis.yml (renamed from MicroBenchmarks/libs/benchmark-1.2.0/.travis.yml) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/.ycm_extra_conf.py (renamed from MicroBenchmarks/libs/benchmark-1.2.0/.ycm_extra_conf.py) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/AUTHORS (renamed from MicroBenchmarks/libs/benchmark-1.2.0/AUTHORS) | 9
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/CMakeLists.txt (renamed from MicroBenchmarks/libs/benchmark-1.2.0/CMakeLists.txt) | 26
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/CONTRIBUTING.md (renamed from MicroBenchmarks/libs/benchmark-1.2.0/CONTRIBUTING.md) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/CONTRIBUTORS (renamed from MicroBenchmarks/libs/benchmark-1.2.0/CONTRIBUTORS) | 12
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/LICENSE (renamed from MicroBenchmarks/libs/benchmark-1.2.0/LICENSE) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/README.md (renamed from MicroBenchmarks/libs/benchmark-1.2.0/README.md) | 212
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/appveyor.yml (renamed from MicroBenchmarks/libs/benchmark-1.2.0/appveyor.yml) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/cmake/AddCXXCompilerFlag.cmake (renamed from MicroBenchmarks/libs/benchmark-1.2.0/cmake/AddCXXCompilerFlag.cmake) | 2
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/cmake/CXXFeatureCheck.cmake (renamed from MicroBenchmarks/libs/benchmark-1.2.0/cmake/CXXFeatureCheck.cmake) | 30
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/cmake/Config.cmake.in (renamed from MicroBenchmarks/libs/benchmark-1.2.0/cmake/Config.cmake.in) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/cmake/GetGitVersion.cmake (renamed from MicroBenchmarks/libs/benchmark-1.2.0/cmake/GetGitVersion.cmake) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/cmake/gnu_posix_regex.cpp (renamed from MicroBenchmarks/libs/benchmark-1.2.0/cmake/gnu_posix_regex.cpp) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/cmake/posix_regex.cpp (renamed from MicroBenchmarks/libs/benchmark-1.2.0/cmake/posix_regex.cpp) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/cmake/std_regex.cpp (renamed from MicroBenchmarks/libs/benchmark-1.2.0/cmake/std_regex.cpp) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/cmake/steady_clock.cpp (renamed from MicroBenchmarks/libs/benchmark-1.2.0/cmake/steady_clock.cpp) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/cmake/thread_safety_attributes.cpp (renamed from MicroBenchmarks/libs/benchmark-1.2.0/cmake/thread_safety_attributes.cpp) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/docs/tools.md (renamed from MicroBenchmarks/libs/benchmark-1.2.0/docs/tools.md) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/benchmark.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark.h) | 227
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/benchmark_api.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark_api.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/reporter.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/reporter.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/mingw.py (renamed from MicroBenchmarks/libs/benchmark-1.2.0/mingw.py) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/CMakeLists.txt (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/CMakeLists.txt) | 46
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/arraysize.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/arraysize.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark.cc) | 29
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_api_internal.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_api_internal.h) | 1
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_register.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_register.cc) | 15
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/check.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/check.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/colorprint.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/colorprint.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/commandlineflags.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/commandlineflags.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.cc) | 104
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.h) | 7
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/console_reporter.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/console_reporter.cc) | 2
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/counter.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/counter.cc) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/counter.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/csv_reporter.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/cycleclock.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/internal_macros.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/json_reporter.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/json_reporter.cc) | 25
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/log.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/log.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/mutex.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/mutex.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/re.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/re.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/reporter.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/reporter.cc) | 1
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/sleep.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/sleep.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.cc | 175
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.h | 37
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.cc) | 12
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.h) | 2
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/sysinfo.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/sysinfo.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/timers.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/src/timers.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/src/timers.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/CMakeLists.txt (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/CMakeLists.txt) | 20
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/basic_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/basic_test.cc) | 35
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/benchmark_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/benchmark_test.cc) | 38
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/complexity_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/complexity_test.cc) | 10
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/cxx03_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/cxx03_test.cc) | 15
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/diagnostics_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/diagnostics_test.cc) | 18
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/donotoptimize_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/donotoptimize_test.cc) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/filter_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/filter_test.cc) | 10
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/fixture_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/fixture_test.cc) | 4
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/map_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/map_test.cc) | 7
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/multiple_ranges_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/multiple_ranges_test.cc) | 6
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/options_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/options_test.cc) | 14
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/output_test.h (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/output_test.h) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/output_test_helper.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/output_test_helper.cc) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/register_benchmark_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/register_benchmark_test.cc) | 8
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/reporter_output_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/reporter_output_test.cc) | 139
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/skip_with_error_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/skip_with_error_test.cc) | 34
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/templated_fixture_test.cc | 28
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_tabular_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/user_counters_tabular_test.cc) | 30
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_test.cc (renamed from MicroBenchmarks/libs/benchmark-1.2.0/test/user_counters_test.cc) | 40
-rwxr-xr-x MicroBenchmarks/libs/benchmark-1.3.0/tools/compare_bench.py (renamed from MicroBenchmarks/libs/benchmark-1.2.0/tools/compare_bench.py) | 11
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run1.json (renamed from MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run1.json) | 44
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run2.json (renamed from MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run2.json) | 48
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/__init__.py (renamed from MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/__init__.py) | 0
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/report.py (renamed from MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/report.py) | 33
-rw-r--r-- MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/util.py (renamed from MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/util.py) | 0
87 files changed, 1153 insertions, 725 deletions
diff --git a/MicroBenchmarks/libs/CMakeLists.txt b/MicroBenchmarks/libs/CMakeLists.txt
index 55a70ddf..a75b9e34 100644
--- a/MicroBenchmarks/libs/CMakeLists.txt
+++ b/MicroBenchmarks/libs/CMakeLists.txt
@@ -1,3 +1,3 @@
-add_subdirectory(benchmark-1.2.0)
+add_subdirectory(benchmark-1.3.0)
test_suite_add_build_dependencies(benchmark)
test_suite_add_build_dependencies(output_test_helper)
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h
deleted file mode 100644
index d356875b..00000000
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h
+++ /dev/null
@@ -1,310 +0,0 @@
-#ifndef BENCHMARK_STAT_H_
-#define BENCHMARK_STAT_H_
-
-#include <cmath>
-#include <limits>
-#include <ostream>
-#include <type_traits>
-
-namespace benchmark {
-
-template <typename VType, typename NumType>
-class Stat1;
-
-template <typename VType, typename NumType>
-class Stat1MinMax;
-
-typedef Stat1<float, int64_t> Stat1_f;
-typedef Stat1<double, int64_t> Stat1_d;
-typedef Stat1MinMax<float, int64_t> Stat1MinMax_f;
-typedef Stat1MinMax<double, int64_t> Stat1MinMax_d;
-
-template <typename VType>
-class Vector2;
-template <typename VType>
-class Vector3;
-template <typename VType>
-class Vector4;
-
-template <typename VType, typename NumType>
-class Stat1 {
- public:
- typedef Stat1<VType, NumType> Self;
-
- Stat1() { Clear(); }
- // Create a sample of value dat and weight 1
- explicit Stat1(const VType &dat) {
- sum_ = dat;
- sum_squares_ = Sqr(dat);
- numsamples_ = 1;
- }
- // Create statistics for all the samples between begin (included)
- // and end(excluded)
- explicit Stat1(const VType *begin, const VType *end) {
- Clear();
- for (const VType *item = begin; item < end; ++item) {
- (*this) += Stat1(*item);
- }
- }
- // Create a sample of value dat and weight w
- Stat1(const VType &dat, const NumType &w) {
- sum_ = w * dat;
- sum_squares_ = w * Sqr(dat);
- numsamples_ = w;
- }
- // Copy operator
- Stat1(const Self &stat) {
- sum_ = stat.sum_;
- sum_squares_ = stat.sum_squares_;
- numsamples_ = stat.numsamples_;
- }
-
- void Clear() {
- numsamples_ = NumType();
- sum_squares_ = sum_ = VType();
- }
-
- Self &operator=(const Self &stat) {
- sum_ = stat.sum_;
- sum_squares_ = stat.sum_squares_;
- numsamples_ = stat.numsamples_;
- return (*this);
- }
- // Merge statistics from two sample sets.
- Self &operator+=(const Self &stat) {
- sum_ += stat.sum_;
- sum_squares_ += stat.sum_squares_;
- numsamples_ += stat.numsamples_;
- return (*this);
- }
- // The operation opposite to +=
- Self &operator-=(const Self &stat) {
- sum_ -= stat.sum_;
- sum_squares_ -= stat.sum_squares_;
- numsamples_ -= stat.numsamples_;
- return (*this);
- }
- // Multiply the weight of the set of samples by a factor k
- Self &operator*=(const VType &k) {
- sum_ *= k;
- sum_squares_ *= k;
- numsamples_ *= k;
- return (*this);
- }
-
- // Merge statistics from two sample sets.
- Self operator+(const Self &stat) const { return Self(*this) += stat; }
-
- // The operation opposite to +
- Self operator-(const Self &stat) const { return Self(*this) -= stat; }
-
- // Multiply the weight of the set of samples by a factor k
- Self operator*(const VType &k) const { return Self(*this) *= k; }
-
- // Return the total weight of this sample set
- NumType numSamples() const { return numsamples_; }
-
- // Return the sum of this sample set
- VType Sum() const { return sum_; }
-
- // Return the mean of this sample set
- VType Mean() const {
- if (numsamples_ == 0) return VType();
- return sum_ * (1.0 / numsamples_);
- }
-
- // Return the mean of this sample set and compute the standard deviation at
- // the same time.
- VType Mean(VType *stddev) const {
- if (numsamples_ == 0) return VType();
- VType mean = sum_ * (1.0 / numsamples_);
- if (stddev) {
- // Sample standard deviation is undefined for n = 1
- if (numsamples_ == 1) {
- *stddev = VType();
- } else {
- VType avg_squares = sum_squares_ * (1.0 / numsamples_);
- *stddev = Sqrt(numsamples_ / (numsamples_ - 1.0) * (avg_squares - Sqr(mean)));
- }
- }
- return mean;
- }
-
- // Return the standard deviation of the sample set
- VType StdDev() const {
- VType stddev = VType();
- Mean(&stddev);
- return stddev;
- }
-
- private:
- static_assert(std::is_integral<NumType>::value &&
- !std::is_same<NumType, bool>::value,
- "NumType must be an integral type that is not bool.");
- // Let i be the index of the samples provided (using +=)
- // and weight[i],value[i] be the data of sample #i
- // then the variables have the following meaning:
- NumType numsamples_; // sum of weight[i];
- VType sum_; // sum of weight[i]*value[i];
- VType sum_squares_; // sum of weight[i]*value[i]^2;
-
- // Template function used to square a number.
- // For a vector we square all components
- template <typename SType>
- static inline SType Sqr(const SType &dat) {
- return dat * dat;
- }
-
- template <typename SType>
- static inline Vector2<SType> Sqr(const Vector2<SType> &dat) {
- return dat.MulComponents(dat);
- }
-
- template <typename SType>
- static inline Vector3<SType> Sqr(const Vector3<SType> &dat) {
- return dat.MulComponents(dat);
- }
-
- template <typename SType>
- static inline Vector4<SType> Sqr(const Vector4<SType> &dat) {
- return dat.MulComponents(dat);
- }
-
- // Template function used to take the square root of a number.
- // For a vector we square all components
- template <typename SType>
- static inline SType Sqrt(const SType &dat) {
- // Avoid NaN due to imprecision in the calculations
- if (dat < 0) return 0;
- return sqrt(dat);
- }
-
- template <typename SType>
- static inline Vector2<SType> Sqrt(const Vector2<SType> &dat) {
- // Avoid NaN due to imprecision in the calculations
- return Max(dat, Vector2<SType>()).Sqrt();
- }
-
- template <typename SType>
- static inline Vector3<SType> Sqrt(const Vector3<SType> &dat) {
- // Avoid NaN due to imprecision in the calculations
- return Max(dat, Vector3<SType>()).Sqrt();
- }
-
- template <typename SType>
- static inline Vector4<SType> Sqrt(const Vector4<SType> &dat) {
- // Avoid NaN due to imprecision in the calculations
- return Max(dat, Vector4<SType>()).Sqrt();
- }
-};
-
-// Useful printing function
-template <typename VType, typename NumType>
-std::ostream &operator<<(std::ostream &out, const Stat1<VType, NumType> &s) {
- out << "{ avg = " << s.Mean() << " std = " << s.StdDev()
- << " nsamples = " << s.NumSamples() << "}";
- return out;
-}
-
-// Stat1MinMax: same as Stat1, but it also
-// keeps the Min and Max values; the "-"
-// operator is disabled because it cannot be implemented
-// efficiently
-template <typename VType, typename NumType>
-class Stat1MinMax : public Stat1<VType, NumType> {
- public:
- typedef Stat1MinMax<VType, NumType> Self;
-
- Stat1MinMax() { Clear(); }
- // Create a sample of value dat and weight 1
- explicit Stat1MinMax(const VType &dat) : Stat1<VType, NumType>(dat) {
- max_ = dat;
- min_ = dat;
- }
- // Create statistics for all the samples between begin (included)
- // and end(excluded)
- explicit Stat1MinMax(const VType *begin, const VType *end) {
- Clear();
- for (const VType *item = begin; item < end; ++item) {
- (*this) += Stat1MinMax(*item);
- }
- }
- // Create a sample of value dat and weight w
- Stat1MinMax(const VType &dat, const NumType &w)
- : Stat1<VType, NumType>(dat, w) {
- max_ = dat;
- min_ = dat;
- }
- // Copy operator
- Stat1MinMax(const Self &stat) : Stat1<VType, NumType>(stat) {
- max_ = stat.max_;
- min_ = stat.min_;
- }
-
- void Clear() {
- Stat1<VType, NumType>::Clear();
- if (std::numeric_limits<VType>::has_infinity) {
- min_ = std::numeric_limits<VType>::infinity();
- max_ = -std::numeric_limits<VType>::infinity();
- } else {
- min_ = std::numeric_limits<VType>::max();
- max_ = std::numeric_limits<VType>::min();
- }
- }
-
- Self &operator=(const Self &stat) {
- this->Stat1<VType, NumType>::operator=(stat);
- max_ = stat.max_;
- min_ = stat.min_;
- return (*this);
- }
- // Merge statistics from two sample sets.
- Self &operator+=(const Self &stat) {
- this->Stat1<VType, NumType>::operator+=(stat);
- if (stat.max_ > max_) max_ = stat.max_;
- if (stat.min_ < min_) min_ = stat.min_;
- return (*this);
- }
- // Multiply the weight of the set of samples by a factor k
- Self &operator*=(const VType &stat) {
- this->Stat1<VType, NumType>::operator*=(stat);
- return (*this);
- }
- // Merge statistics from two sample sets.
- Self operator+(const Self &stat) const { return Self(*this) += stat; }
- // Multiply the weight of the set of samples by a factor k
- Self operator*(const VType &k) const { return Self(*this) *= k; }
-
- // Return the maximal value in this sample set
- VType Max() const { return max_; }
- // Return the minimal value in this sample set
- VType Min() const { return min_; }
-
- private:
- // The - operation makes no sense with Min/Max
- // unless we keep the full list of values (but we don't)
- // make it private, and let it undefined so nobody can call it
- Self &operator-=(const Self &stat); // senseless. let it undefined.
-
- // The operation opposite to -
- Self operator-(const Self &stat) const; // senseless. let it undefined.
-
- // Let i be the index of the samples provided (using +=)
- // and weight[i],value[i] be the data of sample #i
- // then the variables have the following meaning:
- VType max_; // max of value[i]
- VType min_; // min of value[i]
-};
-
-// Useful printing function
-template <typename VType, typename NumType>
-std::ostream &operator<<(std::ostream &out,
- const Stat1MinMax<VType, NumType> &s) {
- out << "{ avg = " << s.Mean() << " std = " << s.StdDev()
- << " nsamples = " << s.NumSamples() << " min = " << s.Min()
- << " max = " << s.Max() << "}";
- return out;
-}
-} // end namespace benchmark
-
-#endif // BENCHMARK_STAT_H_
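The weighted mean/stddev helpers deleted above are superseded in 1.3.0 by the new src/statistics.{h,cc} files and the user-definable `ComputeStatistics` hook (see the `Statistics` struct in the benchmark.h hunks and the "User-defined statistics" README section later in this diff). A hedged sketch of that replacement idiom, mirroring the README's `BM_spin_empty` example rather than anything in this patch's build files:

```c++
#include <algorithm>
#include <vector>
#include <benchmark/benchmark.h>

static void BM_SpinEmpty(benchmark::State& state) {
  for (auto _ : state) {
    for (int x = 0; x < state.range(0); ++x)
      benchmark::DoNotOptimize(x);
  }
}

// With repetitions enabled, 1.3.0 reports mean, median and stddev across
// runs, plus any user-defined statistic such as the "max" computed below.
BENCHMARK(BM_SpinEmpty)
    ->Arg(512)
    ->Repetitions(10)
    ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
      return *std::max_element(v.begin(), v.end());
    });

BENCHMARK_MAIN();
```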
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/.clang-format b/MicroBenchmarks/libs/benchmark-1.3.0/.clang-format
index 4b3f13fa..4b3f13fa 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/.clang-format
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/.clang-format
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/.gitignore b/MicroBenchmarks/libs/benchmark-1.3.0/.gitignore
index 3c1b4f21..3c1b4f21 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/.gitignore
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/.gitignore
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/.travis-libcxx-setup.sh b/MicroBenchmarks/libs/benchmark-1.3.0/.travis-libcxx-setup.sh
index a591743c..a591743c 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/.travis-libcxx-setup.sh
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/.travis-libcxx-setup.sh
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/.travis.yml b/MicroBenchmarks/libs/benchmark-1.3.0/.travis.yml
index 36df0884..36df0884 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/.travis.yml
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/.travis.yml
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/.ycm_extra_conf.py b/MicroBenchmarks/libs/benchmark-1.3.0/.ycm_extra_conf.py
index 86194357..86194357 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/.ycm_extra_conf.py
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/.ycm_extra_conf.py
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/AUTHORS b/MicroBenchmarks/libs/benchmark-1.3.0/AUTHORS
index ae278df4..cb1080d9 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/AUTHORS
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/AUTHORS
@@ -10,9 +10,11 @@
Albert Pretorius <pretoalb@gmail.com>
Arne Beer <arne@twobeer.de>
+Carto
Christopher Seymour <chris.j.seymour@hotmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
-Dominic Hamon <dma@stripysock.com>
+Dirac Research
+Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
@@ -21,8 +23,8 @@ Google Inc.
International Business Machines Corporation
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
-Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
+Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Lei Xu <eddyxu@gmail.com>
@@ -32,9 +34,8 @@ Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Paul Redmond <paul.redmond@gmail.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
+Roman Lebedev <lebedev.ri@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
-Dirac Research
Zbigniew Skowron <zbychs@gmail.com>
-Dominik Czarnota <dominik.b.czarnota@gmail.com>
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/CMakeLists.txt b/MicroBenchmarks/libs/benchmark-1.3.0/CMakeLists.txt
index f7f1566f..fb8acb65 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/CMakeLists.txt
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/CMakeLists.txt
@@ -15,7 +15,8 @@ option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
-option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library" OFF)
+option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
+option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON)
# Make sure we can import out CMake functions
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
@@ -135,24 +136,21 @@ else()
endif()
# Coverage build type
- set(CMAKE_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}" CACHE STRING
- "Flags used by the C++ compiler during coverage builds."
+ set(BENCHMARK_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}"
+ CACHE STRING "Flags used by the C++ compiler during coverage builds."
FORCE)
- set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
- "${CMAKE_EXE_LINKER_FLAGS_DEBUG}" CACHE STRING
- "Flags used for linking binaries during coverage builds."
+ set(BENCHMARK_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG}"
+ CACHE STRING "Flags used for linking binaries during coverage builds."
FORCE)
- set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
- "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" CACHE STRING
- "Flags used by the shared libraries linker during coverage builds."
+ set(BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}"
+ CACHE STRING "Flags used by the shared libraries linker during coverage builds."
FORCE)
mark_as_advanced(
- CMAKE_CXX_FLAGS_COVERAGE
- CMAKE_EXE_LINKER_FLAGS_COVERAGE
- CMAKE_SHARED_LINKER_FLAGS_COVERAGE)
+ BENCHMARK_CXX_FLAGS_COVERAGE
+ BENCHMARK_EXE_LINKER_FLAGS_COVERAGE
+ BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE)
set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING
- "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage."
- FORCE)
+ "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.")
add_cxx_compiler_flag(--coverage COVERAGE)
endif()
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/CONTRIBUTING.md b/MicroBenchmarks/libs/benchmark-1.3.0/CONTRIBUTING.md
index 43de4c9d..43de4c9d 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/CONTRIBUTING.md
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/CONTRIBUTING.md
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/CONTRIBUTORS b/MicroBenchmarks/libs/benchmark-1.3.0/CONTRIBUTORS
index 9abb6086..457e0ea4 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/CONTRIBUTORS
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/CONTRIBUTORS
@@ -28,18 +28,19 @@ Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Christopher Seymour <chris.j.seymour@hotmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
-Dominic Hamon <dma@stripysock.com>
+Dominic Hamon <dma@stripysock.com> <dominic@google.com>
+Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
-Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
+Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
-Kaito Udagawa <umireon@gmail.com>
Kai Wolf <kai.wolf@gmail.com>
+Kaito Udagawa <umireon@gmail.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
@@ -49,11 +50,12 @@ Pascal Leroy <phl@google.com>
Paul Redmond <paul.redmond@gmail.com>
Pierre Phaneuf <pphaneuf@google.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
+Raul Marin <rmrodriguez@cartodb.com>
Ray Glover <ray.glover@uk.ibm.com>
+Roman Lebedev <lebedev.ri@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
+Tobias Ulvgård <tobias.ulvgard@dirac.se>
Tom Madams <tom.ej.madams@gmail.com> <tmadams@google.com>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
-Tobias Ulvgård <tobias.ulvgard@dirac.se>
Zbigniew Skowron <zbychs@gmail.com>
-Dominik Czarnota <dominik.b.czarnota@gmail.com>
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/LICENSE b/MicroBenchmarks/libs/benchmark-1.3.0/LICENSE
index d6456956..d6456956 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/LICENSE
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/LICENSE
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/README.md b/MicroBenchmarks/libs/benchmark-1.3.0/README.md
index 2430d93b..67209090 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/README.md
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/README.md
@@ -18,8 +18,10 @@ IRC channel: https://freenode.net #googlebenchmark
Define a function that executes the code to be measured.
```c++
+#include <benchmark/benchmark.h>
+
static void BM_StringCreation(benchmark::State& state) {
- while (state.KeepRunning())
+ for (auto _ : state)
std::string empty_string;
}
// Register the function as a benchmark
@@ -28,7 +30,7 @@ BENCHMARK(BM_StringCreation);
// Define another benchmark
static void BM_StringCopy(benchmark::State& state) {
std::string x = "hello";
- while (state.KeepRunning())
+ for (auto _ : state)
std::string copy(x);
}
BENCHMARK(BM_StringCopy);
@@ -36,6 +38,8 @@ BENCHMARK(BM_StringCopy);
BENCHMARK_MAIN();
```
+Don't forget to inform your linker to add benchmark library e.g. through `-lbenchmark` compilation flag.
+
### Passing arguments
Sometimes a family of benchmarks can be implemented with just one routine that
takes an extra argument to specify which one of the family of benchmarks to
@@ -47,7 +51,7 @@ static void BM_memcpy(benchmark::State& state) {
char* src = new char[state.range(0)];
char* dst = new char[state.range(0)];
memset(src, 'x', state.range(0));
- while (state.KeepRunning())
+ for (auto _ : state)
memcpy(dst, src, state.range(0));
state.SetBytesProcessed(int64_t(state.iterations()) *
int64_t(state.range(0)));
@@ -80,22 +84,23 @@ insertion.
```c++
static void BM_SetInsert(benchmark::State& state) {
- while (state.KeepRunning()) {
+ std::set<int> data;
+ for (auto _ : state) {
state.PauseTiming();
- std::set<int> data = ConstructRandomSet(state.range(0));
+ data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j)
data.insert(RandomNumber());
}
}
BENCHMARK(BM_SetInsert)
- ->Args({1<<10, 1})
- ->Args({1<<10, 8})
- ->Args({1<<10, 64})
+ ->Args({1<<10, 128})
+ ->Args({2<<10, 128})
+ ->Args({4<<10, 128})
+ ->Args({8<<10, 128})
->Args({1<<10, 512})
- ->Args({8<<10, 1})
- ->Args({8<<10, 8})
- ->Args({8<<10, 64})
+ ->Args({2<<10, 512})
+ ->Args({4<<10, 512})
->Args({8<<10, 512});
```
@@ -105,7 +110,7 @@ product of the two specified ranges and will generate a benchmark for each such
pair.
```c++
-BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {1, 512}});
+BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
```
For more complex patterns of inputs, passing a custom function to `Apply` allows
@@ -131,7 +136,7 @@ running time and the normalized root-mean square error of string comparison.
static void BM_StringCompare(benchmark::State& state) {
std::string s1(state.range(0), '-');
std::string s2(state.range(0), '-');
- while (state.KeepRunning()) {
+ for (auto _ : state) {
benchmark::DoNotOptimize(s1.compare(s2));
}
state.SetComplexityN(state.range(0));
@@ -165,7 +170,7 @@ absence of multiprogramming.
template <class Q> int BM_Sequential(benchmark::State& state) {
Q q;
typename Q::value_type v;
- while (state.KeepRunning()) {
+ for (auto _ : state) {
for (int i = state.range(0); i--; )
q.push(v);
for (int e = state.range(0); e--; )
@@ -181,7 +186,7 @@ BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
Three macros are provided for adding benchmark templates.
```c++
-#if __cplusplus >= 201103L // C++11 and greater.
+#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
#else // C++ < C++11
#define BENCHMARK_TEMPLATE(func, arg1)
@@ -190,6 +195,62 @@ Three macros are provided for adding benchmark templates.
#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
```
+### A Faster KeepRunning loop
+
+In C++11 mode, a ranged-based for loop should be used in preference to
+the `KeepRunning` loop for running the benchmarks. For example:
+
+```c++
+static void BM_Fast(benchmark::State &state) {
+ for (auto _ : state) {
+ FastOperation();
+ }
+}
+BENCHMARK(BM_Fast);
+```
+
+The reason the ranged-for loop is faster than using `KeepRunning`, is
+because `KeepRunning` requires a memory load and store of the iteration count
+ever iteration, whereas the ranged-for variant is able to keep the iteration count
+in a register.
+
+For example, an empty inner loop of using the ranged-based for method looks like:
+
+```asm
+# Loop Init
+ mov rbx, qword ptr [r14 + 104]
+ call benchmark::State::StartKeepRunning()
+ test rbx, rbx
+ je .LoopEnd
+.LoopHeader: # =>This Inner Loop Header: Depth=1
+ add rbx, -1
+ jne .LoopHeader
+.LoopEnd:
+```
+
+Compared to an empty `KeepRunning` loop, which looks like:
+
+```asm
+.LoopHeader: # in Loop: Header=BB0_3 Depth=1
+ cmp byte ptr [rbx], 1
+ jne .LoopInit
+.LoopBody: # =>This Inner Loop Header: Depth=1
+ mov rax, qword ptr [rbx + 8]
+ lea rcx, [rax + 1]
+ mov qword ptr [rbx + 8], rcx
+ cmp rax, qword ptr [rbx + 104]
+ jb .LoopHeader
+ jmp .LoopEnd
+.LoopInit:
+ mov rdi, rbx
+ call benchmark::State::StartKeepRunning()
+ jmp .LoopBody
+.LoopEnd:
+```
+
+Unless C++03 compatibility is required, the ranged-for variant of writing
+the benchmark loop should be preferred.
+
## Passing arbitrary arguments to a benchmark
In C++11 it is possible to define a benchmark that takes an arbitrary number
of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
@@ -199,11 +260,11 @@ The `test_case_name` is appended to the name of the benchmark and
should describe the values passed.
```c++
-template <class ...ExtraArgs>`
+template <class ...ExtraArgs>
void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
[...]
}
-// Registers a benchmark named "BM_takes_args/int_string_test` that passes
+// Registers a benchmark named "BM_takes_args/int_string_test" that passes
// the specified values to `extra_args`.
BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
```
@@ -223,8 +284,7 @@ scope, the `RegisterBenchmark` can be called anywhere. This allows for
benchmark tests to be registered programmatically.
Additionally `RegisterBenchmark` allows any callable object to be registered
-as a benchmark. Including capturing lambdas and function objects. This
-allows the creation
+as a benchmark. Including capturing lambdas and function objects.
For Example:
```c++
@@ -240,9 +300,10 @@ int main(int argc, char** argv) {
### Multithreaded benchmarks
In a multithreaded test (benchmark invoked by multiple threads simultaneously),
-it is guaranteed that none of the threads will start until all have called
-`KeepRunning`, and all will have finished before KeepRunning returns false. As
-such, any global setup or teardown can be wrapped in a check against the thread
+it is guaranteed that none of the threads will start until all have reached
+the start of the benchmark loop, and all will have finished before any thread
+exits the benchmark loop. (This behavior is also provided by the `KeepRunning()`
+API) As such, any global setup or teardown can be wrapped in a check against the thread
index:
```c++
@@ -250,7 +311,7 @@ static void BM_MultiThreaded(benchmark::State& state) {
if (state.thread_index == 0) {
// Setup code here.
}
- while (state.KeepRunning()) {
+ for (auto _ : state) {
// Run the test as normal.
}
if (state.thread_index == 0) {
@@ -274,10 +335,10 @@ Without `UseRealTime`, CPU time is used by default.
## Manual timing
For benchmarking something for which neither CPU time nor real-time are
correct or accurate enough, completely manual timing is supported using
-the `UseManualTime` function.
+the `UseManualTime` function.
When `UseManualTime` is used, the benchmarked code must call
-`SetIterationTime` once per iteration of the `KeepRunning` loop to
+`SetIterationTime` once per iteration of the benchmark loop to
report the manually measured time.
An example use case for this is benchmarking GPU execution (e.g. OpenCL
@@ -293,7 +354,7 @@ static void BM_ManualTiming(benchmark::State& state) {
static_cast<double>(microseconds)
};
- while (state.KeepRunning()) {
+ for (auto _ : state) {
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(sleep_duration);
@@ -316,7 +377,7 @@ functions can be used.
```c++
static void BM_test(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
int x = 0;
for (int i=0; i < 64; ++i) {
benchmark::DoNotOptimize(x += i);
@@ -355,7 +416,7 @@ away.
```c++
static void BM_vector_push_back(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
std::vector<int> v;
v.reserve(1);
benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
@@ -384,7 +445,7 @@ the minimum time, or the wallclock time is 5x minimum time. The minimum time is
set as a flag `--benchmark_min_time` or per benchmark by calling `MinTime` on
the registered benchmark object.
-## Reporting the mean and standard devation by repeated benchmarks
+## Reporting the mean, median and standard deviation by repeated benchmarks
By default each benchmark is run once and that single result is reported.
However benchmarks are often noisy and a single result may not be representative
of the overall behavior. For this reason it's possible to repeatedly rerun the
@@ -392,19 +453,42 @@ benchmark.
The number of runs of each benchmark is specified globally by the
`--benchmark_repetitions` flag or on a per benchmark basis by calling
-`Repetitions` on the registered benchmark object. When a benchmark is run
-more than once the mean and standard deviation of the runs will be reported.
+`Repetitions` on the registered benchmark object. When a benchmark is run more
+than once the mean, median and standard deviation of the runs will be reported.
Additionally the `--benchmark_report_aggregates_only={true|false}` flag or
`ReportAggregatesOnly(bool)` function can be used to change how repeated tests
are reported. By default the result of each repeated run is reported. When this
-option is 'true' only the mean and standard deviation of the runs is reported.
+option is `true` only the mean, median and standard deviation of the runs is reported.
Calling `ReportAggregatesOnly(bool)` on a registered benchmark object overrides
the value of the flag for that benchmark.
+## User-defined statistics for repeated benchmarks
+While having mean, median and standard deviation is nice, this may not be
+enough for everyone. For example you may want to know what is the largest
+observation, e.g. because you have some real-time constraints. This is easy.
+The following code will specify a custom statistic to be calculated, defined
+by a lambda function.
+
+```c++
+void BM_spin_empty(benchmark::State& state) {
+ for (auto _ : state) {
+ for (int x = 0; x < state.range(0); ++x) {
+ benchmark::DoNotOptimize(x);
+ }
+ }
+}
+
+BENCHMARK(BM_spin_empty)
+ ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
+ return *(std::max_element(std::begin(v), std::end(v)));
+ })
+ ->Arg(512);
+```
+
## Fixtures
Fixture tests are created by
-first defining a type that derives from ::benchmark::Fixture and then
+first defining a type that derives from `::benchmark::Fixture` and then
creating/registering the tests using the following macros:
* `BENCHMARK_F(ClassName, Method)`
@@ -417,13 +501,13 @@ For Example:
class MyFixture : public benchmark::Fixture {};
BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
- while (st.KeepRunning()) {
+ for (auto _ : st) {
...
}
}
BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
- while (st.KeepRunning()) {
+ for (auto _ : st) {
...
}
}
@@ -432,6 +516,31 @@ BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
/* BarTest is now registered */
```
+### Templated fixtures
+Also you can create templated fixture by using the following macros:
+
+* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
+* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
+
+For example:
+```c++
+template<typename T>
+class MyFixture : public benchmark::Fixture {};
+
+BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+
+BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+
+BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
+```
## User-defined counters
@@ -441,7 +550,7 @@ will add columns "Foo", "Bar" and "Baz" in its output:
```c++
static void UserCountersExample1(benchmark::State& state) {
double numFoos = 0, numBars = 0, numBazs = 0;
- while (state.KeepRunning()) {
+ for (auto _ : state) {
// ... count Foo,Bar,Baz events
}
state.counters["Foo"] = numFoos;
@@ -564,11 +673,12 @@ When errors caused by external influences, such as file I/O and network
communication, occur within a benchmark the
`State::SkipWithError(const char* msg)` function can be used to skip that run
of benchmark and report the error. Note that only future iterations of the
-`KeepRunning()` are skipped. Users may explicitly return to exit the
-benchmark immediately.
+`KeepRunning()` are skipped. For the ranged-for version of the benchmark loop
+Users must explicitly exit the loop, otherwise all iterations will be performed.
+Users may explicitly return to exit the benchmark immediately.
The `SkipWithError(...)` function may be used at any point within the benchmark,
-including before and after the `KeepRunning()` loop.
+including before and after the benchmark loop.
For example:
@@ -579,7 +689,7 @@ static void BM_test(benchmark::State& state) {
state.SkipWithError("Resource is not good!");
// KeepRunning() loop will not be entered.
}
- while (state.KeepRunning()) {
+ for (state.KeepRunning()) {
auto data = resource.read_data();
if (!resource.good()) {
state.SkipWithError("Failed to read data!");
@@ -588,6 +698,14 @@ static void BM_test(benchmark::State& state) {
do_stuff(data);
}
}
+
+static void BM_test_ranged_fo(benchmark::State & state) {
+ state.SkipWithError("test will not be entered");
+ for (auto _ : state) {
+ state.SkipWithError("Failed!");
+ break; // REQUIRED to prevent all further iterations.
+ }
+}
```
## Running a subset of the benchmarks
@@ -614,7 +732,7 @@ The library supports multiple output formats. Use the
is the default format.
The Console format is intended to be a human readable format. By default
-the format generates color output. Context is output on stderr and the
+the format generates color output. Context is output on stderr and the
tabular data on stdout. Example tabular output looks like:
```
Benchmark Time(ns) CPU(ns) Iterations
@@ -717,6 +835,18 @@ Anything older *may* work.
Note: Using the library and its headers in C++03 is supported. C++11 is only
required to build the library.
+## Disable CPU frequency scaling
+If you see this error:
+```
+***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
+```
+you might want to disable the CPU frequency scaling while running the benchmark:
+```bash
+sudo cpupower frequency-set --governor performance
+./mybench
+sudo cpupower frequency-set --governor powersave
+```
+
# Known Issues
### Windows
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/appveyor.yml b/MicroBenchmarks/libs/benchmark-1.3.0/appveyor.yml
index e084f386..e084f386 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/appveyor.yml
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/appveyor.yml
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/AddCXXCompilerFlag.cmake b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/AddCXXCompilerFlag.cmake
index 0b176ba2..17d5f3dc 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/AddCXXCompilerFlag.cmake
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/AddCXXCompilerFlag.cmake
@@ -38,7 +38,7 @@ function(add_cxx_compiler_flag FLAG)
if(ARGV1)
string(TOUPPER "_${VARIANT}" VARIANT)
endif()
- set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
+ set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
endif()
endfunction()
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/CXXFeatureCheck.cmake b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/CXXFeatureCheck.cmake
index 2c4460f0..b2a82171 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/CXXFeatureCheck.cmake
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/CXXFeatureCheck.cmake
@@ -22,18 +22,35 @@ function(cxx_feature_check FILE)
string(TOUPPER ${FILE} VAR)
string(TOUPPER "HAVE_${VAR}" FEATURE)
if (DEFINED HAVE_${VAR})
- set(HAVE_${VAR} 1 CACHE INTERNAL "Feature test for ${FILE}" PARENT_SCOPE)
+ set(HAVE_${VAR} 1 PARENT_SCOPE)
add_definitions(-DHAVE_${VAR})
return()
endif()
+
message("-- Performing Test ${FEATURE}")
- try_run(RUN_${FEATURE} COMPILE_${FEATURE}
- ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
- CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
- LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
+ if(CMAKE_CROSSCOMPILING)
+ try_compile(COMPILE_${FEATURE}
+ ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
+ CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
+ LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
+ if(COMPILE_${FEATURE})
+ message(WARNING
+ "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
+ set(RUN_${FEATURE} 0)
+ else()
+ set(RUN_${FEATURE} 1)
+ endif()
+ else()
+ message("-- Performing Test ${FEATURE}")
+ try_run(RUN_${FEATURE} COMPILE_${FEATURE}
+ ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
+ CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
+ LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
+ endif()
+
if(RUN_${FEATURE} EQUAL 0)
message("-- Performing Test ${FEATURE} -- success")
- set(HAVE_${VAR} 1 CACHE INTERNAL "Feature test for ${FILE}" PARENT_SCOPE)
+ set(HAVE_${VAR} 1 PARENT_SCOPE)
add_definitions(-DHAVE_${VAR})
else()
if(NOT COMPILE_${FEATURE})
@@ -43,4 +60,3 @@ function(cxx_feature_check FILE)
endif()
endif()
endfunction()
-
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/Config.cmake.in b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/Config.cmake.in
index 6e9256ee..6e9256ee 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/Config.cmake.in
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/Config.cmake.in
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/GetGitVersion.cmake b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/GetGitVersion.cmake
index 8dd94800..8dd94800 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/GetGitVersion.cmake
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/GetGitVersion.cmake
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/gnu_posix_regex.cpp b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/gnu_posix_regex.cpp
index b5b91cda..b5b91cda 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/gnu_posix_regex.cpp
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/gnu_posix_regex.cpp
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/posix_regex.cpp b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/posix_regex.cpp
index 466dc625..466dc625 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/posix_regex.cpp
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/posix_regex.cpp
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/std_regex.cpp b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/std_regex.cpp
index 696f2a26..696f2a26 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/std_regex.cpp
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/std_regex.cpp
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/steady_clock.cpp b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/steady_clock.cpp
index 66d50d17..66d50d17 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/steady_clock.cpp
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/steady_clock.cpp
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/thread_safety_attributes.cpp b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/thread_safety_attributes.cpp
index 46161bab..46161bab 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/cmake/thread_safety_attributes.cpp
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/cmake/thread_safety_attributes.cpp
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/docs/tools.md b/MicroBenchmarks/libs/benchmark-1.3.0/docs/tools.md
index f176f74a..f176f74a 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/docs/tools.md
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/docs/tools.md
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark.h b/MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/benchmark.h
index bd3b0ffb..d529e4bf 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/benchmark.h
@@ -18,7 +18,7 @@
// Define a function that executes the code to be measured a
// specified number of times:
static void BM_StringCreation(benchmark::State& state) {
- while (state.KeepRunning())
+ for (auto _ : state)
std::string empty_string;
}
@@ -28,7 +28,7 @@ BENCHMARK(BM_StringCreation);
// Define another benchmark
static void BM_StringCopy(benchmark::State& state) {
std::string x = "hello";
- while (state.KeepRunning())
+ for (auto _ : state)
std::string copy(x);
}
BENCHMARK(BM_StringCopy);
@@ -54,7 +54,7 @@ int main(int argc, char** argv) {
static void BM_memcpy(benchmark::State& state) {
char* src = new char[state.range(0)]; char* dst = new char[state.range(0)];
memset(src, 'x', state.range(0));
- while (state.KeepRunning())
+ for (auto _ : state)
memcpy(dst, src, state.range(0));
state.SetBytesProcessed(int64_t(state.iterations()) *
int64_t(state.range(0)));
@@ -72,29 +72,30 @@ BENCHMARK(BM_memcpy)->Range(8, 8<<10);
// example, the following code defines a family of microbenchmarks for
// measuring the speed of set insertion.
static void BM_SetInsert(benchmark::State& state) {
- while (state.KeepRunning()) {
+ set<int> data;
+ for (auto _ : state) {
state.PauseTiming();
- set<int> data = ConstructRandomSet(state.range(0));
+ data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j)
data.insert(RandomNumber());
}
}
BENCHMARK(BM_SetInsert)
- ->Args({1<<10, 1})
- ->Args({1<<10, 8})
- ->Args({1<<10, 64})
+ ->Args({1<<10, 128})
+ ->Args({2<<10, 128})
+ ->Args({4<<10, 128})
+ ->Args({8<<10, 128})
->Args({1<<10, 512})
- ->Args({8<<10, 1})
- ->Args({8<<10, 8})
- ->Args({8<<10, 64})
+ ->Args({2<<10, 512})
+ ->Args({4<<10, 512})
->Args({8<<10, 512});
// The preceding code is quite repetitive, and can be replaced with
// the following short-hand. The following macro will pick a few
// appropriate arguments in the product of the two specified ranges
// and will generate a microbenchmark for each such pair.
-BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {1, 512}});
+BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
// For more complex patterns of inputs, passing a custom function
// to Apply allows programmatic specification of an
@@ -114,7 +115,7 @@ BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
template <class Q> int BM_Sequential(benchmark::State& state) {
Q q;
typename Q::value_type v;
- while (state.KeepRunning()) {
+ for (auto _ : state) {
for (int i = state.range(0); i--; )
q.push(v);
for (int e = state.range(0); e--; )
@@ -135,15 +136,15 @@ void BM_test(benchmark::State& state) {
BENCHMARK(BM_test)->MinTime(2.0); // Run for at least 2 seconds.
In a multithreaded test, it is guaranteed that none of the threads will start
-until all have called KeepRunning, and all will have finished before KeepRunning
-returns false. As such, any global setup or teardown you want to do can be
-wrapped in a check against the thread index:
+until all have reached the loop start, and all will have finished before any
+thread exits the loop body. As such, any global setup or teardown you want to
+do can be wrapped in a check against the thread index:
static void BM_MultiThreaded(benchmark::State& state) {
if (state.thread_index == 0) {
// Setup code here.
}
- while (state.KeepRunning()) {
+ for (auto _ : state) {
// Run the test as normal.
}
if (state.thread_index == 0) {
@@ -164,7 +165,8 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#define BENCHMARK_BENCHMARK_H_
-#if __cplusplus >= 201103L
+// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
+#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
#define BENCHMARK_HAS_CXX11
#endif
@@ -237,7 +239,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#endif
-
namespace benchmark {
class BenchmarkReporter;
@@ -289,10 +290,12 @@ BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
} // namespace internal
-#if !defined(__GNUC__) || defined(__pnacl__) || defined(EMSCRIPTN)
+#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
+ defined(EMSCRIPTN)
# define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
#endif
+
// The DoNotOptimize(...) function can be used to prevent a value or
// expression from being optimized away by the compiler. This function is
// intended to add little to no overhead.
@@ -378,6 +381,18 @@ enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
// computational complexity for the benchmark.
typedef double(BigOFunc)(int);
+// StatisticsFunc is passed to a benchmark in order to compute some descriptive
+// statistics over all the measurements of some type
+typedef double(StatisticsFunc)(const std::vector<double>&);
+
+struct Statistics {
+ std::string name_;
+ StatisticsFunc* compute_;
+
+ Statistics(std::string name, StatisticsFunc* compute)
+ : name_(name), compute_(compute) {}
+};
+
namespace internal {
class ThreadTimer;
class ThreadManager;
@@ -398,6 +413,19 @@ enum ReportMode
// benchmark to use.
class State {
public:
+ struct StateIterator;
+ friend struct StateIterator;
+
+ // Returns iterators used to run each iteration of a benchmark using a
+ // C++11 ranged-based for loop. These functions should not be called directly.
+ //
+ // REQUIRES: The benchmark has not started running yet. Neither begin nor end
+ // have been called previously.
+ //
+ // NOTE: KeepRunning may not be used after calling either of these functions.
+ BENCHMARK_ALWAYS_INLINE StateIterator begin();
+ BENCHMARK_ALWAYS_INLINE StateIterator end();
+
// Returns true if the benchmark should continue through another iteration.
// NOTE: A benchmark may not return from the test until KeepRunning() has
// returned false.
@@ -405,7 +433,7 @@ class State {
if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
StartKeepRunning();
}
- bool const res = total_iterations_++ < max_iterations;
+ bool const res = --total_iterations_;
if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
FinishKeepRunning();
}
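A worked example of the new countdown (assuming max_iterations == 3, so the constructor change further down initialises total_iterations_ to 4; iterations() is shown in a later hunk):

//   KeepRunning() #1: --total_iterations_ -> 3, returns true,  iterations() == 1
//   KeepRunning() #2: --total_iterations_ -> 2, returns true,  iterations() == 2
//   KeepRunning() #3: --total_iterations_ -> 1, returns true,  iterations() == 3
//   KeepRunning() #4: --total_iterations_ -> 0, returns false, and
//                     FinishKeepRunning() resets total_iterations_ to 1.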
@@ -415,7 +443,7 @@ class State {
// REQUIRES: timer is running and 'SkipWithError(...)' has not been called
// by the current thread.
// Stop the benchmark timer. If not called, the timer will be
- // automatically stopped after KeepRunning() returns false for the first time.
+ // automatically stopped after the last iteration of the benchmark loop.
//
// For threaded benchmarks the PauseTiming() function only pauses the timing
// for the current thread.
@@ -431,7 +459,8 @@ class State {
// REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
// by the current thread.
// Start the benchmark timer. The timer is NOT running on entrance to the
- // benchmark function. It begins running after the first call to KeepRunning()
+ // benchmark function. It begins running after control flow enters the
+ // benchmark loop.
//
// NOTE: PauseTiming()/ResumeTiming() are relatively
// heavyweight, and so their use should generally be avoided
@@ -440,9 +469,13 @@ class State {
// REQUIRES: 'SkipWithError(...)' has not been called previously by the
// current thread.
- // Skip any future iterations of the 'KeepRunning()' loop in the current
- // thread and report an error with the specified 'msg'. After this call
- // the user may explicitly 'return' from the benchmark.
+ // Report the benchmark as resulting in an error with the specified 'msg'.
+ // After this call the user may explicitly 'return' from the benchmark.
+ //
+ // If the ranged-for style of benchmark loop is used, the user must explicitly
+ // break from the loop, otherwise all future iterations will be run.
+ // If the 'KeepRunning()' loop is used the current thread will automatically
+ // exit the loop at the end of the current iteration.
//
// For threaded benchmarks only the current thread stops executing and future
// calls to `KeepRunning()` will block until all threads have completed
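A sketch of the explicit break described above (hypothetical benchmark and file name, not part of the patch):

#include <fstream>
#include "benchmark/benchmark.h"

static void BM_ReadChunk(benchmark::State& state) {
  std::ifstream in("data.bin", std::ios::binary);  // hypothetical input
  for (auto _ : state) {
    if (!in) {
      state.SkipWithError("could not read data.bin");
      break;  // required with the ranged-for loop; KeepRunning() exits on its own
    }
    char buf[4096];
    in.read(buf, sizeof(buf));
    benchmark::DoNotOptimize(buf);
  }
}
BENCHMARK(BM_ReadChunk);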
@@ -455,7 +488,7 @@ class State {
// responsibility to exit the scope as needed.
void SkipWithError(const char* msg);
- // REQUIRES: called exactly once per iteration of the KeepRunning loop.
+ // REQUIRES: called exactly once per iteration of the benchmarking loop.
// Set the manually measured time for this benchmark iteration, which
// is used instead of automatically measured time if UseManualTime() was
// specified.
@@ -470,7 +503,7 @@ class State {
// value > 0, the report is printed in MB/sec instead of nanoseconds
// per iteration.
//
- // REQUIRES: a benchmark has exited its KeepRunning loop.
+ // REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
void SetBytesProcessed(size_t bytes) { bytes_processed_ = bytes; }
@@ -493,7 +526,7 @@ class State {
// executing benchmark. It is typically called at the end of a processing
// benchmark where a processing items/second output is desired.
//
- // REQUIRES: a benchmark has exited its KeepRunning loop.
+ // REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
void SetItemsProcessed(size_t items) { items_processed_ = items; }
@@ -511,7 +544,7 @@ class State {
// Produces output that looks like:
// BM_Compress 50 50 14115038 compress:27.3%
//
- // REQUIRES: a benchmark has exited its KeepRunning loop.
+ // REQUIRES: a benchmark has exited its benchmarking loop.
void SetLabel(const char* label);
void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) {
@@ -532,7 +565,7 @@ class State {
int range_y() const { return range(1); }
BENCHMARK_ALWAYS_INLINE
- size_t iterations() const { return total_iterations_; }
+ size_t iterations() const { return (max_iterations - total_iterations_) + 1; }
private:
bool started_;
@@ -570,6 +603,53 @@ class State {
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State);
};
+struct State::StateIterator {
+ struct BENCHMARK_UNUSED Value {};
+ typedef std::forward_iterator_tag iterator_category;
+ typedef Value value_type;
+ typedef Value reference;
+ typedef Value pointer;
+
+ private:
+ friend class State;
+ BENCHMARK_ALWAYS_INLINE
+ StateIterator() : cached_(0), parent_() {}
+
+ BENCHMARK_ALWAYS_INLINE
+ explicit StateIterator(State* st)
+ : cached_(st->error_occurred_ ? 0 : st->max_iterations), parent_(st) {}
+
+ public:
+ BENCHMARK_ALWAYS_INLINE
+ Value operator*() const { return Value(); }
+
+ BENCHMARK_ALWAYS_INLINE
+ StateIterator& operator++() {
+ assert(cached_ > 0);
+ --cached_;
+ return *this;
+ }
+
+ BENCHMARK_ALWAYS_INLINE
+ bool operator!=(StateIterator const&) const {
+ if (BENCHMARK_BUILTIN_EXPECT(cached_ != 0, true)) return true;
+ parent_->FinishKeepRunning();
+ return false;
+ }
+
+ private:
+ size_t cached_;
+ State* const parent_;
+};
+
+BENCHMARK_ALWAYS_INLINE inline State::StateIterator State::begin() {
+ return StateIterator(this);
+}
+BENCHMARK_ALWAYS_INLINE inline State::StateIterator State::end() {
+ StartKeepRunning();
+ return StateIterator();
+}
+
namespace internal {
typedef void(Function)(State&);
@@ -698,6 +778,9 @@ class Benchmark {
// the asymptotic computational complexity will be shown on the output.
Benchmark* Complexity(BigOFunc* complexity);
+ // Add this statistic to be computed over all the measurements of the benchmark run
+ Benchmark* ComputeStatistics(std::string name, StatisticsFunc* statistics);
+
// Support for running multiple copies of the same benchmark concurrently
// in multiple threads. This may be useful when measuring the scaling
// of some piece of code.
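A usage sketch for the new hook (MaxOfRuns and BM_Work are hypothetical names, not part of the patch):

#include <algorithm>
#include <vector>
#include "benchmark/benchmark.h"

// User-supplied statistic: the maximum over all repetition measurements,
// reported alongside the default mean/median/stddev rows.
double MaxOfRuns(const std::vector<double>& v) {
  return v.empty() ? 0.0 : *std::max_element(v.begin(), v.end());
}

static void BM_Work(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_Work)->Repetitions(8)->ComputeStatistics("max", MaxOfRuns);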
@@ -758,6 +841,7 @@ class Benchmark {
bool use_manual_time_;
BigO complexity_;
BigOFunc* complexity_lambda_;
+ std::vector<Statistics> statistics_;
std::vector<int> thread_counts_;
Benchmark& operator=(Benchmark const&);
@@ -905,7 +989,7 @@ class Fixture : public internal::Benchmark {
#define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \
BENCHMARK(n)->RangePair({{(l1), (h1)}, {(l2), (h2)}})
-#if __cplusplus >= 201103L
+#ifdef BENCHMARK_HAS_CXX11
// Register a benchmark which invokes the function specified by `func`
// with the additional arguments specified by `...`.
@@ -925,7 +1009,7 @@ class Fixture : public internal::Benchmark {
#func "/" #test_case_name, \
[](::benchmark::State& st) { func(st, __VA_ARGS__); })))
-#endif // __cplusplus >= 11
+#endif // BENCHMARK_HAS_CXX11
// This will register a benchmark for a templatized function. For example:
//
@@ -946,7 +1030,7 @@ class Fixture : public internal::Benchmark {
new ::benchmark::internal::FunctionBenchmark(#n "<" #a "," #b ">", \
n<a, b>)))
-#if __cplusplus >= 201103L
+#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE(n, ...) \
BENCHMARK_PRIVATE_DECLARE(n) = \
(::benchmark::internal::RegisterBenchmarkInternal( \
@@ -967,10 +1051,63 @@ class Fixture : public internal::Benchmark {
virtual void BenchmarkCase(::benchmark::State&); \
};
+#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass<a> { \
+ public: \
+ BaseClass##_##Method##_Benchmark() : BaseClass<a>() { \
+ this->SetName(#BaseClass"<" #a ">/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&); \
+ };
+
+#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> { \
+ public: \
+ BaseClass##_##Method##_Benchmark() : BaseClass<a, b>() { \
+ this->SetName(#BaseClass"<" #a "," #b ">/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&); \
+ };
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \
+ public: \
+ BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \
+ this->SetName(#BaseClass"<" #__VA_ARGS__ ">/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&); \
+ };
+#else
+#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
+#endif
+
#define BENCHMARK_DEFINE_F(BaseClass, Method) \
BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
void BaseClass##_##Method##_Benchmark::BenchmarkCase
+#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \
+ BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \
+ BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+#else
+#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
+#endif
+
#define BENCHMARK_REGISTER_F(BaseClass, Method) \
BENCHMARK_PRIVATE_REGISTER_F(BaseClass##_##Method##_Benchmark)
@@ -984,6 +1121,25 @@ class Fixture : public internal::Benchmark {
BENCHMARK_REGISTER_F(BaseClass, Method); \
void BaseClass##_##Method##_Benchmark::BenchmarkCase
+#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \
+ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \
+ BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
+ BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
+#else
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
+#endif
+
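A sketch of what these macros enable (hypothetical fixture and names, not from the patch):

#include <vector>
#include "benchmark/benchmark.h"

template <typename T>
class VectorFixture : public ::benchmark::Fixture {
 public:
  std::vector<T> data;
};

// Declares, registers and defines the benchmark "VectorFixture<int>/PushBack".
BENCHMARK_TEMPLATE_F(VectorFixture, PushBack, int)(benchmark::State& st) {
  for (auto _ : st) {
    data.push_back(42);
  }
}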
// Helper macro to create a main routine in a test that runs the benchmarks
#define BENCHMARK_MAIN() \
int main(int argc, char** argv) { \
@@ -1065,6 +1221,9 @@ class BenchmarkReporter {
BigOFunc* complexity_lambda;
int complexity_n;
+ // what statistics to compute from the measurements
+ const std::vector<Statistics>* statistics;
+
// Inform print function whether the current run is a complexity report
bool report_big_o;
bool report_rms;
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark_api.h b/MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/benchmark_api.h
index a9ae6714..a9ae6714 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark_api.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/benchmark_api.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/reporter.h b/MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/reporter.h
index 5baca1a7..5baca1a7 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/reporter.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/reporter.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/mingw.py b/MicroBenchmarks/libs/benchmark-1.3.0/mingw.py
index 706ad559..706ad559 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/mingw.py
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/mingw.py
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/CMakeLists.txt b/MicroBenchmarks/libs/benchmark-1.3.0/src/CMakeLists.txt
index 244484b8..e419389c 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/CMakeLists.txt
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/CMakeLists.txt
@@ -54,25 +54,27 @@ write_basic_package_version_file(
configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
-# Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
-install(
- TARGETS benchmark
- EXPORT ${targets_export_name}
- ARCHIVE DESTINATION ${lib_install_dir}
- LIBRARY DESTINATION ${lib_install_dir}
- RUNTIME DESTINATION ${bin_install_dir}
- INCLUDES DESTINATION ${include_install_dir})
-
-install(
- DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
- DESTINATION ${include_install_dir}
- FILES_MATCHING PATTERN "*.*h")
-
-install(
- FILES "${project_config}" "${version_config}"
- DESTINATION "${config_install_dir}")
-
-install(
- EXPORT "${targets_export_name}"
- NAMESPACE "${namespace}"
- DESTINATION "${config_install_dir}")
+if (BENCHMARK_ENABLE_INSTALL)
+ # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
+ install(
+ TARGETS benchmark
+ EXPORT ${targets_export_name}
+ ARCHIVE DESTINATION ${lib_install_dir}
+ LIBRARY DESTINATION ${lib_install_dir}
+ RUNTIME DESTINATION ${bin_install_dir}
+ INCLUDES DESTINATION ${include_install_dir})
+
+ install(
+ DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
+ DESTINATION ${include_install_dir}
+ FILES_MATCHING PATTERN "*.*h")
+
+ install(
+ FILES "${project_config}" "${version_config}"
+ DESTINATION "${config_install_dir}")
+
+ install(
+ EXPORT "${targets_export_name}"
+ NAMESPACE "${namespace}"
+ DESTINATION "${config_install_dir}")
+endif()
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/arraysize.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/arraysize.h
index 51a50f2d..51a50f2d 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/arraysize.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/arraysize.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark.cc
index 1ba0a50a..a72ac470 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark.cc
@@ -37,11 +37,11 @@
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
+#include "statistics.h"
#include "counter.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
-#include "stat.h"
#include "string_util.h"
#include "sysinfo.h"
#include "timers.h"
@@ -99,20 +99,15 @@ DEFINE_bool(benchmark_counters_tabular, false,
DEFINE_int32(v, 0, "The level of verbose logging to output");
namespace benchmark {
-namespace internal {
-
-void UseCharPointer(char const volatile*) {}
-
-} // end namespace internal
namespace {
-
static const size_t kMaxIterations = 1000000000;
-
} // end namespace
namespace internal {
+void UseCharPointer(char const volatile*) {}
+
class ThreadManager {
public:
ThreadManager(int num_threads)
@@ -256,6 +251,7 @@ BenchmarkReporter::Run CreateRunReport(
report.complexity_n = results.complexity_n;
report.complexity = b.complexity;
report.complexity_lambda = b.complexity_lambda;
+ report.statistics = b.statistics;
report.counters = results.counters;
internal::Finish(&report.counters, seconds, b.threads);
}
@@ -401,7 +397,7 @@ State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
internal::ThreadManager* manager)
: started_(false),
finished_(false),
- total_iterations_(0),
+ total_iterations_(max_iters + 1),
range_(ranges),
bytes_processed_(0),
items_processed_(0),
@@ -414,6 +410,7 @@ State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
timer_(timer),
manager_(manager) {
CHECK(max_iterations != 0) << "At least one iteration must be run";
+ CHECK(total_iterations_ != 0) << "max iterations wrapped around";
CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
}
@@ -438,7 +435,7 @@ void State::SkipWithError(const char* msg) {
manager_->results.has_error_ = true;
}
}
- total_iterations_ = max_iterations;
+ total_iterations_ = 1;
if (timer_->running()) timer_->StopTimer();
}
@@ -463,8 +460,8 @@ void State::FinishKeepRunning() {
if (!error_occurred_) {
PauseTiming();
}
- // Total iterations now is one greater than max iterations. Fix this.
- total_iterations_ = max_iterations;
+ // Total iterations has now wrapped around zero. Fix this.
+ total_iterations_ = 1;
finished_ = true;
manager_->StartStopBarrier();
}
@@ -481,17 +478,21 @@ void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
// Determine the width of the name field using a minimum width of 10.
bool has_repetitions = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
+ size_t stat_field_width = 0;
for (const Benchmark::Instance& benchmark : benchmarks) {
name_field_width =
std::max<size_t>(name_field_width, benchmark.name.size());
has_repetitions |= benchmark.repetitions > 1;
+
+ for(const auto& Stat : *benchmark.statistics)
+ stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
}
- if (has_repetitions) name_field_width += std::strlen("_stddev");
+ if (has_repetitions) name_field_width += 1 + stat_field_width;
// Print header here
BenchmarkReporter::Context context;
context.num_cpus = NumCPUs();
- context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f;
+ context.mhz_per_cpu = CyclesPerSecond() / 1000000.0;
context.cpu_scaling_enabled = CpuScalingEnabled();
context.name_field_width = name_field_width;
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_api_internal.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_api_internal.h
index 36d23404..d481dc52 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_api_internal.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_api_internal.h
@@ -25,6 +25,7 @@ struct Benchmark::Instance {
BigO complexity;
BigOFunc* complexity_lambda;
UserCounters counters;
+ const std::vector<Statistics>* statistics;
bool last_benchmark_instance;
int repetitions;
double min_time;
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_register.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_register.cc
index ed70d820..c1b80674 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_register.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_register.cc
@@ -37,10 +37,10 @@
#include "check.h"
#include "commandlineflags.h"
#include "complexity.h"
+#include "statistics.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
-#include "stat.h"
#include "string_util.h"
#include "sysinfo.h"
#include "timers.h"
@@ -159,6 +159,7 @@ bool BenchmarkFamilies::FindBenchmarks(
instance.use_manual_time = family->use_manual_time_;
instance.complexity = family->complexity_;
instance.complexity_lambda = family->complexity_lambda_;
+ instance.statistics = &family->statistics_;
instance.threads = num_threads;
// Add arguments to instance name
@@ -236,7 +237,11 @@ Benchmark::Benchmark(const char* name)
use_real_time_(false),
use_manual_time_(false),
complexity_(oNone),
- complexity_lambda_(nullptr) {}
+ complexity_lambda_(nullptr) {
+ ComputeStatistics("mean", StatisticsMean);
+ ComputeStatistics("median", StatisticsMedian);
+ ComputeStatistics("stddev", StatisticsStdDev);
+}
Benchmark::~Benchmark() {}
@@ -409,6 +414,12 @@ Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
return this;
}
+Benchmark* Benchmark::ComputeStatistics(std::string name,
+ StatisticsFunc* statistics) {
+ statistics_.emplace_back(name, statistics);
+ return this;
+}
+
Benchmark* Benchmark::Threads(int t) {
CHECK_GT(t, 0);
thread_counts_.push_back(t);
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/check.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/check.h
index 73bead2f..73bead2f 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/check.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/check.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/colorprint.cc
index 2dec4a8b..2dec4a8b 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/colorprint.cc
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/colorprint.h
index 9f6fab9b..9f6fab9b 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/colorprint.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/commandlineflags.cc
index 2fc92517..2fc92517 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/commandlineflags.cc
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/commandlineflags.h
index 945c9a9f..945c9a9f 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/commandlineflags.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.cc
index 33975be5..88832698 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.cc
@@ -21,7 +21,6 @@
#include <cmath>
#include "check.h"
#include "complexity.h"
-#include "stat.h"
namespace benchmark {
@@ -150,109 +149,6 @@ LeastSq MinimalLeastSq(const std::vector<int>& n,
return best_fit;
}
-std::vector<BenchmarkReporter::Run> ComputeStats(
- const std::vector<BenchmarkReporter::Run>& reports) {
- typedef BenchmarkReporter::Run Run;
- std::vector<Run> results;
-
- auto error_count =
- std::count_if(reports.begin(), reports.end(),
- [](Run const& run) { return run.error_occurred; });
-
- if (reports.size() - error_count < 2) {
- // We don't report aggregated data if there was a single run.
- return results;
- }
- // Accumulators.
- Stat1_d real_accumulated_time_stat;
- Stat1_d cpu_accumulated_time_stat;
- Stat1_d bytes_per_second_stat;
- Stat1_d items_per_second_stat;
- // All repetitions should be run with the same number of iterations so we
- // can take this information from the first benchmark.
- int64_t const run_iterations = reports.front().iterations;
- // create stats for user counters
- struct CounterStat {
- Counter c;
- Stat1_d s;
- };
- std::map< std::string, CounterStat > counter_stats;
- for(Run const& r : reports) {
- for(auto const& cnt : r.counters) {
- auto it = counter_stats.find(cnt.first);
- if(it == counter_stats.end()) {
- counter_stats.insert({cnt.first, {cnt.second, Stat1_d{}}});
- } else {
- CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
- }
- }
- }
-
- // Populate the accumulators.
- for (Run const& run : reports) {
- CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
- CHECK_EQ(run_iterations, run.iterations);
- if (run.error_occurred) continue;
- real_accumulated_time_stat +=
- Stat1_d(run.real_accumulated_time / run.iterations);
- cpu_accumulated_time_stat +=
- Stat1_d(run.cpu_accumulated_time / run.iterations);
- items_per_second_stat += Stat1_d(run.items_per_second);
- bytes_per_second_stat += Stat1_d(run.bytes_per_second);
- // user counters
- for(auto const& cnt : run.counters) {
- auto it = counter_stats.find(cnt.first);
- CHECK_NE(it, counter_stats.end());
- it->second.s += Stat1_d(cnt.second);
- }
- }
-
- // Get the data from the accumulator to BenchmarkReporter::Run's.
- Run mean_data;
- mean_data.benchmark_name = reports[0].benchmark_name + "_mean";
- mean_data.iterations = run_iterations;
- mean_data.real_accumulated_time =
- real_accumulated_time_stat.Mean() * run_iterations;
- mean_data.cpu_accumulated_time =
- cpu_accumulated_time_stat.Mean() * run_iterations;
- mean_data.bytes_per_second = bytes_per_second_stat.Mean();
- mean_data.items_per_second = items_per_second_stat.Mean();
- mean_data.time_unit = reports[0].time_unit;
- // user counters
- for(auto const& kv : counter_stats) {
- auto c = Counter(kv.second.s.Mean(), counter_stats[kv.first].c.flags);
- mean_data.counters[kv.first] = c;
- }
-
- // Only add label to mean/stddev if it is same for all runs
- mean_data.report_label = reports[0].report_label;
- for (std::size_t i = 1; i < reports.size(); i++) {
- if (reports[i].report_label != reports[0].report_label) {
- mean_data.report_label = "";
- break;
- }
- }
-
- Run stddev_data;
- stddev_data.benchmark_name = reports[0].benchmark_name + "_stddev";
- stddev_data.report_label = mean_data.report_label;
- stddev_data.iterations = 0;
- stddev_data.real_accumulated_time = real_accumulated_time_stat.StdDev();
- stddev_data.cpu_accumulated_time = cpu_accumulated_time_stat.StdDev();
- stddev_data.bytes_per_second = bytes_per_second_stat.StdDev();
- stddev_data.items_per_second = items_per_second_stat.StdDev();
- stddev_data.time_unit = reports[0].time_unit;
- // user counters
- for(auto const& kv : counter_stats) {
- auto c = Counter(kv.second.s.StdDev(), counter_stats[kv.first].c.flags);
- stddev_data.counters[kv.first] = c;
- }
-
- results.push_back(mean_data);
- results.push_back(stddev_data);
- return results;
-}
-
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports) {
typedef BenchmarkReporter::Run Run;
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.h
index c0ca60e6..df29b48d 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.h
@@ -25,12 +25,6 @@
namespace benchmark {
-// Return a vector containing the mean and standard devation information for
-// the specified list of reports. If 'reports' contains less than two
-// non-errored runs an empty vector is returned
-std::vector<BenchmarkReporter::Run> ComputeStats(
- const std::vector<BenchmarkReporter::Run>& reports);
-
// Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO(
@@ -57,4 +51,5 @@ struct LeastSq {
std::string GetBigOString(BigO complexity);
} // end namespace benchmark
+
#endif // COMPLEXITY_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/console_reporter.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/console_reporter.cc
index 4bb6f712..1226d234 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/console_reporter.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/console_reporter.cc
@@ -148,7 +148,7 @@ void ConsoleReporter::PrintRunData(const Run& result) {
}
for (auto& c : result.counters) {
- auto const& s = HumanReadableNumber(c.second.value);
+ auto const& s = HumanReadableNumber(c.second.value, 1000);
if (output_options_ & OO_Tabular) {
if (c.second.flags & Counter::kIsRate) {
printer(Out, COLOR_DEFAULT, " %8s/s", s.c_str());
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/counter.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/counter.cc
index ed1aa044..ed1aa044 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/counter.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/counter.cc
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/counter.h
index dd6865a3..dd6865a3 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/counter.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/csv_reporter.cc
index 35510645..35510645 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/csv_reporter.cc
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/cycleclock.h
index 4251fe4c..4251fe4c 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/cycleclock.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/internal_macros.h
index 94288745..94288745 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/internal_macros.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/json_reporter.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/json_reporter.cc
index edf6ecc8..a49f5b0f 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/json_reporter.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/json_reporter.cc
@@ -21,6 +21,8 @@
#include <string>
#include <tuple>
#include <vector>
+#include <iomanip> // for setprecision
+#include <limits>
#include "string_util.h"
#include "timers.h"
@@ -48,7 +50,14 @@ std::string FormatKV(std::string const& key, int64_t value) {
}
std::string FormatKV(std::string const& key, double value) {
- return StringPrintF("\"%s\": %.2f", key.c_str(), value);
+ std::stringstream ss;
+ ss << '"' << key << "\": ";
+
+ const auto max_digits10 = std::numeric_limits<decltype (value)>::max_digits10;
+ const auto max_fractional_digits10 = max_digits10 - 1;
+
+ ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
+ return ss.str();
}
int64_t RoundDouble(double v) { return static_cast<int64_t>(v + 0.5); }
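For reference on the precision chosen above:

// For IEEE-754 double, std::numeric_limits<double>::max_digits10 is 17, so the
// stream prints in scientific notation with setprecision(16) -- enough digits
// for every double written to the JSON output to round-trip exactly.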
@@ -125,18 +134,18 @@ void JSONReporter::PrintRunData(Run const& run) {
if (!run.report_big_o && !run.report_rms) {
out << indent << FormatKV("iterations", run.iterations) << ",\n";
out << indent
- << FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime()))
+ << FormatKV("real_time", run.GetAdjustedRealTime())
<< ",\n";
out << indent
- << FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
+ << FormatKV("cpu_time", run.GetAdjustedCPUTime());
out << ",\n"
<< indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_big_o) {
out << indent
- << FormatKV("cpu_coefficient", RoundDouble(run.GetAdjustedCPUTime()))
+ << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
<< ",\n";
out << indent
- << FormatKV("real_coefficient", RoundDouble(run.GetAdjustedRealTime()))
+ << FormatKV("real_coefficient", run.GetAdjustedRealTime())
<< ",\n";
out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
@@ -147,17 +156,17 @@ void JSONReporter::PrintRunData(Run const& run) {
if (run.bytes_per_second > 0.0) {
out << ",\n"
<< indent
- << FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second));
+ << FormatKV("bytes_per_second", run.bytes_per_second);
}
if (run.items_per_second > 0.0) {
out << ",\n"
<< indent
- << FormatKV("items_per_second", RoundDouble(run.items_per_second));
+ << FormatKV("items_per_second", run.items_per_second);
}
for(auto &c : run.counters) {
out << ",\n"
<< indent
- << FormatKV(c.first, RoundDouble(c.second));
+ << FormatKV(c.first, c.second);
}
if (!run.report_label.empty()) {
out << ",\n" << indent << FormatKV("label", run.report_label);
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/log.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/log.h
index d06e1031..d06e1031 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/log.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/log.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/mutex.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/mutex.h
index 5f461d05..5f461d05 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/mutex.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/mutex.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/re.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/re.h
index 01e97365..01e97365 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/re.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/re.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/reporter.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/reporter.cc
index aacd4531..9a0830b0 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/reporter.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/reporter.cc
@@ -22,7 +22,6 @@
#include <vector>
#include "check.h"
-#include "stat.h"
namespace benchmark {
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/sleep.cc
index 54aa04a4..54aa04a4 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/sleep.cc
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/sleep.h
index f98551af..f98551af 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/sleep.h
diff --git a/MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.cc
new file mode 100644
index 00000000..5932ad43
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.cc
@@ -0,0 +1,175 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+// Copyright 2017 Roman Lebedev. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+
+#include <algorithm>
+#include <cmath>
+#include <string>
+#include <vector>
+#include <numeric>
+#include "check.h"
+#include "statistics.h"
+
+namespace benchmark {
+
+auto StatisticsSum = [](const std::vector<double>& v) {
+ return std::accumulate(v.begin(), v.end(), 0.0);
+};
+
+double StatisticsMean(const std::vector<double>& v) {
+ if (v.size() == 0) return 0.0;
+ return StatisticsSum(v) * (1.0 / v.size());
+}
+
+double StatisticsMedian(const std::vector<double>& v) {
+ if (v.size() < 3) return StatisticsMean(v);
+ std::vector<double> partial;
+ // we need roundDown(count/2)+1 slots
+ partial.resize(1 + (v.size() / 2));
+ std::partial_sort_copy(v.begin(), v.end(), partial.begin(), partial.end());
+ // did we have an odd number of samples?
+ // if yes, then the last element of the partially-sorted vector is the median
+ // if no, then the average of the last two elements is the median
+ if(v.size() % 2 == 1)
+ return partial.back();
+ return (partial[partial.size() - 2] + partial[partial.size() - 1]) / 2.0;
+}
+
+// Return the sum of the squares of this sample set
+auto SumSquares = [](const std::vector<double>& v) {
+ return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
+};
+
+auto Sqr = [](const double dat) { return dat * dat; };
+auto Sqrt = [](const double dat) {
+ // Avoid NaN due to imprecision in the calculations
+ if (dat < 0.0) return 0.0;
+ return std::sqrt(dat);
+};
+
+double StatisticsStdDev(const std::vector<double>& v) {
+ const auto mean = StatisticsMean(v);
+ if (v.size() == 0) return mean;
+
+ // Sample standard deviation is undefined for n = 1
+ if (v.size() == 1)
+ return 0.0;
+
+ const double avg_squares = SumSquares(v) * (1.0 / v.size());
+ return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
+}
+
+std::vector<BenchmarkReporter::Run> ComputeStats(
+ const std::vector<BenchmarkReporter::Run>& reports) {
+ typedef BenchmarkReporter::Run Run;
+ std::vector<Run> results;
+
+ auto error_count =
+ std::count_if(reports.begin(), reports.end(),
+ [](Run const& run) { return run.error_occurred; });
+
+ if (reports.size() - error_count < 2) {
+ // We don't report aggregated data if there was a single run.
+ return results;
+ }
+
+ // Accumulators.
+ std::vector<double> real_accumulated_time_stat;
+ std::vector<double> cpu_accumulated_time_stat;
+ std::vector<double> bytes_per_second_stat;
+ std::vector<double> items_per_second_stat;
+
+ real_accumulated_time_stat.reserve(reports.size());
+ cpu_accumulated_time_stat.reserve(reports.size());
+ bytes_per_second_stat.reserve(reports.size());
+ items_per_second_stat.reserve(reports.size());
+
+ // All repetitions should be run with the same number of iterations so we
+ // can take this information from the first benchmark.
+ int64_t const run_iterations = reports.front().iterations;
+ // create stats for user counters
+ struct CounterStat {
+ Counter c;
+ std::vector<double> s;
+ };
+ std::map< std::string, CounterStat > counter_stats;
+ for(Run const& r : reports) {
+ for(auto const& cnt : r.counters) {
+ auto it = counter_stats.find(cnt.first);
+ if(it == counter_stats.end()) {
+ counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
+ it = counter_stats.find(cnt.first);
+ it->second.s.reserve(reports.size());
+ } else {
+ CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
+ }
+ }
+ }
+
+ // Populate the accumulators.
+ for (Run const& run : reports) {
+ CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
+ CHECK_EQ(run_iterations, run.iterations);
+ if (run.error_occurred) continue;
+ real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
+ cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
+ items_per_second_stat.emplace_back(run.items_per_second);
+ bytes_per_second_stat.emplace_back(run.bytes_per_second);
+ // user counters
+ for(auto const& cnt : run.counters) {
+ auto it = counter_stats.find(cnt.first);
+ CHECK_NE(it, counter_stats.end());
+ it->second.s.emplace_back(cnt.second);
+ }
+ }
+
+ // Only add label if it is the same for all runs
+ std::string report_label = reports[0].report_label;
+ for (std::size_t i = 1; i < reports.size(); i++) {
+ if (reports[i].report_label != report_label) {
+ report_label = "";
+ break;
+ }
+ }
+
+ for(const auto& Stat : *reports[0].statistics) {
+ // Get the data from the accumulator to BenchmarkReporter::Run's.
+ Run data;
+ data.benchmark_name = reports[0].benchmark_name + "_" + Stat.name_;
+ data.report_label = report_label;
+ data.iterations = run_iterations;
+
+ data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
+ data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
+ data.bytes_per_second = Stat.compute_(bytes_per_second_stat);
+ data.items_per_second = Stat.compute_(items_per_second_stat);
+
+ data.time_unit = reports[0].time_unit;
+
+ // user counters
+ for(auto const& kv : counter_stats) {
+ const auto uc_stat = Stat.compute_(kv.second.s);
+ auto c = Counter(uc_stat, counter_stats[kv.first].c.flags);
+ data.counters[kv.first] = c;
+ }
+
+ results.push_back(data);
+ }
+
+ return results;
+}
+
+} // end namespace benchmark
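A small worked example for the helpers above (illustrative values, not taken from any run):

// Given repetition measurements {1.0, 2.0, 3.0, 4.0}:
//   StatisticsMean   -> (1 + 2 + 3 + 4) / 4             = 2.5
//   StatisticsMedian -> (2 + 3) / 2                      = 2.5   (even count)
//   StatisticsStdDev -> Sqrt(4/3 * (30/4 - 2.5 * 2.5))   ~= 1.29 (sample stddev)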
diff --git a/MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.h
new file mode 100644
index 00000000..7eccc855
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.h
@@ -0,0 +1,37 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+// Copyright 2017 Roman Lebedev. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef STATISTICS_H_
+#define STATISTICS_H_
+
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+
+// Return a vector containing the mean, median and standard deviation information
+// (and any user-specified statistics) for the specified list of reports. If 'reports'
+// contains fewer than two non-errored runs, an empty vector is returned
+std::vector<BenchmarkReporter::Run> ComputeStats(
+ const std::vector<BenchmarkReporter::Run>& reports);
+
+double StatisticsMean(const std::vector<double>& v);
+double StatisticsMedian(const std::vector<double>& v);
+double StatisticsStdDev(const std::vector<double>& v);
+
+} // end namespace benchmark
+
+#endif // STATISTICS_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.cc
index cd4e7cfd..29edb2a4 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.cc
@@ -27,8 +27,6 @@ static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
static const int64_t kUnitsSize = arraysize(kBigSIUnits);
-} // end anonymous namespace
-
void ToExponentAndMantissa(double val, double thresh, int precision,
double one_k, std::string* mantissa,
int64_t* exponent) {
@@ -100,14 +98,16 @@ std::string ExponentToPrefix(int64_t exponent, bool iec) {
}
std::string ToBinaryStringFullySpecified(double value, double threshold,
- int precision) {
+ int precision, double one_k = 1024.0) {
std::string mantissa;
int64_t exponent;
- ToExponentAndMantissa(value, threshold, precision, 1024.0, &mantissa,
+ ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa,
&exponent);
return mantissa + ExponentToPrefix(exponent, false);
}
+} // end namespace
+
void AppendHumanReadable(int n, std::string* str) {
std::stringstream ss;
// Round down to the nearest SI prefix.
@@ -115,11 +115,11 @@ void AppendHumanReadable(int n, std::string* str) {
*str += ss.str();
}
-std::string HumanReadableNumber(double n) {
+std::string HumanReadableNumber(double n, double one_k) {
// 1.1 means that figures up to 1.1k should be shown with the next unit down;
// this softens edge effects.
// 1 means that we should show one decimal place of precision.
- return ToBinaryStringFullySpecified(n, 1.1, 1);
+ return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
}
std::string StringPrintFImp(const char* msg, va_list args) {
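An illustrative call of the widened helper (library-internal; the values are hypothetical):

// Byte-like quantities keep the default binary base, while user counters are
// now formatted with one_k = 1000 (see the console_reporter.cc hunk above).
std::string bytes = benchmark::HumanReadableNumber(1536.0);          // base 1024
std::string rate  = benchmark::HumanReadableNumber(1536.0, 1000.0);  // base 1000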
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.h
index 0b190b91..c3d53bfd 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.h
@@ -10,7 +10,7 @@ namespace benchmark {
void AppendHumanReadable(int n, std::string* str);
-std::string HumanReadableNumber(double n);
+std::string HumanReadableNumber(double n, double one_k = 1024.0);
std::string StringPrintF(const char* format, ...);
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/sysinfo.cc
index 7feb79e6..7feb79e6 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/sysinfo.cc
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/sysinfo.h
index c5d9916d..c5d9916d 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/sysinfo.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc b/MicroBenchmarks/libs/benchmark-1.3.0/src/timers.cc
index 817272d0..817272d0 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/timers.cc
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/timers.h b/MicroBenchmarks/libs/benchmark-1.3.0/src/timers.h
index 65606ccd..65606ccd 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/src/timers.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/src/timers.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/CMakeLists.txt b/MicroBenchmarks/libs/benchmark-1.3.0/test/CMakeLists.txt
index b55612b4..baf21cbc 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/CMakeLists.txt
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/CMakeLists.txt
@@ -98,6 +98,9 @@ add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01)
compile_output_test(reporter_output_test)
add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01)
+compile_output_test(templated_fixture_test)
+add_test(templated_fixture_test templated_fixture_test --benchmark_min_time=0.01)
+
compile_output_test(user_counters_test)
add_test(user_counters_test user_counters_test --benchmark_min_time=0.01)
@@ -106,13 +109,20 @@ add_test(user_counters_tabular_test user_counters_tabular_test --benchmark_count
check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
if (BENCHMARK_HAS_CXX03_FLAG)
- set(CXX03_FLAGS "${CMAKE_CXX_FLAGS}")
- string(REPLACE "-std=c++11" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
- string(REPLACE "-std=c++0x" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
-
compile_benchmark_test(cxx03_test)
set_target_properties(cxx03_test
- PROPERTIES COMPILE_FLAGS "${CXX03_FLAGS}")
+ PROPERTIES
+ COMPILE_FLAGS "-std=c++03")
+ # libstdc++ provides different definitions within <map> between dialects. When
+ # LTO is enabled and -Werror is specified GCC diagnoses this ODR violation
+ # causing the test to fail to compile. To prevent this we explicitly disable
+ # the warning.
+ check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR)
+ if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR)
+ set_target_properties(cxx03_test
+ PROPERTIES
+ LINK_FLAGS "-Wno-odr")
+ endif()
add_test(cxx03 cxx03_test --benchmark_min_time=0.01)
endif()
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/basic_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/basic_test.cc
index bc1f96d9..8a27b925 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/basic_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/basic_test.cc
@@ -4,7 +4,7 @@
#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
void BM_empty(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
}
@@ -12,7 +12,7 @@ BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();
void BM_spin_empty(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
for (int x = 0; x < state.range(0); ++x) {
benchmark::DoNotOptimize(x);
}
@@ -25,7 +25,7 @@ void BM_spin_pause_before(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
- while (state.KeepRunning()) {
+ for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
@@ -35,7 +35,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
void BM_spin_pause_during(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
state.PauseTiming();
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
@@ -50,7 +50,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
void BM_pause_during(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
state.PauseTiming();
state.ResumeTiming();
}
@@ -61,7 +61,7 @@ BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
void BM_spin_pause_after(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
@@ -77,7 +77,7 @@ void BM_spin_pause_before_and_after(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
- while (state.KeepRunning()) {
+ for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
@@ -90,10 +90,29 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
void BM_empty_stop_start(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
+
+void BM_KeepRunning(benchmark::State& state) {
+ size_t iter_count = 0;
+ while (state.KeepRunning()) {
+ ++iter_count;
+ }
+ assert(iter_count == state.max_iterations);
+}
+BENCHMARK(BM_KeepRunning);
+
+void BM_RangedFor(benchmark::State& state) {
+ size_t iter_count = 0;
+ for (auto _ : state) {
+ ++iter_count;
+ }
+ assert(iter_count == state.max_iterations);
+}
+BENCHMARK(BM_RangedFor);
+
BENCHMARK_MAIN()
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/benchmark_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/benchmark_test.cc
index 7a16466e..d4326012 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/benchmark_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/benchmark_test.cc
@@ -42,7 +42,7 @@ double CalculatePi(int depth) {
std::set<int> ConstructRandomSet(int size) {
std::set<int> s;
- for (int i = 0; i < size; ++i) s.insert(i);
+ for (int i = 0; i < size; ++i) s.insert(s.end(), i);
return s;
}
@@ -53,7 +53,7 @@ std::vector<int>* test_vector = nullptr;
static void BM_Factorial(benchmark::State& state) {
int fac_42 = 0;
- while (state.KeepRunning()) fac_42 = Factorial(8);
+ for (auto _ : state) fac_42 = Factorial(8);
// Prevent compiler optimizations
std::stringstream ss;
ss << fac_42;
@@ -64,7 +64,7 @@ BENCHMARK(BM_Factorial)->UseRealTime();
static void BM_CalculatePiRange(benchmark::State& state) {
double pi = 0.0;
- while (state.KeepRunning()) pi = CalculatePi(state.range(0));
+ for (auto _ : state) pi = CalculatePi(state.range(0));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
@@ -73,7 +73,7 @@ BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
static void BM_CalculatePi(benchmark::State& state) {
static const int depth = 1024;
- while (state.KeepRunning()) {
+ for (auto _ : state) {
benchmark::DoNotOptimize(CalculatePi(depth));
}
}
@@ -82,22 +82,26 @@ BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
static void BM_SetInsert(benchmark::State& state) {
- while (state.KeepRunning()) {
+ std::set<int> data;
+ for (auto _ : state) {
state.PauseTiming();
- std::set<int> data = ConstructRandomSet(state.range(0));
+ data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j) data.insert(rand());
}
state.SetItemsProcessed(state.iterations() * state.range(1));
state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
}
-BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {1, 10}});
+
+// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower,
+// non-timed part of each iteration will make the benchmark take forever.
+BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});
template <typename Container,
typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State& state) {
ValueType v = 42;
- while (state.KeepRunning()) {
+ for (auto _ : state) {
Container c;
for (int i = state.range(0); --i;) c.push_back(v);
}
@@ -109,14 +113,14 @@ BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)
->Range(1 << 0, 1 << 10);
BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
-#if __cplusplus >= 201103L
+#ifdef BENCHMARK_HAS_CXX11
BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
#endif
static void BM_StringCompare(benchmark::State& state) {
std::string s1(state.range(0), '-');
std::string s2(state.range(0), '-');
- while (state.KeepRunning()) benchmark::DoNotOptimize(s1.compare(s2));
+ for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
}
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
@@ -126,7 +130,7 @@ static void BM_SetupTeardown(benchmark::State& state) {
test_vector = new std::vector<int>();
}
int i = 0;
- while (state.KeepRunning()) {
+ for (auto _ : state) {
std::lock_guard<std::mutex> l(test_vector_mu);
if (i % 2 == 0)
test_vector->push_back(i);
@@ -142,7 +146,7 @@ BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
static void BM_LongTest(benchmark::State& state) {
double tracker = 0.0;
- while (state.KeepRunning()) {
+ for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i)
benchmark::DoNotOptimize(tracker += i);
}
@@ -159,7 +163,7 @@ static void BM_ParallelMemset(benchmark::State& state) {
test_vector = new std::vector<int>(size);
}
- while (state.KeepRunning()) {
+ for (auto _ : state) {
for (int i = from; i < to; i++) {
// No need to lock test_vector_mu as ranges
// do not overlap between threads.
@@ -179,7 +183,7 @@ static void BM_ManualTiming(benchmark::State& state) {
std::chrono::duration<double, std::micro> sleep_duration{
static_cast<double>(microseconds)};
- while (state.KeepRunning()) {
+ for (auto _ : state) {
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(
@@ -197,11 +201,11 @@ static void BM_ManualTiming(benchmark::State& state) {
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
-#if __cplusplus >= 201103L
+#ifdef BENCHMARK_HAS_CXX11
template <class... Args>
void BM_with_args(benchmark::State& state, Args&&...) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
@@ -213,7 +217,7 @@ void BM_non_template_args(benchmark::State& state, int, double) {
}
BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
-#endif // __cplusplus >= 201103L
+#endif // BENCHMARK_HAS_CXX11
static void BM_DenseThreadRanges(benchmark::State& st) {
switch (st.range(0)) {
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/complexity_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/complexity_test.cc
index 62d1154d..89dfa580 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/complexity_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/complexity_test.cc
@@ -25,8 +25,8 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
{"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name.
{"^%rms_name %rms %rms[ ]*$", MR_Next}});
AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
- {"\"cpu_coefficient\": [0-9]+,$", MR_Next},
- {"\"real_coefficient\": [0-9]{1,5},$", MR_Next},
+ {"\"cpu_coefficient\": %float,$", MR_Next},
+ {"\"real_coefficient\": %float,$", MR_Next},
{"\"big_o\": \"%bigo\",$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next},
@@ -46,7 +46,7 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
for (int i = 0; i < 1024; ++i) {
benchmark::DoNotOptimize(&i);
}
@@ -94,7 +94,7 @@ void BM_Complexity_O_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
const int item_not_in_vector =
state.range(0) * 2; // Test worst case scenario (item not in vector)
- while (state.KeepRunning()) {
+ for (auto _ : state) {
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
}
state.SetComplexityN(state.range(0));
@@ -129,7 +129,7 @@ ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
static void BM_Complexity_O_N_log_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
- while (state.KeepRunning()) {
+ for (auto _ : state) {
std::sort(v.begin(), v.end());
}
state.SetComplexityN(state.range(0));
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/cxx03_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/cxx03_test.cc
index a79d964e..cc8abaf7 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/cxx03_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/cxx03_test.cc
@@ -8,6 +8,10 @@
#error C++11 or greater detected. Should be C++03.
#endif
+#ifdef BENCHMARK_HAS_CXX11
+#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined.
+#endif
+
void BM_empty(benchmark::State& state) {
while (state.KeepRunning()) {
volatile std::size_t x = state.iterations();
@@ -39,6 +43,17 @@ void BM_template1(benchmark::State& state) {
BENCHMARK_TEMPLATE(BM_template1, long);
BENCHMARK_TEMPLATE1(BM_template1, int);
+template <class T>
+struct BM_Fixture : public ::benchmark::Fixture {
+};
+
+BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) {
+ BM_empty(state);
+}
+BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) {
+ BM_empty(state);
+}
+
void BM_counters(benchmark::State& state) {
BM_empty(state);
state.counters["Foo"] = 2;
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/diagnostics_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/diagnostics_test.cc
index 7aac8069..dd64a336 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/diagnostics_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/diagnostics_test.cc
@@ -47,7 +47,7 @@ void BM_diagnostic_test(benchmark::State& state) {
if (called_once == false) try_invalid_pause_resume(state);
- while (state.KeepRunning()) {
+ for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
@@ -57,6 +57,22 @@ void BM_diagnostic_test(benchmark::State& state) {
}
BENCHMARK(BM_diagnostic_test);
+
+void BM_diagnostic_test_keep_running(benchmark::State& state) {
+ static bool called_once = false;
+
+ if (called_once == false) try_invalid_pause_resume(state);
+
+ while(state.KeepRunning()) {
+ benchmark::DoNotOptimize(state.iterations());
+ }
+
+ if (called_once == false) try_invalid_pause_resume(state);
+
+ called_once = true;
+}
+BENCHMARK(BM_diagnostic_test_keep_running);
+
int main(int argc, char* argv[]) {
benchmark::internal::GetAbortHandler() = &TestHandler;
benchmark::Initialize(&argc, argv);
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/donotoptimize_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/donotoptimize_test.cc
index a705654a..a705654a 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/donotoptimize_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/donotoptimize_test.cc
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/filter_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/filter_test.cc
index 3a205295..0e27065c 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/filter_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/filter_test.cc
@@ -36,31 +36,31 @@ class TestReporter : public benchmark::ConsoleReporter {
} // end namespace
static void NoPrefix(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(NoPrefix);
static void BM_Foo(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_Foo);
static void BM_Bar(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_Bar);
static void BM_FooBar(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_FooBar);
static void BM_FooBa(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_FooBa);
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/fixture_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/fixture_test.cc
index bbc2f957..9b0b0e17 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/fixture_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/fixture_test.cc
@@ -28,7 +28,7 @@ class MyFixture : public ::benchmark::Fixture {
BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) {
assert(data.get() != nullptr);
assert(*data == 42);
- while (st.KeepRunning()) {
+ for (auto _ : st) {
}
}
@@ -37,7 +37,7 @@ BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
assert(data.get() != nullptr);
assert(*data == 42);
}
- while (st.KeepRunning()) {
+ for (auto _ : st) {
assert(data.get() != nullptr);
assert(*data == 42);
}
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/map_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/map_test.cc
index 83457c99..0f8238d9 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/map_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/map_test.cc
@@ -18,9 +18,10 @@ std::map<int, int> ConstructRandomMap(int size) {
// Basic version.
static void BM_MapLookup(benchmark::State& state) {
const int size = state.range(0);
- while (state.KeepRunning()) {
+ std::map<int, int> m;
+ for (auto _ : state) {
state.PauseTiming();
- std::map<int, int> m = ConstructRandomMap(size);
+ m = ConstructRandomMap(size);
state.ResumeTiming();
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(rand() % size));
@@ -44,7 +45,7 @@ class MapFixture : public ::benchmark::Fixture {
BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
const int size = state.range(0);
- while (state.KeepRunning()) {
+ for (auto _ : state) {
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(rand() % size));
}
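
The map_test change hoists the std::map out of the measured loop and rebuilds it under PauseTiming()/ResumeTiming(), so per-iteration setup no longer counts toward the reported time. A condensed, self-contained sketch of the pattern (the ConstructRandomMap helper here is a stand-in for the one defined in the test):

#include "benchmark/benchmark.h"
#include <cstdlib>
#include <map>

// Stand-in helper: builds a map with `size` pseudo-random entries.
static std::map<int, int> ConstructRandomMap(int size) {
  std::map<int, int> m;
  for (int i = 0; i < size; ++i) m[std::rand() % size] = std::rand() % size;
  return m;
}

static void BM_Lookup(benchmark::State& state) {
  const int size = state.range(0);
  std::map<int, int> m;       // reused across iterations instead of re-declared
  for (auto _ : state) {
    state.PauseTiming();      // keep map construction out of the measurement
    m = ConstructRandomMap(size);
    state.ResumeTiming();
    for (int i = 0; i < size; ++i)
      benchmark::DoNotOptimize(m.find(std::rand() % size));
  }
}
BENCHMARK(BM_Lookup)->Range(8, 4096);
BENCHMARK_MAIN()
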
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/multiple_ranges_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/multiple_ranges_test.cc
index 8e67b3b2..6a3a6d85 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/multiple_ranges_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/multiple_ranges_test.cc
@@ -43,7 +43,7 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
};
BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
int product = state.range(0) * state.range(1) * state.range(2);
for (int x = 0; x < product; x++) {
benchmark::DoNotOptimize(x);
@@ -60,13 +60,13 @@ void BM_CheckDefaultArgument(benchmark::State& state) {
// Test that the 'range()' without an argument is the same as 'range(0)'.
assert(state.range() == state.range(0));
assert(state.range() != state.range(1));
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
static void BM_MultipleRanges(benchmark::State& st) {
- while (st.KeepRunning()) {
+ for (auto _ : st) {
}
}
BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});
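
The multiple-ranges test relies on Ranges(), which takes one {lo, hi} pair per benchmark argument, expands each pair to a small set of values between lo and hi, and runs the benchmark over the cross product; state.range() with no index is shorthand for state.range(0). A minimal sketch with a made-up two-argument benchmark:

#include "benchmark/benchmark.h"

static void BM_Grid(benchmark::State& state) {
  const int rows = state.range(0);  // first pair
  const int cols = state.range(1);  // second pair
  for (auto _ : state) {
    benchmark::DoNotOptimize(rows * cols);
  }
}
// Every combination generated from the two ranges is benchmarked.
BENCHMARK(BM_Grid)->Ranges({{1, 64}, {1, 8}});
BENCHMARK_MAIN()
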
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/options_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/options_test.cc
index 8eac068b..0690a13f 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/options_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/options_test.cc
@@ -8,13 +8,13 @@
#include <cassert>
void BM_basic(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
void BM_basic_slow(benchmark::State& state) {
std::chrono::milliseconds sleep_duration(state.range(0));
- while (state.KeepRunning()) {
+ for (auto _ : state) {
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
}
@@ -44,7 +44,7 @@ void CustomArgs(benchmark::internal::Benchmark* b) {
BENCHMARK(BM_basic)->Apply(CustomArgs);
-void BM_explicit_iteration_count(benchmark::State& st) {
+void BM_explicit_iteration_count(benchmark::State& state) {
// Test that benchmarks specified with an explicit iteration count are
// only run once.
static bool invoked_before = false;
@@ -52,12 +52,12 @@ void BM_explicit_iteration_count(benchmark::State& st) {
invoked_before = true;
// Test that the requested iteration count is respected.
- assert(st.max_iterations == 42);
+ assert(state.max_iterations == 42);
size_t actual_iterations = 0;
- while (st.KeepRunning())
+ for (auto _ : state)
++actual_iterations;
- assert(st.iterations() == st.max_iterations);
- assert(st.iterations() == 42);
+ assert(state.iterations() == state.max_iterations);
+ assert(state.iterations() == 42);
}
BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
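
BM_explicit_iteration_count exercises ->Iterations(n), which pins a benchmark to exactly n iterations instead of letting the library choose a count; state.max_iterations reports the requested value and state.iterations() the number actually run. A minimal sketch, assuming assertions are enabled:

#include "benchmark/benchmark.h"
#include <cassert>

static void BM_FixedIterations(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
  // With ->Iterations(100) both values are 100.
  assert(state.iterations() == state.max_iterations);
}
BENCHMARK(BM_FixedIterations)->Iterations(100);
BENCHMARK_MAIN()
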
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/output_test.h b/MicroBenchmarks/libs/benchmark-1.3.0/test/output_test.h
index 897a1386..897a1386 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/output_test.h
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/output_test.h
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/output_test_helper.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/output_test_helper.cc
index 24746f6d..24746f6d 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/output_test_helper.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/output_test_helper.cc
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/register_benchmark_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/register_benchmark_test.cc
index 2769b7a6..8ab2c299 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/register_benchmark_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/register_benchmark_test.cc
@@ -61,7 +61,7 @@ typedef benchmark::internal::Benchmark* ReturnVal;
// Test RegisterBenchmark with no additional arguments
//----------------------------------------------------------------------------//
void BM_function(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_function);
@@ -77,7 +77,7 @@ ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
void BM_extra_args(benchmark::State& st, const char* label) {
- while (st.KeepRunning()) {
+ for (auto _ : st) {
}
st.SetLabel(label);
}
@@ -99,7 +99,7 @@ ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
struct CustomFixture {
void operator()(benchmark::State& st) {
- while (st.KeepRunning()) {
+ for (auto _ : st) {
}
}
};
@@ -116,7 +116,7 @@ void TestRegistrationAtRuntime() {
{
const char* x = "42";
auto capturing_lam = [=](benchmark::State& st) {
- while (st.KeepRunning()) {
+ for (auto _ : st) {
}
st.SetLabel(x);
};
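
register_benchmark_test covers benchmark::RegisterBenchmark, which registers plain functions, functors, or lambdas at runtime instead of through the BENCHMARK macro, optionally binding extra arguments to the callable. A small sketch of runtime registration; the benchmark names and labels are illustrative only:

#include "benchmark/benchmark.h"

static void BM_WithLabel(benchmark::State& state, const char* label) {
  for (auto _ : state) {
  }
  state.SetLabel(label);
}

int main(int argc, char** argv) {
  // Register one instance per label before flags are parsed and benchmarks run.
  for (const char* label : {"small", "large"})
    benchmark::RegisterBenchmark(label, &BM_WithLabel, label);
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
}
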
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/reporter_output_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/reporter_output_test.cc
index 4a481433..eac88066 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/reporter_output_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/reporter_output_test.cc
@@ -20,7 +20,7 @@ ADD_CASES(TC_CSVOut, {{"%csv_header"}});
// ========================================================================= //
void BM_basic(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_basic);
@@ -28,8 +28,8 @@ BENCHMARK(BM_basic);
ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
@@ -39,20 +39,20 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
// ========================================================================= //
void BM_bytes_per_second(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
state.SetBytesProcessed(1);
}
BENCHMARK(BM_bytes_per_second);
ADD_CASES(TC_ConsoleOut,
- {{"^BM_bytes_per_second %console_report +%floatB/s$"}});
+ {{"^BM_bytes_per_second %console_report +%float[kM]{0,1}B/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
- {"\"bytes_per_second\": %int$", MR_Next},
+ {"\"bytes_per_second\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
@@ -61,20 +61,20 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
// ========================================================================= //
void BM_items_per_second(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
state.SetItemsProcessed(1);
}
BENCHMARK(BM_items_per_second);
ADD_CASES(TC_ConsoleOut,
- {{"^BM_items_per_second %console_report +%float items/s$"}});
+ {{"^BM_items_per_second %console_report +%float[kM]{0,1} items/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
- {"\"items_per_second\": %int$", MR_Next},
+ {"\"items_per_second\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
@@ -83,7 +83,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
// ========================================================================= //
void BM_label(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
state.SetLabel("some label");
}
@@ -92,8 +92,8 @@ BENCHMARK(BM_label);
ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"label\": \"some label\"$", MR_Next},
{"}", MR_Next}});
@@ -106,7 +106,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
void BM_error(benchmark::State& state) {
state.SkipWithError("message");
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_error);
@@ -123,7 +123,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});
// ========================================================================= //
void BM_no_arg_name(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_no_arg_name)->Arg(3);
@@ -136,7 +136,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
// ========================================================================= //
void BM_arg_name(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
@@ -149,7 +149,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
// ========================================================================= //
void BM_arg_names(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
@@ -163,7 +163,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
state.SetComplexityN(state.range(0));
}
@@ -179,30 +179,74 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"},
// Test that non-aggregate data is printed by default
void BM_Repeat(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
+// need two repetitions min to be able to output any aggregate output
+BENCHMARK(BM_Repeat)->Repetitions(2);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"},
+ {"^BM_Repeat/repeats:2 %console_report$"},
+ {"^BM_Repeat/repeats:2_mean %console_report$"},
+ {"^BM_Repeat/repeats:2_median %console_report$"},
+ {"^BM_Repeat/repeats:2_stddev %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"},
+ {"\"name\": \"BM_Repeat/repeats:2\",$"},
+ {"\"name\": \"BM_Repeat/repeats:2_mean\",$"},
+ {"\"name\": \"BM_Repeat/repeats:2_median\",$"},
+ {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:2\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:2_mean\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:2_median\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
+// but for two repetitions, mean and median is the same, so let's repeat..
BENCHMARK(BM_Repeat)->Repetitions(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3_mean %console_report$"},
+ {"^BM_Repeat/repeats:3_median %console_report$"},
{"^BM_Repeat/repeats:3_stddev %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"name\": \"BM_Repeat/repeats:3_mean\",$"},
+ {"\"name\": \"BM_Repeat/repeats:3_median\",$"},
{"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_mean\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:3_median\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
+// median differs between even/odd number of repetitions, so just to be sure
+BENCHMARK(BM_Repeat)->Repetitions(4);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4_mean %console_report$"},
+ {"^BM_Repeat/repeats:4_median %console_report$"},
+ {"^BM_Repeat/repeats:4_stddev %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"name\": \"BM_Repeat/repeats:4_mean\",$"},
+ {"\"name\": \"BM_Repeat/repeats:4_median\",$"},
+ {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4_mean\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4_median\",%csv_report$"},
+ {"^\"BM_Repeat/repeats:4_stddev\",%csv_report$"}});
// Test that a non-repeated test still prints non-aggregate results even when
// only-aggregate reports have been requested
void BM_RepeatOnce(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
@@ -212,23 +256,26 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
// Test that non-aggregate data is not reported
void BM_SummaryRepeat(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^BM_SummaryRepeat/repeats:3_mean %console_report$"},
+ {"^BM_SummaryRepeat/repeats:3_median %console_report$"},
{"^BM_SummaryRepeat/repeats:3_stddev %console_report$"}});
ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
+ {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
{"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
+ {"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}});
void BM_RepeatTimeUnit(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
}
BENCHMARK(BM_RepeatTimeUnit)
@@ -238,18 +285,60 @@ BENCHMARK(BM_RepeatTimeUnit)
ADD_CASES(TC_ConsoleOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^BM_RepeatTimeUnit/repeats:3_mean %console_us_report$"},
+ {"^BM_RepeatTimeUnit/repeats:3_median %console_us_report$"},
{"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_report$"}});
ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
{"\"time_unit\": \"us\",?$"},
+ {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
+ {"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
{"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
+ {"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
// ========================================================================= //
+// -------------------- Testing user-provided statistics ------------------- //
+// ========================================================================= //
+
+const auto UserStatistics = [](const std::vector<double>& v) {
+ return v.back();
+};
+void BM_UserStats(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_UserStats)
+ ->Repetitions(3)
+ ->ComputeStatistics("", UserStatistics);
+// check that user-provided stats is calculated, and is after the default-ones
+// empty string as name is intentional, it would sort before anything else
+ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/repeats:3 %console_report$"},
+ {"^BM_UserStats/repeats:3 %console_report$"},
+ {"^BM_UserStats/repeats:3 %console_report$"},
+ {"^BM_UserStats/repeats:3_mean %console_report$"},
+ {"^BM_UserStats/repeats:3_median %console_report$"},
+ {"^BM_UserStats/repeats:3_stddev %console_report$"},
+ {"^BM_UserStats/repeats:3_ %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/repeats:3\",$"},
+ {"\"name\": \"BM_UserStats/repeats:3\",$"},
+ {"\"name\": \"BM_UserStats/repeats:3\",$"},
+ {"\"name\": \"BM_UserStats/repeats:3_mean\",$"},
+ {"\"name\": \"BM_UserStats/repeats:3_median\",$"},
+ {"\"name\": \"BM_UserStats/repeats:3_stddev\",$"},
+ {"\"name\": \"BM_UserStats/repeats:3_\",$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/repeats:3\",%csv_report$"},
+ {"^\"BM_UserStats/repeats:3\",%csv_report$"},
+ {"^\"BM_UserStats/repeats:3\",%csv_report$"},
+ {"^\"BM_UserStats/repeats:3_mean\",%csv_report$"},
+ {"^\"BM_UserStats/repeats:3_median\",%csv_report$"},
+ {"^\"BM_UserStats/repeats:3_stddev\",%csv_report$"},
+ {"^\"BM_UserStats/repeats:3_\",%csv_report$"}});
+
+// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
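
Two reporter changes drive most of the new expectations in this file: repeated runs now emit a median aggregate alongside mean and stddev (and ReportAggregatesOnly() suppresses the per-repetition lines), and ->ComputeStatistics(name, fn) attaches a user-defined statistic computed over the per-repetition results, reported as an extra aggregate suffixed with the given name. A minimal sketch, with "max" as an illustrative statistic name:

#include "benchmark/benchmark.h"
#include <algorithm>
#include <vector>

static double MaxOfRepetitions(const std::vector<double>& v) {
  return *std::max_element(v.begin(), v.end());
}

static void BM_Work(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_Work)
    ->Repetitions(3)                               // emits _mean, _median, _stddev
    ->ComputeStatistics("max", MaxOfRepetitions);  // adds BM_Work/repeats:3_max
BENCHMARK_MAIN()
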
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/skip_with_error_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/skip_with_error_test.cc
index b74d33c5..0c2f3481 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/skip_with_error_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/skip_with_error_test.cc
@@ -70,6 +70,15 @@ void BM_error_before_running(benchmark::State& state) {
BENCHMARK(BM_error_before_running);
ADD_CASES("BM_error_before_running", {{"", true, "error message"}});
+void BM_error_before_running_range_for(benchmark::State& state) {
+ state.SkipWithError("error message");
+ for (auto _ : state) {
+ assert(false);
+ }
+}
+BENCHMARK(BM_error_before_running_range_for);
+ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});
+
void BM_error_during_running(benchmark::State& state) {
int first_iter = true;
while (state.KeepRunning()) {
@@ -93,8 +102,31 @@ ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
{"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}});
+void BM_error_during_running_ranged_for(benchmark::State& state) {
+ assert(state.max_iterations > 3 && "test requires at least a few iterations");
+ int first_iter = true;
+ // NOTE: Users should not write the for loop explicitly.
+ for (auto It = state.begin(), End = state.end(); It != End; ++It) {
+ if (state.range(0) == 1) {
+ assert(first_iter);
+ first_iter = false;
+ state.SkipWithError("error message");
+ // Test the unfortunate but documented behavior that the ranged-for loop
+ // doesn't automatically terminate when SkipWithError is set.
+ assert(++It != End);
+ break; // Required behavior
+ }
+ }
+}
+BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5);
+ADD_CASES("BM_error_during_running_ranged_for",
+ {{"/1/iterations:5", true, "error message"},
+ {"/2/iterations:5", false, ""}});
+
+
+
void BM_error_after_running(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
if (state.thread_index <= (state.threads / 2))
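
The new skip_with_error cases cover the ranged-for loop in both directions: calling SkipWithError before entering the loop keeps the range-based loop from running at all, while calling it from inside the loop does not end the loop automatically, so the benchmark must break (or return) itself. A condensed sketch of the in-loop case; the failure condition is purely illustrative:

#include "benchmark/benchmark.h"

static void BM_SkipOnFailure(benchmark::State& state) {
  for (auto _ : state) {
    if (state.range(0) == 0) {  // illustrative failure condition
      state.SkipWithError("unsupported argument");
      break;  // the ranged-for does not stop on its own after SkipWithError
    }
    benchmark::DoNotOptimize(state.range(0));
  }
}
BENCHMARK(BM_SkipOnFailure)->Arg(0)->Arg(1);
BENCHMARK_MAIN()
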
diff --git a/MicroBenchmarks/libs/benchmark-1.3.0/test/templated_fixture_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/templated_fixture_test.cc
new file mode 100644
index 00000000..9341b786
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/templated_fixture_test.cc
@@ -0,0 +1,28 @@
+
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <memory>
+
+template<typename T>
+class MyFixture : public ::benchmark::Fixture {
+public:
+ MyFixture() : data(0) {}
+
+ T data;
+};
+
+BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st) {
+ for (auto _ : st) {
+ data += 1;
+ }
+}
+
+BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) {
+ for (auto _ : st) {
+ data += 1.0;
+ }
+}
+BENCHMARK_REGISTER_F(MyFixture, Bar);
+
+BENCHMARK_MAIN()
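
templated_fixture_test.cc is new in 1.3.0 and exercises fixtures templated on a type: BENCHMARK_TEMPLATE_F defines and registers a fixture benchmark in one step, while BENCHMARK_TEMPLATE_DEFINE_F plus BENCHMARK_REGISTER_F split definition from registration so extra options can be chained. A sketch using the same macros; the fixture and benchmark names are made up:

#include "benchmark/benchmark.h"

template <typename T>
class AccumFixture : public ::benchmark::Fixture {
 public:
  AccumFixture() : sum(T(0)) {}
  T sum;
};

// Define and register in one step.
BENCHMARK_TEMPLATE_F(AccumFixture, AddInt, int)(benchmark::State& st) {
  for (auto _ : st) sum += 1;
}

// Define first, register separately to attach options.
BENCHMARK_TEMPLATE_DEFINE_F(AccumFixture, AddDouble, double)(benchmark::State& st) {
  for (auto _ : st) sum += 1.0;
}
BENCHMARK_REGISTER_F(AccumFixture, AddDouble)->Repetitions(2);

BENCHMARK_MAIN()
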
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/user_counters_tabular_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_tabular_test.cc
index 5fc5b4d9..9b8a6132 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/user_counters_tabular_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_tabular_test.cc
@@ -54,7 +54,7 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,"
// ========================================================================= //
void BM_Counters_Tabular(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
@@ -69,8 +69,8 @@ void BM_Counters_Tabular(benchmark::State& state) {
BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
@@ -98,7 +98,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);
// ========================================================================= //
void BM_CounterRates_Tabular(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
@@ -113,8 +113,8 @@ void BM_CounterRates_Tabular(benchmark::State& state) {
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
@@ -145,7 +145,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
// set only some of the counters
void BM_CounterSet0_Tabular(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
@@ -157,8 +157,8 @@ void BM_CounterSet0_Tabular(benchmark::State& state) {
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
@@ -177,7 +177,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);
// again.
void BM_CounterSet1_Tabular(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
@@ -189,8 +189,8 @@ void BM_CounterSet1_Tabular(benchmark::State& state) {
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
@@ -213,7 +213,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
// set only some of the counters, different set now.
void BM_CounterSet2_Tabular(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
@@ -225,8 +225,8 @@ void BM_CounterSet2_Tabular(benchmark::State& state) {
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/test/user_counters_test.cc b/MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_test.cc
index 66df48b3..06aafb1f 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/test/user_counters_test.cc
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_test.cc
@@ -19,7 +19,7 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
// ========================================================================= //
void BM_Counters_Simple(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
state.counters["foo"] = 1;
state.counters["bar"] = 2 * (double)state.iterations();
@@ -28,8 +28,8 @@ BENCHMARK(BM_Counters_Simple);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
@@ -51,7 +51,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
namespace { int num_calls1 = 0; }
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
state.counters["foo"] = 1;
state.counters["bar"] = ++num_calls1;
@@ -64,11 +64,11 @@ ADD_CASES(TC_ConsoleOut,
"bar=%hrfloat foo=%hrfloat +%hrfloatB/s +%hrfloat items/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
- {"\"bytes_per_second\": %int,$", MR_Next},
- {"\"items_per_second\": %int,$", MR_Next},
+ {"\"bytes_per_second\": %float,$", MR_Next},
+ {"\"items_per_second\": %float,$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
@@ -92,7 +92,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
// ========================================================================= //
void BM_Counters_Rate(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
@@ -102,8 +102,8 @@ BENCHMARK(BM_Counters_Rate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
@@ -124,7 +124,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
// ========================================================================= //
void BM_Counters_Threads(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
state.counters["foo"] = 1;
state.counters["bar"] = 2;
@@ -133,8 +133,8 @@ BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
@@ -153,7 +153,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
// ========================================================================= //
void BM_Counters_AvgThreads(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
@@ -163,8 +163,8 @@ BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
@@ -184,7 +184,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
// ========================================================================= //
void BM_Counters_AvgThreadsRate(benchmark::State& state) {
- while (state.KeepRunning()) {
+ for (auto _ : state) {
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
@@ -194,8 +194,8 @@ BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
{"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %int,$", MR_Next},
- {"\"cpu_time\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/tools/compare_bench.py b/MicroBenchmarks/libs/benchmark-1.3.0/tools/compare_bench.py
index d54baaa0..7bbf0d01 100755
--- a/MicroBenchmarks/libs/benchmark-1.2.0/tools/compare_bench.py
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/tools/compare_bench.py
@@ -39,21 +39,20 @@ def main():
parser.add_argument(
'test2', metavar='test2', type=str, nargs=1,
help='A benchmark executable or JSON output file')
- # FIXME this is a dummy argument which will never actually match
- # any --benchmark flags but it helps generate a better usage message
parser.add_argument(
- 'benchmark_options', metavar='benchmark_option', nargs='*',
+ 'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables'
)
args, unknown_args = parser.parse_known_args()
# Parse the command line flags
test1 = args.test1[0]
test2 = args.test2[0]
- if args.benchmark_options:
+ if unknown_args:
+ # should never happen
print("Unrecognized positional argument arguments: '%s'"
- % args.benchmark_options)
+ % unknown_args)
exit(1)
- benchmark_options = unknown_args
+ benchmark_options = args.benchmark_options
check_inputs(test1, test2, benchmark_options)
# Run the benchmarks and report the results
json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options)
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run1.json b/MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run1.json
index 37faed46..d7ec6a9c 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run1.json
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run1.json
@@ -29,6 +29,20 @@
"time_unit": "ns"
},
{
+ "name": "BM_1PercentFaster",
+ "iterations": 1000,
+ "real_time": 100,
+ "cpu_time": 100,
+ "time_unit": "ns"
+ },
+ {
+ "name": "BM_1PercentSlower",
+ "iterations": 1000,
+ "real_time": 100,
+ "cpu_time": 100,
+ "time_unit": "ns"
+ },
+ {
"name": "BM_10PercentFaster",
"iterations": 1000,
"real_time": 100,
@@ -55,6 +69,34 @@
"real_time": 10000,
"cpu_time": 10000,
"time_unit": "ns"
+ },
+ {
+ "name": "BM_10PercentCPUToTime",
+ "iterations": 1000,
+ "real_time": 100,
+ "cpu_time": 100,
+ "time_unit": "ns"
+ },
+ {
+ "name": "BM_ThirdFaster",
+ "iterations": 1000,
+ "real_time": 100,
+ "cpu_time": 100,
+ "time_unit": "ns"
+ },
+ {
+ "name": "BM_BadTimeUnit",
+ "iterations": 1000,
+ "real_time": 0.4,
+ "cpu_time": 0.5,
+ "time_unit": "s"
+ },
+ {
+ "name": "BM_DifferentTimeUnit",
+ "iterations": 1,
+ "real_time": 1,
+ "cpu_time": 1,
+ "time_unit": "s"
}
]
-} \ No newline at end of file
+}
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run2.json b/MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run2.json
index aed5151d..59a5ffac 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run2.json
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run2.json
@@ -29,6 +29,20 @@
"time_unit": "ns"
},
{
+ "name": "BM_1PercentFaster",
+ "iterations": 1000,
+ "real_time": 98.9999999,
+ "cpu_time": 98.9999999,
+ "time_unit": "ns"
+ },
+ {
+ "name": "BM_1PercentSlower",
+ "iterations": 1000,
+ "real_time": 100.9999999,
+ "cpu_time": 100.9999999,
+ "time_unit": "ns"
+ },
+ {
"name": "BM_10PercentFaster",
"iterations": 1000,
"real_time": 90,
@@ -45,8 +59,8 @@
{
"name": "BM_100xSlower",
"iterations": 1000,
- "real_time": 10000,
- "cpu_time": 10000,
+ "real_time": 1.0000e+04,
+ "cpu_time": 1.0000e+04,
"time_unit": "ns"
},
{
@@ -55,6 +69,34 @@
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
+ },
+ {
+ "name": "BM_10PercentCPUToTime",
+ "iterations": 1000,
+ "real_time": 110,
+ "cpu_time": 90,
+ "time_unit": "ns"
+ },
+ {
+ "name": "BM_ThirdFaster",
+ "iterations": 1000,
+ "real_time": 66.665,
+ "cpu_time": 66.664,
+ "time_unit": "ns"
+ },
+ {
+ "name": "BM_BadTimeUnit",
+ "iterations": 1000,
+ "real_time": 0.04,
+ "cpu_time": 0.6,
+ "time_unit": "s"
+ },
+ {
+ "name": "BM_DifferentTimeUnit",
+ "iterations": 1,
+ "real_time": 1,
+ "cpu_time": 1,
+ "time_unit": "ns"
}
]
-} \ No newline at end of file
+}
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/__init__.py b/MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/__init__.py
index fce1a1ac..fce1a1ac 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/__init__.py
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/__init__.py
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/report.py b/MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/report.py
index 015d33d9..666a6090 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/report.py
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/report.py
@@ -71,13 +71,13 @@ def generate_difference_report(json1, json2, use_color=True):
Calculate and report the difference between each test of two benchmarks
runs specified as 'json1' and 'json2'.
"""
- first_col_width = find_longest_name(json1['benchmarks']) + 5
+ first_col_width = find_longest_name(json1['benchmarks'])
def find_test(name):
for b in json2['benchmarks']:
if b['name'] == name:
return b
return None
- first_line = "{:<{}s} Time CPU Old New".format(
+ first_line = "{:<{}s} Time CPU Time Old Time New CPU Old CPU New".format(
'Benchmark', first_col_width)
output_strs = [first_line, '-' * len(first_line)]
@@ -87,6 +87,9 @@ def generate_difference_report(json1, json2, use_color=True):
if not other_bench:
continue
+ if bn['time_unit'] != other_bench['time_unit']:
+ continue
+
def get_color(res):
if res > 0.05:
return BC_FAIL
@@ -94,12 +97,13 @@ def generate_difference_report(json1, json2, use_color=True):
return BC_WHITE
else:
return BC_CYAN
- fmt_str = "{}{:<{}s}{endc}{}{:+9.2f}{endc}{}{:+14.2f}{endc}{:14d}{:14d}"
+ fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
tres = calculate_change(bn['real_time'], other_bench['real_time'])
cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
output_strs += [color_format(use_color, fmt_str,
BC_HEADER, bn['name'], first_col_width,
get_color(tres), tres, get_color(cpures), cpures,
+ bn['real_time'], other_bench['real_time'],
bn['cpu_time'], other_bench['cpu_time'],
endc=BC_ENDC)]
return output_strs
@@ -123,22 +127,27 @@ class TestReportDifference(unittest.TestCase):
def test_basic(self):
expect_lines = [
- ['BM_SameTimes', '+0.00', '+0.00', '10', '10'],
- ['BM_2xFaster', '-0.50', '-0.50', '50', '25'],
- ['BM_2xSlower', '+1.00', '+1.00', '50', '100'],
- ['BM_10PercentFaster', '-0.10', '-0.10', '100', '90'],
- ['BM_10PercentSlower', '+0.10', '+0.10', '100', '110'],
- ['BM_100xSlower', '+99.00', '+99.00', '100', '10000'],
- ['BM_100xFaster', '-0.99', '-0.99', '10000', '100'],
+ ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
+ ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
+ ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
+ ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
+ ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
+ ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
+ ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
+ ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'],
+ ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'],
+ ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'],
+ ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
+ ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
]
json1, json2 = self.load_results()
output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
- for i in xrange(0, len(output_lines)):
+ for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
- self.assertEqual(len(parts), 5)
+ self.assertEqual(len(parts), 7)
self.assertEqual(parts, expect_lines[i])
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/util.py b/MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/util.py
index 07c23772..07c23772 100644
--- a/MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/util.py
+++ b/MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/util.py