Diffstat (limited to 'MicroBenchmarks/libs/benchmark-1.2.0/src')
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/CMakeLists.txt             78
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/arraysize.h                33
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark.cc              715
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_api_internal.h   46
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_register.cc     467
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/check.h                    79
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc             188
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.h               33
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc       218
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.h         79
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.cc             324
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.h               60
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/console_reporter.cc       180
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/counter.cc                 68
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h                  26
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc           149
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h              172
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h          57
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/json_reporter.cc          168
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/log.h                      73
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/mutex.h                   155
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/re.h                      140
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/reporter.cc                68
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc                   51
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.h                    15
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h                    310
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.cc            172
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.h              40
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc                355
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.h                  10
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc                 212
-rw-r--r--  MicroBenchmarks/libs/benchmark-1.2.0/src/timers.h                   48
32 files changed, 4789 insertions, 0 deletions
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/CMakeLists.txt b/MicroBenchmarks/libs/benchmark-1.2.0/src/CMakeLists.txt
new file mode 100644
index 00000000..244484b8
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/CMakeLists.txt
@@ -0,0 +1,78 @@
+# Allow the source files to find headers in src/
+include_directories(${PROJECT_SOURCE_DIR}/src)
+
+if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
+ list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
+ list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
+endif()
+
+file(GLOB
+ SOURCE_FILES
+ *.cc
+ ${PROJECT_SOURCE_DIR}/include/benchmark/*.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/*.h)
+
+add_library(benchmark ${SOURCE_FILES})
+set_target_properties(benchmark PROPERTIES
+ OUTPUT_NAME "benchmark"
+ VERSION ${GENERIC_LIB_VERSION}
+ SOVERSION ${GENERIC_LIB_SOVERSION}
+)
+target_include_directories(benchmark PUBLIC
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
+ )
+
+# Link threads.
+target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+find_library(LIBRT rt)
+if(LIBRT)
+ target_link_libraries(benchmark ${LIBRT})
+endif()
+
+# We need extra libraries on Windows
+if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
+ target_link_libraries(benchmark Shlwapi)
+endif()
+
+set(include_install_dir "include")
+set(lib_install_dir "lib/")
+set(bin_install_dir "bin/")
+set(config_install_dir "lib/cmake/${PROJECT_NAME}")
+
+set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
+
+set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake")
+set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
+set(targets_export_name "${PROJECT_NAME}Targets")
+
+set(namespace "${PROJECT_NAME}::")
+
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file(
+ "${version_config}" VERSION ${GIT_VERSION} COMPATIBILITY SameMajorVersion
+)
+
+configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
+
+# Install target (installs the library to the location specified by CMAKE_INSTALL_PREFIX)
+install(
+ TARGETS benchmark
+ EXPORT ${targets_export_name}
+ ARCHIVE DESTINATION ${lib_install_dir}
+ LIBRARY DESTINATION ${lib_install_dir}
+ RUNTIME DESTINATION ${bin_install_dir}
+ INCLUDES DESTINATION ${include_install_dir})
+
+install(
+ DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
+ DESTINATION ${include_install_dir}
+ FILES_MATCHING PATTERN "*.*h")
+
+install(
+ FILES "${project_config}" "${version_config}"
+ DESTINATION "${config_install_dir}")
+
+install(
+ EXPORT "${targets_export_name}"
+ NAMESPACE "${namespace}"
+ DESTINATION "${config_install_dir}")
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/arraysize.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/arraysize.h
new file mode 100644
index 00000000..51a50f2d
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/arraysize.h
@@ -0,0 +1,33 @@
+#ifndef BENCHMARK_ARRAYSIZE_H_
+#define BENCHMARK_ARRAYSIZE_H_
+
+#include "internal_macros.h"
+
+namespace benchmark {
+namespace internal {
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example. If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+//
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+
+// That gcc wants both of these prototypes seems mysterious. VC, for
+// its part, can't decide which to use (another mystery). Matching of
+// template overloads: the final frontier.
+#ifndef COMPILER_MSVC
+template <typename T, size_t N>
+char (&ArraySizeHelper(const T (&array)[N]))[N];
+#endif
+
+#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
+
+} // end namespace internal
+} // end namespace benchmark
+
+#endif // BENCHMARK_ARRAYSIZE_H_
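As a quick illustration of the macro above (a sketch, not upstream code): arraysize yields a compile-time constant, so it can size other arrays, and applying it to a pointer fails to compile because ArraySizeHelper only matches real arrays:

    static const int kInputs[] = {8, 64, 512, 4096};
    static_assert(arraysize(kInputs) == 4, "arraysize is a compile-time constant");
    char scratch[arraysize(kInputs)];  // usable as an array bound

    // const int* p = kInputs;
    // arraysize(p);  // compile error: no ArraySizeHelper overload takes a pointer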
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark.cc
new file mode 100644
index 00000000..1ba0a50a
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark.cc
@@ -0,0 +1,715 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
+#include "internal_macros.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <condition_variable>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <thread>
+
+#include "check.h"
+#include "colorprint.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "counter.h"
+#include "log.h"
+#include "mutex.h"
+#include "re.h"
+#include "stat.h"
+#include "string_util.h"
+#include "sysinfo.h"
+#include "timers.h"
+
+DEFINE_bool(benchmark_list_tests, false,
+ "Print a list of benchmarks. This option overrides all other "
+ "options.");
+
+DEFINE_string(benchmark_filter, ".",
+ "A regular expression that specifies the set of benchmarks "
+ "to execute. If this flag is empty, no benchmarks are run. "
+ "If this flag is the string \"all\", all benchmarks linked "
+ "into the process are run.");
+
+DEFINE_double(benchmark_min_time, 0.5,
+ "Minimum number of seconds we should run benchmark before "
+ "results are considered significant. For cpu-time based "
+ "tests, this is the lower bound on the total cpu time "
+ "used by all threads that make up the test. For real-time "
+ "based tests, this is the lower bound on the elapsed time "
+ "of the benchmark execution, regardless of number of "
+ "threads.");
+
+DEFINE_int32(benchmark_repetitions, 1,
+ "The number of runs of each benchmark. If greater than 1, the "
+ "mean and standard deviation of the runs will be reported.");
+
+DEFINE_bool(benchmark_report_aggregates_only, false,
+ "Report the result of each benchmark repetitions. When 'true' is "
+ "specified only the mean, standard deviation, and other statistics "
+ "are reported for repeated benchmarks.");
+
+DEFINE_string(benchmark_format, "console",
+ "The format to use for console output. Valid values are "
+ "'console', 'json', or 'csv'.");
+
+DEFINE_string(benchmark_out_format, "json",
+ "The format to use for file output. Valid values are "
+ "'console', 'json', or 'csv'.");
+
+DEFINE_string(benchmark_out, "", "The file to write additonal output to");
+
+DEFINE_string(benchmark_color, "auto",
+ "Whether to use colors in the output. Valid values: "
+ "'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use "
+ "colors if the output is being sent to a terminal and the TERM "
+ "environment variable is set to a terminal type that supports "
+ "colors.");
+
+DEFINE_bool(benchmark_counters_tabular, false,
+ "Whether to use tabular format when printing user counters to "
+ "the console. Valid values: 'true'/'yes'/1, 'false'/'no'/0."
+ "Defaults to false.");
+
+DEFINE_int32(v, 0, "The level of verbose logging to output");
+
+namespace benchmark {
+namespace internal {
+
+void UseCharPointer(char const volatile*) {}
+
+} // end namespace internal
+
+namespace {
+
+static const size_t kMaxIterations = 1000000000;
+
+} // end namespace
+
+namespace internal {
+
+class ThreadManager {
+ public:
+ ThreadManager(int num_threads)
+ : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
+
+ Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
+ return benchmark_mutex_;
+ }
+
+ bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
+ return start_stop_barrier_.wait();
+ }
+
+ void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
+ start_stop_barrier_.removeThread();
+ if (--alive_threads_ == 0) {
+ MutexLock lock(end_cond_mutex_);
+ end_condition_.notify_all();
+ }
+ }
+
+ void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
+ MutexLock lock(end_cond_mutex_);
+ end_condition_.wait(lock.native_handle(),
+ [this]() { return alive_threads_ == 0; });
+ }
+
+ public:
+ struct Result {
+ double real_time_used = 0;
+ double cpu_time_used = 0;
+ double manual_time_used = 0;
+ int64_t bytes_processed = 0;
+ int64_t items_processed = 0;
+ int complexity_n = 0;
+ std::string report_label_;
+ std::string error_message_;
+ bool has_error_ = false;
+ UserCounters counters;
+ };
+ GUARDED_BY(GetBenchmarkMutex()) Result results;
+
+ private:
+ mutable Mutex benchmark_mutex_;
+ std::atomic<int> alive_threads_;
+ Barrier start_stop_barrier_;
+ Mutex end_cond_mutex_;
+ Condition end_condition_;
+};
+
+// Timer management class
+class ThreadTimer {
+ public:
+ ThreadTimer() = default;
+
+ // Called by each thread
+ void StartTimer() {
+ running_ = true;
+ start_real_time_ = ChronoClockNow();
+ start_cpu_time_ = ThreadCPUUsage();
+ }
+
+ // Called by each thread
+ void StopTimer() {
+ CHECK(running_);
+ running_ = false;
+ real_time_used_ += ChronoClockNow() - start_real_time_;
+ cpu_time_used_ += ThreadCPUUsage() - start_cpu_time_;
+ }
+
+ // Called by each thread
+ void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
+
+ bool running() const { return running_; }
+
+ // REQUIRES: timer is not running
+ double real_time_used() {
+ CHECK(!running_);
+ return real_time_used_;
+ }
+
+ // REQUIRES: timer is not running
+ double cpu_time_used() {
+ CHECK(!running_);
+ return cpu_time_used_;
+ }
+
+ // REQUIRES: timer is not running
+ double manual_time_used() {
+ CHECK(!running_);
+ return manual_time_used_;
+ }
+
+ private:
+ bool running_ = false; // Is the timer running
+ double start_real_time_ = 0; // If running_
+ double start_cpu_time_ = 0; // If running_
+
+ // Accumulated time so far (does not contain current slice if running_)
+ double real_time_used_ = 0;
+ double cpu_time_used_ = 0;
+ // Manually set iteration time. User sets this with SetIterationTime(seconds).
+ double manual_time_used_ = 0;
+};
+
+namespace {
+
+BenchmarkReporter::Run CreateRunReport(
+ const benchmark::internal::Benchmark::Instance& b,
+ const internal::ThreadManager::Result& results, size_t iters,
+ double seconds) {
+ // Create report about this benchmark run.
+ BenchmarkReporter::Run report;
+
+ report.benchmark_name = b.name;
+ report.error_occurred = results.has_error_;
+ report.error_message = results.error_message_;
+ report.report_label = results.report_label_;
+ // Report the total iterations across all threads.
+ report.iterations = static_cast<int64_t>(iters) * b.threads;
+ report.time_unit = b.time_unit;
+
+ if (!report.error_occurred) {
+ double bytes_per_second = 0;
+ if (results.bytes_processed > 0 && seconds > 0.0) {
+ bytes_per_second = (results.bytes_processed / seconds);
+ }
+ double items_per_second = 0;
+ if (results.items_processed > 0 && seconds > 0.0) {
+ items_per_second = (results.items_processed / seconds);
+ }
+
+ if (b.use_manual_time) {
+ report.real_accumulated_time = results.manual_time_used;
+ } else {
+ report.real_accumulated_time = results.real_time_used;
+ }
+ report.cpu_accumulated_time = results.cpu_time_used;
+ report.bytes_per_second = bytes_per_second;
+ report.items_per_second = items_per_second;
+ report.complexity_n = results.complexity_n;
+ report.complexity = b.complexity;
+ report.complexity_lambda = b.complexity_lambda;
+ report.counters = results.counters;
+ internal::Finish(&report.counters, seconds, b.threads);
+ }
+ return report;
+}
+
+// Execute one thread of benchmark b for the specified number of iterations.
+// Adds the stats collected for the thread into *total.
+void RunInThread(const benchmark::internal::Benchmark::Instance* b,
+ size_t iters, int thread_id,
+ internal::ThreadManager* manager) {
+ internal::ThreadTimer timer;
+ State st(iters, b->arg, thread_id, b->threads, &timer, manager);
+ b->benchmark->Run(st);
+ CHECK(st.iterations() == st.max_iterations)
+ << "Benchmark returned before State::KeepRunning() returned false!";
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ internal::ThreadManager::Result& results = manager->results;
+ results.cpu_time_used += timer.cpu_time_used();
+ results.real_time_used += timer.real_time_used();
+ results.manual_time_used += timer.manual_time_used();
+ results.bytes_processed += st.bytes_processed();
+ results.items_processed += st.items_processed();
+ results.complexity_n += st.complexity_length_n();
+ internal::Increment(&results.counters, st.counters);
+ }
+ manager->NotifyThreadComplete();
+}
+
+std::vector<BenchmarkReporter::Run> RunBenchmark(
+ const benchmark::internal::Benchmark::Instance& b,
+ std::vector<BenchmarkReporter::Run>* complexity_reports) {
+ std::vector<BenchmarkReporter::Run> reports; // return value
+
+ const bool has_explicit_iteration_count = b.iterations != 0;
+ size_t iters = has_explicit_iteration_count ? b.iterations : 1;
+ std::unique_ptr<internal::ThreadManager> manager;
+ std::vector<std::thread> pool(b.threads - 1);
+ const int repeats =
+ b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions;
+ const bool report_aggregates_only =
+ repeats != 1 &&
+ (b.report_mode == internal::RM_Unspecified
+ ? FLAGS_benchmark_report_aggregates_only
+ : b.report_mode == internal::RM_ReportAggregatesOnly);
+ for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
+ for (;;) {
+ // Try benchmark
+ VLOG(2) << "Running " << b.name << " for " << iters << "\n";
+
+ manager.reset(new internal::ThreadManager(b.threads));
+ for (std::size_t ti = 0; ti < pool.size(); ++ti) {
+ pool[ti] = std::thread(&RunInThread, &b, iters,
+ static_cast<int>(ti + 1), manager.get());
+ }
+ RunInThread(&b, iters, 0, manager.get());
+ manager->WaitForAllThreads();
+ for (std::thread& thread : pool) thread.join();
+ internal::ThreadManager::Result results;
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ results = manager->results;
+ }
+ manager.reset();
+ // Adjust real/manual time stats since they were reported per thread.
+ results.real_time_used /= b.threads;
+ results.manual_time_used /= b.threads;
+
+ VLOG(2) << "Ran in " << results.cpu_time_used << "/"
+ << results.real_time_used << "\n";
+
+ // Base decisions off of real time if requested by this benchmark.
+ double seconds = results.cpu_time_used;
+ if (b.use_manual_time) {
+ seconds = results.manual_time_used;
+ } else if (b.use_real_time) {
+ seconds = results.real_time_used;
+ }
+
+ const double min_time =
+ !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time;
+
+ // Determine if this run should be reported; either it has
+ // run for a sufficient amount of time or an error was reported.
+ const bool should_report = repetition_num > 0
+ || has_explicit_iteration_count // An exact iteration count was requested
+ || results.has_error_
+ || iters >= kMaxIterations
+ || seconds >= min_time // the elapsed time is large enough
+ // CPU time is specified but the elapsed real time greatly exceeds the
+ // minimum time. Note that user-provided timers are exempt from this
+ // sanity check.
+ || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time);
+
+ if (should_report) {
+ BenchmarkReporter::Run report =
+ CreateRunReport(b, results, iters, seconds);
+ if (!report.error_occurred && b.complexity != oNone)
+ complexity_reports->push_back(report);
+ reports.push_back(report);
+ break;
+ }
+
+ // See by how much the iteration count should be increased
+ // Note: Avoid division by zero with max(seconds, 1ns).
+ double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
+ // If our last run was at least 10% of FLAGS_benchmark_min_time then we
+ // use the multiplier directly. Otherwise we use at most 10 times
+ // expansion.
+ // NOTE: When the last run was at least 10% of the min time the max
+ // expansion should be 14x.
+ bool is_significant = (seconds / min_time) > 0.1;
+ multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
+ if (multiplier <= 1.0) multiplier = 2.0;
+ double next_iters = std::max(multiplier * iters, iters + 1.0);
+ if (next_iters > kMaxIterations) {
+ next_iters = kMaxIterations;
+ }
+ VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
+ iters = static_cast<int>(next_iters + 0.5);
+ }
+ }
+ // Calculate additional statistics
+ auto stat_reports = ComputeStats(reports);
+ if ((b.complexity != oNone) && b.last_benchmark_instance) {
+ auto additional_run_stats = ComputeBigO(*complexity_reports);
+ stat_reports.insert(stat_reports.end(), additional_run_stats.begin(),
+ additional_run_stats.end());
+ complexity_reports->clear();
+ }
+
+ if (report_aggregates_only) reports.clear();
+ reports.insert(reports.end(), stat_reports.begin(), stat_reports.end());
+ return reports;
+}
+
+} // namespace
+} // namespace internal
+
+State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
+ int n_threads, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager)
+ : started_(false),
+ finished_(false),
+ total_iterations_(0),
+ range_(ranges),
+ bytes_processed_(0),
+ items_processed_(0),
+ complexity_n_(0),
+ error_occurred_(false),
+ counters(),
+ thread_index(thread_i),
+ threads(n_threads),
+ max_iterations(max_iters),
+ timer_(timer),
+ manager_(manager) {
+ CHECK(max_iterations != 0) << "At least one iteration must be run";
+ CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
+}
+
+void State::PauseTiming() {
+ // Add in time accumulated so far
+ CHECK(started_ && !finished_ && !error_occurred_);
+ timer_->StopTimer();
+}
+
+void State::ResumeTiming() {
+ CHECK(started_ && !finished_ && !error_occurred_);
+ timer_->StartTimer();
+}
+
+void State::SkipWithError(const char* msg) {
+ CHECK(msg);
+ error_occurred_ = true;
+ {
+ MutexLock l(manager_->GetBenchmarkMutex());
+ if (manager_->results.has_error_ == false) {
+ manager_->results.error_message_ = msg;
+ manager_->results.has_error_ = true;
+ }
+ }
+ total_iterations_ = max_iterations;
+ if (timer_->running()) timer_->StopTimer();
+}
+
+void State::SetIterationTime(double seconds) {
+ timer_->SetIterationTime(seconds);
+}
+
+void State::SetLabel(const char* label) {
+ MutexLock l(manager_->GetBenchmarkMutex());
+ manager_->results.report_label_ = label;
+}
+
+void State::StartKeepRunning() {
+ CHECK(!started_ && !finished_);
+ started_ = true;
+ manager_->StartStopBarrier();
+ if (!error_occurred_) ResumeTiming();
+}
+
+void State::FinishKeepRunning() {
+ CHECK(started_ && (!finished_ || error_occurred_));
+ if (!error_occurred_) {
+ PauseTiming();
+ }
+ // Total iterations is now one greater than max iterations. Fix this.
+ total_iterations_ = max_iterations;
+ finished_ = true;
+ manager_->StartStopBarrier();
+}
+
+namespace internal {
+namespace {
+
+void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
+ BenchmarkReporter* console_reporter,
+ BenchmarkReporter* file_reporter) {
+ // Note the file_reporter can be null.
+ CHECK(console_reporter != nullptr);
+
+ // Determine the width of the name field using a minimum width of 10.
+ bool has_repetitions = FLAGS_benchmark_repetitions > 1;
+ size_t name_field_width = 10;
+ for (const Benchmark::Instance& benchmark : benchmarks) {
+ name_field_width =
+ std::max<size_t>(name_field_width, benchmark.name.size());
+ has_repetitions |= benchmark.repetitions > 1;
+ }
+ if (has_repetitions) name_field_width += std::strlen("_stddev");
+
+ // Print header here
+ BenchmarkReporter::Context context;
+ context.num_cpus = NumCPUs();
+ context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f;
+
+ context.cpu_scaling_enabled = CpuScalingEnabled();
+ context.name_field_width = name_field_width;
+
+ // Keep track of running times of all instances of the current benchmark
+ std::vector<BenchmarkReporter::Run> complexity_reports;
+
+ // We flush streams after invoking reporter methods that write to them. This
+ // ensures users get timely updates even when streams are not line-buffered.
+ auto flushStreams = [](BenchmarkReporter* reporter) {
+ if (!reporter) return;
+ std::flush(reporter->GetOutputStream());
+ std::flush(reporter->GetErrorStream());
+ };
+
+ if (console_reporter->ReportContext(context) &&
+ (!file_reporter || file_reporter->ReportContext(context))) {
+ flushStreams(console_reporter);
+ flushStreams(file_reporter);
+ for (const auto& benchmark : benchmarks) {
+ std::vector<BenchmarkReporter::Run> reports =
+ RunBenchmark(benchmark, &complexity_reports);
+ console_reporter->ReportRuns(reports);
+ if (file_reporter) file_reporter->ReportRuns(reports);
+ flushStreams(console_reporter);
+ flushStreams(file_reporter);
+ }
+ }
+ console_reporter->Finalize();
+ if (file_reporter) file_reporter->Finalize();
+ flushStreams(console_reporter);
+ flushStreams(file_reporter);
+}
+
+std::unique_ptr<BenchmarkReporter> CreateReporter(
+ std::string const& name, ConsoleReporter::OutputOptions output_opts) {
+ typedef std::unique_ptr<BenchmarkReporter> PtrType;
+ if (name == "console") {
+ return PtrType(new ConsoleReporter(output_opts));
+ } else if (name == "json") {
+ return PtrType(new JSONReporter);
+ } else if (name == "csv") {
+ return PtrType(new CSVReporter);
+ } else {
+ std::cerr << "Unexpected format: '" << name << "'\n";
+ std::exit(1);
+ }
+}
+
+} // end namespace
+
+bool IsZero(double n) {
+ return std::abs(n) < std::numeric_limits<double>::epsilon();
+}
+
+ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
+ int output_opts = ConsoleReporter::OO_Defaults;
+ if ((FLAGS_benchmark_color == "auto" && IsColorTerminal()) ||
+ IsTruthyFlagValue(FLAGS_benchmark_color)) {
+ output_opts |= ConsoleReporter::OO_Color;
+ } else {
+ output_opts &= ~ConsoleReporter::OO_Color;
+ }
+ if(force_no_color) {
+ output_opts &= ~ConsoleReporter::OO_Color;
+ }
+ if(FLAGS_benchmark_counters_tabular) {
+ output_opts |= ConsoleReporter::OO_Tabular;
+ } else {
+ output_opts &= ~ConsoleReporter::OO_Tabular;
+ }
+ return static_cast< ConsoleReporter::OutputOptions >(output_opts);
+}
+
+} // end namespace internal
+
+size_t RunSpecifiedBenchmarks() {
+ return RunSpecifiedBenchmarks(nullptr, nullptr);
+}
+
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter) {
+ return RunSpecifiedBenchmarks(console_reporter, nullptr);
+}
+
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
+ BenchmarkReporter* file_reporter) {
+ std::string spec = FLAGS_benchmark_filter;
+ if (spec.empty() || spec == "all")
+ spec = "."; // Regexp that matches all benchmarks
+
+ // Setup the reporters
+ std::ofstream output_file;
+ std::unique_ptr<BenchmarkReporter> default_console_reporter;
+ std::unique_ptr<BenchmarkReporter> default_file_reporter;
+ if (!console_reporter) {
+ default_console_reporter = internal::CreateReporter(
+ FLAGS_benchmark_format, internal::GetOutputOptions());
+ console_reporter = default_console_reporter.get();
+ }
+ auto& Out = console_reporter->GetOutputStream();
+ auto& Err = console_reporter->GetErrorStream();
+
+ std::string const& fname = FLAGS_benchmark_out;
+ if (fname.empty() && file_reporter) {
+ Err << "A custom file reporter was provided but "
+ "--benchmark_out=<file> was not specified."
+ << std::endl;
+ std::exit(1);
+ }
+ if (!fname.empty()) {
+ output_file.open(fname);
+ if (!output_file.is_open()) {
+ Err << "invalid file name: '" << fname << std::endl;
+ std::exit(1);
+ }
+ if (!file_reporter) {
+ default_file_reporter = internal::CreateReporter(
+ FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
+ file_reporter = default_file_reporter.get();
+ }
+ file_reporter->SetOutputStream(&output_file);
+ file_reporter->SetErrorStream(&output_file);
+ }
+
+ std::vector<internal::Benchmark::Instance> benchmarks;
+ if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
+
+ if (benchmarks.empty()) {
+ Err << "Failed to match any benchmarks against regex: " << spec << "\n";
+ return 0;
+ }
+
+ if (FLAGS_benchmark_list_tests) {
+ for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n";
+ } else {
+ internal::RunBenchmarks(benchmarks, console_reporter, file_reporter);
+ }
+
+ return benchmarks.size();
+}
+
+namespace internal {
+
+void PrintUsageAndExit() {
+ fprintf(stdout,
+ "benchmark"
+ " [--benchmark_list_tests={true|false}]\n"
+ " [--benchmark_filter=<regex>]\n"
+ " [--benchmark_min_time=<min_time>]\n"
+ " [--benchmark_repetitions=<num_repetitions>]\n"
+ " [--benchmark_report_aggregates_only={true|false}\n"
+ " [--benchmark_format=<console|json|csv>]\n"
+ " [--benchmark_out=<filename>]\n"
+ " [--benchmark_out_format=<json|console|csv>]\n"
+ " [--benchmark_color={auto|true|false}]\n"
+ " [--benchmark_counters_tabular={true|false}]\n"
+ " [--v=<verbosity>]\n");
+ exit(0);
+}
+
+void ParseCommandLineFlags(int* argc, char** argv) {
+ using namespace benchmark;
+ for (int i = 1; i < *argc; ++i) {
+ if (ParseBoolFlag(argv[i], "benchmark_list_tests",
+ &FLAGS_benchmark_list_tests) ||
+ ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
+ ParseDoubleFlag(argv[i], "benchmark_min_time",
+ &FLAGS_benchmark_min_time) ||
+ ParseInt32Flag(argv[i], "benchmark_repetitions",
+ &FLAGS_benchmark_repetitions) ||
+ ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
+ &FLAGS_benchmark_report_aggregates_only) ||
+ ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
+ ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
+ ParseStringFlag(argv[i], "benchmark_out_format",
+ &FLAGS_benchmark_out_format) ||
+ ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
+ // "color_print" is the deprecated name for "benchmark_color".
+ // TODO: Remove this.
+ ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
+ ParseBoolFlag(argv[i], "benchmark_counters_tabular",
+ &FLAGS_benchmark_counters_tabular) ||
+ ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
+ for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
+
+ --(*argc);
+ --i;
+ } else if (IsFlag(argv[i], "help")) {
+ PrintUsageAndExit();
+ }
+ }
+ for (auto const* flag :
+ {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
+ if (*flag != "console" && *flag != "json" && *flag != "csv") {
+ PrintUsageAndExit();
+ }
+ if (FLAGS_benchmark_color.empty()) {
+ PrintUsageAndExit();
+ }
+}
+
+int InitializeStreams() {
+ static std::ios_base::Init init;
+ return 0;
+}
+
+} // end namespace internal
+
+void Initialize(int* argc, char** argv) {
+ internal::ParseCommandLineFlags(argc, argv);
+ internal::LogLevel() = FLAGS_v;
+}
+
+bool ReportUnrecognizedArguments(int argc, char** argv) {
+ for (int i = 1; i < argc; ++i) {
+ fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
+ }
+ return argc > 1;
+}
+
+} // end namespace benchmark
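Tying the pieces above together: Initialize() consumes the --benchmark_* flags defined at the top of this file, and RunSpecifiedBenchmarks() drives RunBenchmarks() with the chosen reporters. A sketch of a typical entry point (BM_MemCopy and its buffer sizes are hypothetical):

    #include <cstdint>
    #include <cstring>
    #include <vector>
    #include "benchmark/benchmark.h"

    static void BM_MemCopy(benchmark::State& state) {
      std::vector<char> src(state.range(0)), dst(state.range(0));
      while (state.KeepRunning()) {
        std::memcpy(dst.data(), src.data(), src.size());
      }
      // Feeds Result::bytes_processed, reported as bytes_per_second.
      state.SetBytesProcessed(int64_t(state.iterations()) * state.range(0));
    }
    BENCHMARK(BM_MemCopy)->Arg(1 << 10)->Arg(1 << 20);

    int main(int argc, char** argv) {
      benchmark::Initialize(&argc, argv);  // parses and strips --benchmark_* flags
      if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
      benchmark::RunSpecifiedBenchmarks();  // honors --benchmark_filter etc.
      return 0;
    }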
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_api_internal.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_api_internal.h
new file mode 100644
index 00000000..36d23404
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_api_internal.h
@@ -0,0 +1,46 @@
+#ifndef BENCHMARK_API_INTERNAL_H
+#define BENCHMARK_API_INTERNAL_H
+
+#include "benchmark/benchmark.h"
+
+#include <cmath>
+#include <iosfwd>
+#include <limits>
+#include <string>
+#include <vector>
+
+namespace benchmark {
+namespace internal {
+
+// Information kept per benchmark we may want to run
+struct Benchmark::Instance {
+ std::string name;
+ Benchmark* benchmark;
+ ReportMode report_mode;
+ std::vector<int> arg;
+ TimeUnit time_unit;
+ int range_multiplier;
+ bool use_real_time;
+ bool use_manual_time;
+ BigO complexity;
+ BigOFunc* complexity_lambda;
+ UserCounters counters;
+ bool last_benchmark_instance;
+ int repetitions;
+ double min_time;
+ size_t iterations;
+ int threads; // Number of concurrent threads to use
+};
+
+bool FindBenchmarksInternal(const std::string& re,
+ std::vector<Benchmark::Instance>* benchmarks,
+ std::ostream* Err);
+
+bool IsZero(double n);
+
+ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);
+
+} // end namespace internal
+} // end namespace benchmark
+
+#endif // BENCHMARK_API_INTERNAL_H
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_register.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_register.cc
new file mode 100644
index 00000000..ed70d820
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_register.cc
@@ -0,0 +1,467 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
+#include "internal_macros.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <condition_variable>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <sstream>
+#include <thread>
+
+#include "check.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "log.h"
+#include "mutex.h"
+#include "re.h"
+#include "stat.h"
+#include "string_util.h"
+#include "sysinfo.h"
+#include "timers.h"
+
+namespace benchmark {
+
+namespace {
+// For non-dense Range, intermediate values are powers of kRangeMultiplier.
+static const int kRangeMultiplier = 8;
+// The size of a benchmark family is the number of inputs on which the
+// benchmark is repeated. If "large", warn the user during configuration.
+static const size_t kMaxFamilySize = 100;
+} // end namespace
+
+namespace internal {
+
+//=============================================================================//
+// BenchmarkFamilies
+//=============================================================================//
+
+// Class for managing registered benchmarks. Note that each registered
+// benchmark identifies a family of related benchmarks to run.
+class BenchmarkFamilies {
+ public:
+ static BenchmarkFamilies* GetInstance();
+
+ // Registers a benchmark family and returns the index assigned to it.
+ size_t AddBenchmark(std::unique_ptr<Benchmark> family);
+
+ // Clear all registered benchmark families.
+ void ClearBenchmarks();
+
+ // Extract the list of benchmark instances that match the specified
+ // regular expression.
+ bool FindBenchmarks(const std::string& re,
+ std::vector<Benchmark::Instance>* benchmarks,
+ std::ostream* Err);
+
+ private:
+ BenchmarkFamilies() {}
+
+ std::vector<std::unique_ptr<Benchmark>> families_;
+ Mutex mutex_;
+};
+
+BenchmarkFamilies* BenchmarkFamilies::GetInstance() {
+ static BenchmarkFamilies instance;
+ return &instance;
+}
+
+size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family) {
+ MutexLock l(mutex_);
+ size_t index = families_.size();
+ families_.push_back(std::move(family));
+ return index;
+}
+
+void BenchmarkFamilies::ClearBenchmarks() {
+ MutexLock l(mutex_);
+ families_.clear();
+ families_.shrink_to_fit();
+}
+
+bool BenchmarkFamilies::FindBenchmarks(
+ const std::string& spec, std::vector<Benchmark::Instance>* benchmarks,
+ std::ostream* ErrStream) {
+ CHECK(ErrStream);
+ auto& Err = *ErrStream;
+ // Make regular expression out of command-line flag
+ std::string error_msg;
+ Regex re;
+ if (!re.Init(spec, &error_msg)) {
+ Err << "Could not compile benchmark re: " << error_msg << std::endl;
+ return false;
+ }
+
+ // Special list of thread counts to use when none are specified
+ const std::vector<int> one_thread = {1};
+
+ MutexLock l(mutex_);
+ for (std::unique_ptr<Benchmark>& family : families_) {
+ // Family was deleted or benchmark doesn't match
+ if (!family) continue;
+
+ if (family->ArgsCnt() == -1) {
+ family->Args({});
+ }
+ const std::vector<int>* thread_counts =
+ (family->thread_counts_.empty()
+ ? &one_thread
+ : &static_cast<const std::vector<int>&>(family->thread_counts_));
+ const size_t family_size = family->args_.size() * thread_counts->size();
+ // The benchmark will be run on at least 'family_size' different inputs.
+ // If 'family_size' is very large warn the user.
+ if (family_size > kMaxFamilySize) {
+ Err << "The number of inputs is very large. " << family->name_
+ << " will be repeated at least " << family_size << " times.\n";
+ }
+ // Reserve space in the special case of the regex ".", since then we
+ // know the final family size.
+ if (spec == ".") benchmarks->reserve(family_size);
+
+ for (auto const& args : family->args_) {
+ for (int num_threads : *thread_counts) {
+ Benchmark::Instance instance;
+ instance.name = family->name_;
+ instance.benchmark = family.get();
+ instance.report_mode = family->report_mode_;
+ instance.arg = args;
+ instance.time_unit = family->time_unit_;
+ instance.range_multiplier = family->range_multiplier_;
+ instance.min_time = family->min_time_;
+ instance.iterations = family->iterations_;
+ instance.repetitions = family->repetitions_;
+ instance.use_real_time = family->use_real_time_;
+ instance.use_manual_time = family->use_manual_time_;
+ instance.complexity = family->complexity_;
+ instance.complexity_lambda = family->complexity_lambda_;
+ instance.threads = num_threads;
+
+ // Add arguments to instance name
+ size_t arg_i = 0;
+ for (auto const& arg : args) {
+ instance.name += "/";
+
+ if (arg_i < family->arg_names_.size()) {
+ const auto& arg_name = family->arg_names_[arg_i];
+ if (!arg_name.empty()) {
+ instance.name +=
+ StringPrintF("%s:", family->arg_names_[arg_i].c_str());
+ }
+ }
+
+ instance.name += StringPrintF("%d", arg);
+ ++arg_i;
+ }
+
+ if (!IsZero(family->min_time_))
+ instance.name += StringPrintF("/min_time:%0.3f", family->min_time_);
+ if (family->iterations_ != 0)
+ instance.name += StringPrintF("/iterations:%d", family->iterations_);
+ if (family->repetitions_ != 0)
+ instance.name += StringPrintF("/repeats:%d", family->repetitions_);
+
+ if (family->use_manual_time_) {
+ instance.name += "/manual_time";
+ } else if (family->use_real_time_) {
+ instance.name += "/real_time";
+ }
+
+ // Add the number of threads used to the name
+ if (!family->thread_counts_.empty()) {
+ instance.name += StringPrintF("/threads:%d", instance.threads);
+ }
+
+ if (re.Match(instance.name)) {
+ instance.last_benchmark_instance = (&args == &family->args_.back());
+ benchmarks->push_back(std::move(instance));
+ }
+ }
+ }
+ }
+ return true;
+}
+
+Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
+ std::unique_ptr<Benchmark> bench_ptr(bench);
+ BenchmarkFamilies* families = BenchmarkFamilies::GetInstance();
+ families->AddBenchmark(std::move(bench_ptr));
+ return bench;
+}
+
+// FIXME: This function is a hack so that benchmark.cc can access
+// `BenchmarkFamilies`
+bool FindBenchmarksInternal(const std::string& re,
+ std::vector<Benchmark::Instance>* benchmarks,
+ std::ostream* Err) {
+ return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
+}
+
+//=============================================================================//
+// Benchmark
+//=============================================================================//
+
+Benchmark::Benchmark(const char* name)
+ : name_(name),
+ report_mode_(RM_Unspecified),
+ time_unit_(kNanosecond),
+ range_multiplier_(kRangeMultiplier),
+ min_time_(0),
+ iterations_(0),
+ repetitions_(0),
+ use_real_time_(false),
+ use_manual_time_(false),
+ complexity_(oNone),
+ complexity_lambda_(nullptr) {}
+
+Benchmark::~Benchmark() {}
+
+void Benchmark::AddRange(std::vector<int>* dst, int lo, int hi, int mult) {
+ CHECK_GE(lo, 0);
+ CHECK_GE(hi, lo);
+ CHECK_GE(mult, 2);
+
+ // Add "lo"
+ dst->push_back(lo);
+
+ static const int kint32max = std::numeric_limits<int32_t>::max();
+
+ // Now space out the benchmarks in multiples of "mult"
+ for (int32_t i = 1; i < kint32max / mult; i *= mult) {
+ if (i >= hi) break;
+ if (i > lo) {
+ dst->push_back(i);
+ }
+ }
+ // Add "hi" (if different from "lo")
+ if (hi != lo) {
+ dst->push_back(hi);
+ }
+}
+
+Benchmark* Benchmark::Arg(int x) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ args_.push_back({x});
+ return this;
+}
+
+Benchmark* Benchmark::Unit(TimeUnit unit) {
+ time_unit_ = unit;
+ return this;
+}
+
+Benchmark* Benchmark::Range(int start, int limit) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ std::vector<int> arglist;
+ AddRange(&arglist, start, limit, range_multiplier_);
+
+ for (int i : arglist) {
+ args_.push_back({i});
+ }
+ return this;
+}
+
+Benchmark* Benchmark::Ranges(const std::vector<std::pair<int, int>>& ranges) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
+ std::vector<std::vector<int>> arglists(ranges.size());
+ std::size_t total = 1;
+ for (std::size_t i = 0; i < ranges.size(); i++) {
+ AddRange(&arglists[i], ranges[i].first, ranges[i].second,
+ range_multiplier_);
+ total *= arglists[i].size();
+ }
+
+ std::vector<std::size_t> ctr(arglists.size(), 0);
+
+ for (std::size_t i = 0; i < total; i++) {
+ std::vector<int> tmp;
+ tmp.reserve(arglists.size());
+
+ for (std::size_t j = 0; j < arglists.size(); j++) {
+ tmp.push_back(arglists[j].at(ctr[j]));
+ }
+
+ args_.push_back(std::move(tmp));
+
+ for (std::size_t j = 0; j < arglists.size(); j++) {
+ if (ctr[j] + 1 < arglists[j].size()) {
+ ++ctr[j];
+ break;
+ }
+ ctr[j] = 0;
+ }
+ }
+ return this;
+}
+
+Benchmark* Benchmark::ArgName(const std::string& name) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ arg_names_ = {name};
+ return this;
+}
+
+Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
+ arg_names_ = names;
+ return this;
+}
+
+Benchmark* Benchmark::DenseRange(int start, int limit, int step) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ CHECK_GE(start, 0);
+ CHECK_LE(start, limit);
+ for (int arg = start; arg <= limit; arg += step) {
+ args_.push_back({arg});
+ }
+ return this;
+}
+
+Benchmark* Benchmark::Args(const std::vector<int>& args) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
+ args_.push_back(args);
+ return this;
+}
+
+Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) {
+ custom_arguments(this);
+ return this;
+}
+
+Benchmark* Benchmark::RangeMultiplier(int multiplier) {
+ CHECK(multiplier > 1);
+ range_multiplier_ = multiplier;
+ return this;
+}
+
+
+Benchmark* Benchmark::MinTime(double t) {
+ CHECK(t > 0.0);
+ CHECK(iterations_ == 0);
+ min_time_ = t;
+ return this;
+}
+
+
+Benchmark* Benchmark::Iterations(size_t n) {
+ CHECK(n > 0);
+ CHECK(IsZero(min_time_));
+ iterations_ = n;
+ return this;
+}
+
+Benchmark* Benchmark::Repetitions(int n) {
+ CHECK(n > 0);
+ repetitions_ = n;
+ return this;
+}
+
+Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
+ report_mode_ = value ? RM_ReportAggregatesOnly : RM_Default;
+ return this;
+}
+
+Benchmark* Benchmark::UseRealTime() {
+ CHECK(!use_manual_time_)
+ << "Cannot set UseRealTime and UseManualTime simultaneously.";
+ use_real_time_ = true;
+ return this;
+}
+
+Benchmark* Benchmark::UseManualTime() {
+ CHECK(!use_real_time_)
+ << "Cannot set UseRealTime and UseManualTime simultaneously.";
+ use_manual_time_ = true;
+ return this;
+}
+
+Benchmark* Benchmark::Complexity(BigO complexity) {
+ complexity_ = complexity;
+ return this;
+}
+
+Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
+ complexity_lambda_ = complexity;
+ complexity_ = oLambda;
+ return this;
+}
+
+Benchmark* Benchmark::Threads(int t) {
+ CHECK_GT(t, 0);
+ thread_counts_.push_back(t);
+ return this;
+}
+
+Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
+ CHECK_GT(min_threads, 0);
+ CHECK_GE(max_threads, min_threads);
+
+ AddRange(&thread_counts_, min_threads, max_threads, 2);
+ return this;
+}
+
+Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,
+ int stride) {
+ CHECK_GT(min_threads, 0);
+ CHECK_GE(max_threads, min_threads);
+ CHECK_GE(stride, 1);
+
+ for (auto i = min_threads; i < max_threads; i += stride) {
+ thread_counts_.push_back(i);
+ }
+ thread_counts_.push_back(max_threads);
+ return this;
+}
+
+Benchmark* Benchmark::ThreadPerCpu() {
+ static int num_cpus = NumCPUs();
+ thread_counts_.push_back(num_cpus);
+ return this;
+}
+
+void Benchmark::SetName(const char* name) { name_ = name; }
+
+int Benchmark::ArgsCnt() const {
+ if (args_.empty()) {
+ if (arg_names_.empty()) return -1;
+ return static_cast<int>(arg_names_.size());
+ }
+ return static_cast<int>(args_.front().size());
+}
+
+//=============================================================================//
+// FunctionBenchmark
+//=============================================================================//
+
+void FunctionBenchmark::Run(State& st) { func_(st); }
+
+} // end namespace internal
+
+void ClearRegisteredBenchmarks() {
+ internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
+}
+
+} // end namespace benchmark
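To make the name-assembly loop in FindBenchmarks concrete: each element of args_ crossed with each thread count yields one Instance, named with "/arg" (or "/arg_name:arg") and "/threads:N" suffixes. A sketch (BM_Lookup is hypothetical, not upstream code):

    static void BM_Lookup(benchmark::State& state) {
      while (state.KeepRunning()) {
        // ... body under test ...
      }
    }
    // Range(8, 512) with RangeMultiplier(8) expands, via AddRange(), to
    // {8, 64, 512}; crossed with two thread counts, this family yields six
    // instances, "BM_Lookup/size:8/threads:1" ... "BM_Lookup/size:512/threads:2".
    BENCHMARK(BM_Lookup)
        ->ArgName("size")
        ->RangeMultiplier(8)
        ->Range(8, 512)
        ->Threads(1)
        ->Threads(2);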
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/check.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/check.h
new file mode 100644
index 00000000..73bead2f
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/check.h
@@ -0,0 +1,79 @@
+#ifndef CHECK_H_
+#define CHECK_H_
+
+#include <cstdlib>
+#include <ostream>
+#include <cmath>
+
+#include "internal_macros.h"
+#include "log.h"
+
+namespace benchmark {
+namespace internal {
+
+typedef void(AbortHandlerT)();
+
+inline AbortHandlerT*& GetAbortHandler() {
+ static AbortHandlerT* handler = &std::abort;
+ return handler;
+}
+
+BENCHMARK_NORETURN inline void CallAbortHandler() {
+ GetAbortHandler()();
+ std::abort(); // fallback to enforce noreturn
+}
+
+// CheckHandler is the class constructed by failing CHECK macros. CheckHandler
+// will log information about the failures and abort when it is destructed.
+class CheckHandler {
+ public:
+ CheckHandler(const char* check, const char* file, const char* func, int line)
+ : log_(GetErrorLogInstance()) {
+ log_ << file << ":" << line << ": " << func << ": Check `" << check
+ << "' failed. ";
+ }
+
+ LogType& GetLog() { return log_; }
+
+ BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) {
+ log_ << std::endl;
+ CallAbortHandler();
+ }
+
+ CheckHandler& operator=(const CheckHandler&) = delete;
+ CheckHandler(const CheckHandler&) = delete;
+ CheckHandler() = delete;
+
+ private:
+ LogType& log_;
+};
+
+} // end namespace internal
+} // end namespace benchmark
+
+// The CHECK macro returns a std::ostream object that can have extra information
+// written to it.
+#ifndef NDEBUG
+#define CHECK(b) \
+ (b ? ::benchmark::internal::GetNullLogInstance() \
+ : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \
+ .GetLog())
+#else
+#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
+#endif
+
+#define CHECK_EQ(a, b) CHECK((a) == (b))
+#define CHECK_NE(a, b) CHECK((a) != (b))
+#define CHECK_GE(a, b) CHECK((a) >= (b))
+#define CHECK_LE(a, b) CHECK((a) <= (b))
+#define CHECK_GT(a, b) CHECK((a) > (b))
+#define CHECK_LT(a, b) CHECK((a) < (b))
+
+#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps))
+#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps))
+#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps))
+#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps))
+#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps))
+#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps))
+
+#endif // CHECK_H_
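Usage inside the library looks like the following sketch: a failed CHECK streams file/line context plus any trailing message, then aborts via CallAbortHandler(); with NDEBUG defined, the condition and message compile away to the null log:

    #include <cstddef>
    #include "check.h"

    double Mean(const double* v, size_t n) {
      CHECK(v != nullptr) << "null input";
      CHECK_GT(n, 0u) << "mean of an empty sequence is undefined";
      double sum = 0.0;
      for (size_t i = 0; i < n; ++i) sum += v[i];
      return sum / static_cast<double>(n);
    }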
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc
new file mode 100644
index 00000000..2dec4a8b
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc
@@ -0,0 +1,188 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "colorprint.h"
+
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <memory>
+#include <string>
+
+#include "check.h"
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <Windows.h>
+#include <io.h>
+#else
+#include <unistd.h>
+#endif // BENCHMARK_OS_WINDOWS
+
+namespace benchmark {
+namespace {
+#ifdef BENCHMARK_OS_WINDOWS
+typedef WORD PlatformColorCode;
+#else
+typedef const char* PlatformColorCode;
+#endif
+
+PlatformColorCode GetPlatformColorCode(LogColor color) {
+#ifdef BENCHMARK_OS_WINDOWS
+ switch (color) {
+ case COLOR_RED:
+ return FOREGROUND_RED;
+ case COLOR_GREEN:
+ return FOREGROUND_GREEN;
+ case COLOR_YELLOW:
+ return FOREGROUND_RED | FOREGROUND_GREEN;
+ case COLOR_BLUE:
+ return FOREGROUND_BLUE;
+ case COLOR_MAGENTA:
+ return FOREGROUND_BLUE | FOREGROUND_RED;
+ case COLOR_CYAN:
+ return FOREGROUND_BLUE | FOREGROUND_GREEN;
+ case COLOR_WHITE: // fall through to default
+ default:
+ return 0;
+ }
+#else
+ switch (color) {
+ case COLOR_RED:
+ return "1";
+ case COLOR_GREEN:
+ return "2";
+ case COLOR_YELLOW:
+ return "3";
+ case COLOR_BLUE:
+ return "4";
+ case COLOR_MAGENTA:
+ return "5";
+ case COLOR_CYAN:
+ return "6";
+ case COLOR_WHITE:
+ return "7";
+ default:
+ return nullptr;
+ };
+#endif
+}
+
+} // end namespace
+
+std::string FormatString(const char* msg, va_list args) {
+ // we might need a second shot at this, so preemptively make a copy
+ va_list args_cp;
+ va_copy(args_cp, args);
+
+ std::size_t size = 256;
+ char local_buff[256];
+ auto ret = vsnprintf(local_buff, size, msg, args_cp);
+
+ va_end(args_cp);
+
+ // currently there is no error handling for failure, so this is a hack.
+ CHECK(ret >= 0);
+
+ if (ret == 0) // handle empty expansion
+ return {};
+ else if (static_cast<size_t>(ret) < size)
+ return local_buff;
+ else {
+ // we did not provide a long enough buffer on our first attempt.
+ size = (size_t)ret + 1; // + 1 for the null byte
+ std::unique_ptr<char[]> buff(new char[size]);
+ ret = vsnprintf(buff.get(), size, msg, args);
+ CHECK(ret > 0 && ((size_t)ret) < size);
+ return buff.get();
+ }
+}
+
+std::string FormatString(const char* msg, ...) {
+ va_list args;
+ va_start(args, msg);
+ auto tmp = FormatString(msg, args);
+ va_end(args);
+ return tmp;
+}
+
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ ColorPrintf(out, color, fmt, args);
+ va_end(args);
+}
+
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
+ va_list args) {
+#ifdef BENCHMARK_OS_WINDOWS
+ ((void)out); // suppress unused warning
+
+ const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
+
+ // Gets the current text color.
+ CONSOLE_SCREEN_BUFFER_INFO buffer_info;
+ GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
+ const WORD old_color_attrs = buffer_info.wAttributes;
+
+ // We need to flush the stream buffers into the console before each
+ // SetConsoleTextAttribute call lest it affect the text that is already
+ // printed but has not yet reached the console.
+ fflush(stdout);
+ SetConsoleTextAttribute(stdout_handle,
+ GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
+ vprintf(fmt, args);
+
+ fflush(stdout);
+ // Restores the text color.
+ SetConsoleTextAttribute(stdout_handle, old_color_attrs);
+#else
+ const char* color_code = GetPlatformColorCode(color);
+ if (color_code) out << FormatString("\033[0;3%sm", color_code);
+ out << FormatString(fmt, args) << "\033[m";
+#endif
+}
+
+bool IsColorTerminal() {
+#if BENCHMARK_OS_WINDOWS
+ // On Windows the TERM variable is usually not set, but the
+ // console there does support colors.
+ return 0 != _isatty(_fileno(stdout));
+#else
+ // On non-Windows platforms, we rely on the TERM variable. This list of
+ // supported TERM values is copied from Google Test:
+ // <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
+ const char* const SUPPORTED_TERM_VALUES[] = {
+ "xterm", "xterm-color", "xterm-256color",
+ "screen", "screen-256color", "tmux",
+ "tmux-256color", "rxvt-unicode", "rxvt-unicode-256color",
+ "linux", "cygwin",
+ };
+
+ const char* const term = getenv("TERM");
+
+ bool term_supports_color = false;
+ for (const char* candidate : SUPPORTED_TERM_VALUES) {
+ if (term && 0 == strcmp(term, candidate)) {
+ term_supports_color = true;
+ break;
+ }
+ }
+
+ return 0 != isatty(fileno(stdout)) && term_supports_color;
+#endif // BENCHMARK_OS_WINDOWS
+}
+
+} // end namespace benchmark
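A sketch of how callers combine these helpers (the wrapper, message, and color choice are illustrative): query IsColorTerminal() and fall back to plain output when colors are unsupported:

    #include <ostream>
    #include "colorprint.h"

    void PrintWarning(std::ostream& out, const char* what) {
      if (benchmark::IsColorTerminal()) {
        benchmark::ColorPrintf(out, benchmark::COLOR_YELLOW, "WARNING: %s\n", what);
      } else {
        out << "WARNING: " << what << "\n";  // plain fallback
      }
    }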
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.h
new file mode 100644
index 00000000..9f6fab9b
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.h
@@ -0,0 +1,33 @@
+#ifndef BENCHMARK_COLORPRINT_H_
+#define BENCHMARK_COLORPRINT_H_
+
+#include <cstdarg>
+#include <iostream>
+#include <string>
+
+namespace benchmark {
+enum LogColor {
+ COLOR_DEFAULT,
+ COLOR_RED,
+ COLOR_GREEN,
+ COLOR_YELLOW,
+ COLOR_BLUE,
+ COLOR_MAGENTA,
+ COLOR_CYAN,
+ COLOR_WHITE
+};
+
+std::string FormatString(const char* msg, va_list args);
+std::string FormatString(const char* msg, ...);
+
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
+ va_list args);
+void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...);
+
+// Returns true if stdout appears to be a terminal that supports colored
+// output, false otherwise.
+bool IsColorTerminal();
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_COLORPRINT_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc
new file mode 100644
index 00000000..2fc92517
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc
@@ -0,0 +1,218 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "commandlineflags.h"
+
+#include <cctype>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <limits>
+
+namespace benchmark {
+// Parses 'str' for a 32-bit signed integer. If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
+ // Parses the string as a decimal integer.
+ char* end = nullptr;
+ const long long_value = strtol(str, &end, 10); // NOLINT
+
+ // Has strtol() consumed all characters in the string?
+ if (*end != '\0') {
+ // No - an invalid character was encountered.
+ std::cerr << src_text << " is expected to be a 32-bit integer, "
+ << "but actually has value \"" << str << "\".\n";
+ return false;
+ }
+
+ // Is the parsed value in the range of an Int32?
+ const int32_t result = static_cast<int32_t>(long_value);
+ if (long_value == std::numeric_limits<long>::max() ||
+ long_value == std::numeric_limits<long>::min() ||
+ // The parsed value overflows as a long. (strtol() returns
+ // LONG_MAX or LONG_MIN when the input overflows.)
+ result != long_value
+ // The parsed value overflows as an Int32.
+ ) {
+ std::cerr << src_text << " is expected to be a 32-bit integer, "
+ << "but actually has value \"" << str << "\", "
+ << "which overflows.\n";
+ return false;
+ }
+
+ *value = result;
+ return true;
+}
+
+// Parses 'str' for a double. If successful, writes the result to *value and
+// returns true; otherwise leaves *value unchanged and returns false.
+bool ParseDouble(const std::string& src_text, const char* str, double* value) {
+ // Parses the string as a double.
+ char* end = nullptr;
+ const double double_value = strtod(str, &end); // NOLINT
+
+ // Has strtod() consumed all characters in the string?
+ if (*end != '\0') {
+ // No - an invalid character was encountered.
+ std::cerr << src_text << " is expected to be a double, "
+ << "but actually has value \"" << str << "\".\n";
+ return false;
+ }
+
+ *value = double_value;
+ return true;
+}
+
+// Returns the name of the environment variable corresponding to the
+// given flag. For example, FlagToEnvVar("foo") will return
+// "BENCHMARK_FOO" in the open-source version.
+static std::string FlagToEnvVar(const char* flag) {
+ const std::string flag_str(flag);
+
+ std::string env_var;
+ for (size_t i = 0; i != flag_str.length(); ++i)
+ env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
+
+ return "BENCHMARK_" + env_var;
+}
+
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true iff it's not "0".
+bool BoolFromEnv(const char* flag, bool default_value) {
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const string_value = getenv(env_var.c_str());
+ return string_value == nullptr ? default_value
+ : strcmp(string_value, "0") != 0;
+}
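+
+// Illustrative behavior sketch (hypothetical flag "my_flag", which maps to
+// the environment variable BENCHMARK_MY_FLAG):
+//   unset BENCHMARK_MY_FLAG  -> BoolFromEnv("my_flag", true)  == true
+//   BENCHMARK_MY_FLAG=0      -> BoolFromEnv("my_flag", true)  == false
+//   BENCHMARK_MY_FLAG=yes    -> BoolFromEnv("my_flag", false) == true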
+
+// Reads and returns a 32-bit integer stored in the environment
+// variable corresponding to the given flag; if it isn't set or
+// doesn't represent a valid 32-bit integer, returns default_value.
+int32_t Int32FromEnv(const char* flag, int32_t default_value) {
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const string_value = getenv(env_var.c_str());
+ if (string_value == nullptr) {
+ // The environment variable is not set.
+ return default_value;
+ }
+
+ int32_t result = default_value;
+ if (!ParseInt32(std::string("Environment variable ") + env_var, string_value,
+ &result)) {
+ std::cout << "The default value " << default_value << " is used.\n";
+ return default_value;
+ }
+
+ return result;
+}
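+
+// Reads and returns a double stored in the environment variable
+// corresponding to the given flag; if it isn't set or doesn't represent a
+// valid double, returns default_value. Note: the header declares
+// DoubleFromEnv but no definition was present in this file; the sketch
+// below mirrors Int32FromEnv and reuses ParseDouble from above.
+double DoubleFromEnv(const char* flag, double default_value) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const string_value = getenv(env_var.c_str());
+  if (string_value == nullptr) {
+    // The environment variable is not set.
+    return default_value;
+  }
+
+  double result = default_value;
+  if (!ParseDouble(std::string("Environment variable ") + env_var,
+                   string_value, &result)) {
+    std::cout << "The default value " << default_value << " is used.\n";
+    return default_value;
+  }
+
+  return result;
+}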
+
+// Reads and returns the string environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+const char* StringFromEnv(const char* flag, const char* default_value) {
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const value = getenv(env_var.c_str());
+ return value == nullptr ? default_value : value;
+}
+
+// Parses a string as a command line flag. The string should have
+// the format "--flag=value". When def_optional is true, the "=value"
+// part can be omitted.
+//
+// Returns the value of the flag, or nullptr if the parsing failed.
+const char* ParseFlagValue(const char* str, const char* flag,
+ bool def_optional) {
+ // str and flag must not be nullptr.
+ if (str == nullptr || flag == nullptr) return nullptr;
+
+ // The flag must start with "--".
+ const std::string flag_str = std::string("--") + std::string(flag);
+ const size_t flag_len = flag_str.length();
+ if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;
+
+ // Skips the flag name.
+ const char* flag_end = str + flag_len;
+
+ // When def_optional is true, it's OK to not have a "=value" part.
+ if (def_optional && (flag_end[0] == '\0')) return flag_end;
+
+ // If def_optional is true and there are more characters after the
+ // flag name, or if def_optional is false, there must be a '=' after
+ // the flag name.
+ if (flag_end[0] != '=') return nullptr;
+
+ // Returns the string after "=".
+ return flag_end + 1;
+}
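+
+// Illustrative examples (hypothetical flag "color"):
+//   ParseFlagValue("--color=yes", "color", false) -> "yes"
+//   ParseFlagValue("--color",     "color", true)  -> ""  (value omitted)
+//   ParseFlagValue("--color",     "color", false) -> nullptr (missing '=')
+//   ParseFlagValue("--verbose",   "color", false) -> nullptr (wrong flag)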
+
+bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, true);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Converts the string value to a bool.
+ *value = IsTruthyFlagValue(value_str);
+ return true;
+}
+
+bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Sets *value to the value of the flag.
+ return ParseInt32(std::string("The value of flag --") + flag, value_str,
+ value);
+}
+
+bool ParseDoubleFlag(const char* str, const char* flag, double* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Sets *value to the value of the flag.
+ return ParseDouble(std::string("The value of flag --") + flag, value_str,
+ value);
+}
+
+bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ *value = value_str;
+ return true;
+}
+
+bool IsFlag(const char* str, const char* flag) {
+ return (ParseFlagValue(str, flag, true) != nullptr);
+}
+
+bool IsTruthyFlagValue(const std::string& value) {
+ if (value.empty()) return true;
+ char ch = value[0];
+  return isalnum(static_cast<unsigned char>(ch)) &&
+         !(ch == '0' || ch == 'f' || ch == 'F' || ch == 'n' || ch == 'N');
+}
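+
+// Illustrative examples:
+//   IsTruthyFlagValue("")     -> true   (special case for a bare "--flag")
+//   IsTruthyFlagValue("1")    -> true
+//   IsTruthyFlagValue("true") -> true
+//   IsTruthyFlagValue("0")    -> false
+//   IsTruthyFlagValue("no")   -> false
+//   IsTruthyFlagValue("-1")   -> false  ('-' is not alphanumeric)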
+} // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.h
new file mode 100644
index 00000000..945c9a9f
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.h
@@ -0,0 +1,79 @@
+#ifndef BENCHMARK_COMMANDLINEFLAGS_H_
+#define BENCHMARK_COMMANDLINEFLAGS_H_
+
+#include <cstdint>
+#include <string>
+
+// Macro for referencing flags.
+#define FLAG(name) FLAGS_##name
+
+// Macros for declaring flags.
+#define DECLARE_bool(name) extern bool FLAG(name)
+#define DECLARE_int32(name) extern int32_t FLAG(name)
+#define DECLARE_int64(name) extern int64_t FLAG(name)
+#define DECLARE_double(name) extern double FLAG(name)
+#define DECLARE_string(name) extern std::string FLAG(name)
+
+// Macros for defining flags.
+#define DEFINE_bool(name, default_val, doc) bool FLAG(name) = (default_val)
+#define DEFINE_int32(name, default_val, doc) int32_t FLAG(name) = (default_val)
+#define DEFINE_int64(name, default_val, doc) int64_t FLAG(name) = (default_val)
+#define DEFINE_double(name, default_val, doc) double FLAG(name) = (default_val)
+#define DEFINE_string(name, default_val, doc) \
+ std::string FLAG(name) = (default_val)
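+
+// Illustrative usage sketch (the flag name below is hypothetical): one
+// translation unit defines the flag, others reference it via DECLARE_*.
+//
+//   // flags.cc
+//   DEFINE_int32(example_repetitions, 1, "how many times to repeat");
+//   // other.cc
+//   DECLARE_int32(example_repetitions);
+//   int32_t n = FLAG(example_repetitions);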
+
+namespace benchmark {
+// Parses 'str' for a 32-bit signed integer. If successful, writes the result
+// to *value and returns true; otherwise leaves *value unchanged and returns
+// false.
+bool ParseInt32(const std::string& src_text, const char* str, int32_t* value);
+
+// Parses a bool/Int32/Double/string from the environment variable
+// corresponding to the given benchmark flag.
+bool BoolFromEnv(const char* flag, bool default_val);
+int32_t Int32FromEnv(const char* flag, int32_t default_val);
+double DoubleFromEnv(const char* flag, double default_val);
+const char* StringFromEnv(const char* flag, const char* default_val);
+
+// Parses a string for a bool flag, in the form of either
+// "--flag=value" or "--flag".
+//
+// In the former case, the value is taken as true if it passes
+// IsTruthyFlagValue().
+//
+// In the latter case, the value is taken as true.
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseBoolFlag(const char* str, const char* flag, bool* value);
+
+// Parses a string for an Int32 flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
+
+// Parses a string for a Double flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseDoubleFlag(const char* str, const char* flag, double* value);
+
+// Parses a string for a string flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseStringFlag(const char* str, const char* flag, std::string* value);
+
+// Returns true if the string matches the flag.
+bool IsFlag(const char* str, const char* flag);
+
+// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
+// some non-alphanumeric character. As a special case, also returns true if
+// value is the empty string.
+bool IsTruthyFlagValue(const std::string& value);
+} // end namespace benchmark
+
+#endif // BENCHMARK_COMMANDLINEFLAGS_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.cc
new file mode 100644
index 00000000..33975be5
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.cc
@@ -0,0 +1,324 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Source project : https://github.com/ismaelJimenez/cpp.leastsq
+// Adapted to be used with google benchmark
+
+#include "benchmark/benchmark.h"
+
+#include <algorithm>
+#include <cmath>
+#include "check.h"
+#include "complexity.h"
+#include "stat.h"
+
+namespace benchmark {
+
+// Internal function returning the fitting curve for the given scalability
+// form
+BigOFunc* FittingCurve(BigO complexity) {
+ switch (complexity) {
+ case oN:
+ return [](int n) -> double { return n; };
+ case oNSquared:
+ return [](int n) -> double { return std::pow(n, 2); };
+ case oNCubed:
+ return [](int n) -> double { return std::pow(n, 3); };
+ case oLogN:
+ return [](int n) { return log2(n); };
+ case oNLogN:
+ return [](int n) { return n * log2(n); };
+ case o1:
+ default:
+ return [](int) { return 1.0; };
+ }
+}
+
+// Function to return a string for the calculated complexity
+std::string GetBigOString(BigO complexity) {
+ switch (complexity) {
+ case oN:
+ return "N";
+ case oNSquared:
+ return "N^2";
+ case oNCubed:
+ return "N^3";
+ case oLogN:
+ return "lgN";
+ case oNLogN:
+ return "NlgN";
+ case o1:
+ return "(1)";
+ default:
+ return "f(N)";
+ }
+}
+
+// Find the coefficient for the high-order term in the running time, by
+// minimizing the sum of squared errors for the fitting curve
+// given by the lambda expression.
+// - n : Vector containing the size of the benchmark tests.
+// - time : Vector containing the times for the benchmark tests.
+// - fitting_curve : lambda expression (e.g. [](int n) { return n; }).
+//
+// For a deeper explanation of the algorithm logic, see the README at
+// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
+
+LeastSq MinimalLeastSq(const std::vector<int>& n,
+ const std::vector<double>& time,
+ BigOFunc* fitting_curve) {
+ double sigma_gn = 0.0;
+ double sigma_gn_squared = 0.0;
+ double sigma_time = 0.0;
+ double sigma_time_gn = 0.0;
+
+ // Calculate least square fitting parameter
+ for (size_t i = 0; i < n.size(); ++i) {
+ double gn_i = fitting_curve(n[i]);
+ sigma_gn += gn_i;
+ sigma_gn_squared += gn_i * gn_i;
+ sigma_time += time[i];
+ sigma_time_gn += time[i] * gn_i;
+ }
+
+ LeastSq result;
+ result.complexity = oLambda;
+
+ // Calculate complexity.
+ result.coef = sigma_time_gn / sigma_gn_squared;
+
+ // Calculate RMS
+ double rms = 0.0;
+ for (size_t i = 0; i < n.size(); ++i) {
+ double fit = result.coef * fitting_curve(n[i]);
+ rms += pow((time[i] - fit), 2);
+ }
+
+ // Normalized RMS by the mean of the observed values
+ double mean = sigma_time / n.size();
+ result.rms = sqrt(rms / n.size()) / mean;
+
+ return result;
+}
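+
+// Derivation sketch for the closed form used above: minimizing
+//   E(c) = sum_i (time_i - c * g(n_i))^2
+// over c gives dE/dc = -2 * sum_i g(n_i) * (time_i - c * g(n_i)) = 0, hence
+//   c = sum_i time_i * g(n_i) / sum_i g(n_i)^2
+//     = sigma_time_gn / sigma_gn_squared.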
+
+// Find the coefficient for the high-order term in the running time, by
+// minimizing the sum of squared errors.
+// - n : Vector containing the size of the benchmark tests.
+// - time : Vector containing the times for the benchmark tests.
+// - complexity : If different from oAuto, the fitting curve is fixed to this
+//                form. If it is oAuto, the best-fitting curve is computed
+//                over all candidate forms.
+LeastSq MinimalLeastSq(const std::vector<int>& n,
+ const std::vector<double>& time, const BigO complexity) {
+ CHECK_EQ(n.size(), time.size());
+  CHECK_GE(n.size(), 2);  // Do not compute fitting curve if fewer than two
+                          // benchmark runs are given
+ CHECK_NE(complexity, oNone);
+
+ LeastSq best_fit;
+
+ if (complexity == oAuto) {
+ std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
+
+ // Take o1 as default best fitting curve
+ best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
+ best_fit.complexity = o1;
+
+ // Compute all possible fitting curves and stick to the best one
+ for (const auto& fit : fit_curves) {
+ LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
+ if (current_fit.rms < best_fit.rms) {
+ best_fit = current_fit;
+ best_fit.complexity = fit;
+ }
+ }
+ } else {
+ best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
+ best_fit.complexity = complexity;
+ }
+
+ return best_fit;
+}
+
+std::vector<BenchmarkReporter::Run> ComputeStats(
+ const std::vector<BenchmarkReporter::Run>& reports) {
+ typedef BenchmarkReporter::Run Run;
+ std::vector<Run> results;
+
+ auto error_count =
+ std::count_if(reports.begin(), reports.end(),
+ [](Run const& run) { return run.error_occurred; });
+
+ if (reports.size() - error_count < 2) {
+    // We don't report aggregated data if fewer than two non-errored runs
+    // are available.
+ return results;
+ }
+ // Accumulators.
+ Stat1_d real_accumulated_time_stat;
+ Stat1_d cpu_accumulated_time_stat;
+ Stat1_d bytes_per_second_stat;
+ Stat1_d items_per_second_stat;
+ // All repetitions should be run with the same number of iterations so we
+ // can take this information from the first benchmark.
+ int64_t const run_iterations = reports.front().iterations;
+ // create stats for user counters
+ struct CounterStat {
+ Counter c;
+ Stat1_d s;
+ };
+  std::map<std::string, CounterStat> counter_stats;
+ for(Run const& r : reports) {
+ for(auto const& cnt : r.counters) {
+ auto it = counter_stats.find(cnt.first);
+ if(it == counter_stats.end()) {
+ counter_stats.insert({cnt.first, {cnt.second, Stat1_d{}}});
+ } else {
+ CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
+ }
+ }
+ }
+
+ // Populate the accumulators.
+ for (Run const& run : reports) {
+ CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
+ CHECK_EQ(run_iterations, run.iterations);
+ if (run.error_occurred) continue;
+ real_accumulated_time_stat +=
+ Stat1_d(run.real_accumulated_time / run.iterations);
+ cpu_accumulated_time_stat +=
+ Stat1_d(run.cpu_accumulated_time / run.iterations);
+ items_per_second_stat += Stat1_d(run.items_per_second);
+ bytes_per_second_stat += Stat1_d(run.bytes_per_second);
+ // user counters
+ for(auto const& cnt : run.counters) {
+ auto it = counter_stats.find(cnt.first);
+ CHECK_NE(it, counter_stats.end());
+ it->second.s += Stat1_d(cnt.second);
+ }
+ }
+
+ // Get the data from the accumulator to BenchmarkReporter::Run's.
+ Run mean_data;
+ mean_data.benchmark_name = reports[0].benchmark_name + "_mean";
+ mean_data.iterations = run_iterations;
+ mean_data.real_accumulated_time =
+ real_accumulated_time_stat.Mean() * run_iterations;
+ mean_data.cpu_accumulated_time =
+ cpu_accumulated_time_stat.Mean() * run_iterations;
+ mean_data.bytes_per_second = bytes_per_second_stat.Mean();
+ mean_data.items_per_second = items_per_second_stat.Mean();
+ mean_data.time_unit = reports[0].time_unit;
+ // user counters
+ for(auto const& kv : counter_stats) {
+ auto c = Counter(kv.second.s.Mean(), counter_stats[kv.first].c.flags);
+ mean_data.counters[kv.first] = c;
+ }
+
+  // Only add label to mean/stddev if it is the same for all runs
+ mean_data.report_label = reports[0].report_label;
+ for (std::size_t i = 1; i < reports.size(); i++) {
+ if (reports[i].report_label != reports[0].report_label) {
+ mean_data.report_label = "";
+ break;
+ }
+ }
+
+ Run stddev_data;
+ stddev_data.benchmark_name = reports[0].benchmark_name + "_stddev";
+ stddev_data.report_label = mean_data.report_label;
+ stddev_data.iterations = 0;
+ stddev_data.real_accumulated_time = real_accumulated_time_stat.StdDev();
+ stddev_data.cpu_accumulated_time = cpu_accumulated_time_stat.StdDev();
+ stddev_data.bytes_per_second = bytes_per_second_stat.StdDev();
+ stddev_data.items_per_second = items_per_second_stat.StdDev();
+ stddev_data.time_unit = reports[0].time_unit;
+ // user counters
+ for(auto const& kv : counter_stats) {
+ auto c = Counter(kv.second.s.StdDev(), counter_stats[kv.first].c.flags);
+ stddev_data.counters[kv.first] = c;
+ }
+
+ results.push_back(mean_data);
+ results.push_back(stddev_data);
+ return results;
+}
+
+std::vector<BenchmarkReporter::Run> ComputeBigO(
+ const std::vector<BenchmarkReporter::Run>& reports) {
+ typedef BenchmarkReporter::Run Run;
+ std::vector<Run> results;
+
+ if (reports.size() < 2) return results;
+
+ // Accumulators.
+ std::vector<int> n;
+ std::vector<double> real_time;
+ std::vector<double> cpu_time;
+
+ // Populate the accumulators.
+ for (const Run& run : reports) {
+ CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
+ n.push_back(run.complexity_n);
+ real_time.push_back(run.real_accumulated_time / run.iterations);
+ cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
+ }
+
+ LeastSq result_cpu;
+ LeastSq result_real;
+
+ if (reports[0].complexity == oLambda) {
+ result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
+ result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
+ } else {
+ result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
+ result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
+ }
+ std::string benchmark_name =
+ reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
+
+ // Get the data from the accumulator to BenchmarkReporter::Run's.
+ Run big_o;
+ big_o.benchmark_name = benchmark_name + "_BigO";
+ big_o.iterations = 0;
+ big_o.real_accumulated_time = result_real.coef;
+ big_o.cpu_accumulated_time = result_cpu.coef;
+ big_o.report_big_o = true;
+ big_o.complexity = result_cpu.complexity;
+
+ // All the time results are reported after being multiplied by the
+ // time unit multiplier. But since RMS is a relative quantity it
+ // should not be multiplied at all. So, here, we _divide_ it by the
+ // multiplier so that when it is multiplied later the result is the
+ // correct one.
+ double multiplier = GetTimeUnitMultiplier(reports[0].time_unit);
+
+  // Keep the first run's label for both the BigO and RMS rows
+ Run rms;
+ big_o.report_label = reports[0].report_label;
+ rms.benchmark_name = benchmark_name + "_RMS";
+ rms.report_label = big_o.report_label;
+ rms.iterations = 0;
+ rms.real_accumulated_time = result_real.rms / multiplier;
+ rms.cpu_accumulated_time = result_cpu.rms / multiplier;
+ rms.report_rms = true;
+ rms.complexity = result_cpu.complexity;
+ // don't forget to keep the time unit, or we won't be able to
+ // recover the correct value.
+ rms.time_unit = reports[0].time_unit;
+
+ results.push_back(big_o);
+ results.push_back(rms);
+ return results;
+}
+
+} // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.h
new file mode 100644
index 00000000..c0ca60e6
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.h
@@ -0,0 +1,60 @@
+// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Source project : https://github.com/ismaelJimenez/cpp.leastsq
+// Adapted to be used with google benchmark
+
+#ifndef COMPLEXITY_H_
+#define COMPLEXITY_H_
+
+#include <string>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+
+// Return a vector containing the mean and standard deviation information for
+// the specified list of reports. If 'reports' contains fewer than two
+// non-errored runs, an empty vector is returned.
+std::vector<BenchmarkReporter::Run> ComputeStats(
+ const std::vector<BenchmarkReporter::Run>& reports);
+
+// Return a vector containing the bigO and RMS information for the specified
+// list of reports. If 'reports.size() < 2' an empty vector is returned.
+std::vector<BenchmarkReporter::Run> ComputeBigO(
+ const std::vector<BenchmarkReporter::Run>& reports);
+
+// This data structure contains the result returned by MinimalLeastSq
+// - coef : Estimated coefficient for the high-order term as
+// interpolated from data.
+// - rms : Normalized Root Mean Squared Error.
+// - complexity : Scalability form (e.g. oN, oNLogN). If a scalability
+// form has been provided to MinimalLeastSq, this holds
+// the same value. If BigO::oAuto has been selected, this
+// field holds the best-fitting curve detected.
+
+struct LeastSq {
+ LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {}
+
+ double coef;
+ double rms;
+ BigO complexity;
+};
+
+// Function to return a string for the calculated complexity
+std::string GetBigOString(BigO complexity);
+
+} // end namespace benchmark
+#endif // COMPLEXITY_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/console_reporter.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/console_reporter.cc
new file mode 100644
index 00000000..4bb6f712
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/console_reporter.cc
@@ -0,0 +1,180 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "complexity.h"
+#include "counter.h"
+
+#include <algorithm>
+#include <cstdarg>
+#include <cstdint>
+#include <cstdio>
+#include <iostream>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "check.h"
+#include "colorprint.h"
+#include "commandlineflags.h"
+#include "internal_macros.h"
+#include "string_util.h"
+#include "timers.h"
+
+namespace benchmark {
+
+bool ConsoleReporter::ReportContext(const Context& context) {
+ name_field_width_ = context.name_field_width;
+ printed_header_ = false;
+ prev_counters_.clear();
+
+ PrintBasicContext(&GetErrorStream(), context);
+
+#ifdef BENCHMARK_OS_WINDOWS
+ if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) {
+ GetErrorStream()
+ << "Color printing is only supported for stdout on windows."
+ " Disabling color printing\n";
+ output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color);
+ }
+#endif
+
+ return true;
+}
+
+void ConsoleReporter::PrintHeader(const Run& run) {
+ std::string str = FormatString("%-*s %13s %13s %10s", static_cast<int>(name_field_width_),
+ "Benchmark", "Time", "CPU", "Iterations");
+ if(!run.counters.empty()) {
+ if(output_options_ & OO_Tabular) {
+ for(auto const& c : run.counters) {
+ str += FormatString(" %10s", c.first.c_str());
+ }
+ } else {
+ str += " UserCounters...";
+ }
+ }
+ str += "\n";
+ std::string line = std::string(str.length(), '-');
+ GetOutputStream() << line << "\n" << str << line << "\n";
+}
+
+void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
+ for (const auto& run : reports) {
+ // print the header:
+ // --- if none was printed yet
+ bool print_header = !printed_header_;
+ // --- or if the format is tabular and this run
+ // has different fields from the prev header
+ print_header |= (output_options_ & OO_Tabular) &&
+ (!internal::SameNames(run.counters, prev_counters_));
+ if (print_header) {
+ printed_header_ = true;
+ prev_counters_ = run.counters;
+ PrintHeader(run);
+ }
+ // As an alternative to printing the headers like this, we could sort
+ // the benchmarks by header and then print. But this would require
+ // waiting for the full results before printing, or printing twice.
+ PrintRunData(run);
+ }
+}
+
+static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt,
+ ...) {
+ va_list args;
+ va_start(args, fmt);
+ out << FormatString(fmt, args);
+ va_end(args);
+}
+
+void ConsoleReporter::PrintRunData(const Run& result) {
+ typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
+ auto& Out = GetOutputStream();
+ PrinterFn* printer = (output_options_ & OO_Color) ?
+ (PrinterFn*)ColorPrintf : IgnoreColorPrint;
+ auto name_color =
+ (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
+  printer(Out, name_color, "%-*s ", static_cast<int>(name_field_width_),
+          result.benchmark_name.c_str());
+
+ if (result.error_occurred) {
+ printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
+ result.error_message.c_str());
+ printer(Out, COLOR_DEFAULT, "\n");
+ return;
+ }
+ // Format bytes per second
+ std::string rate;
+ if (result.bytes_per_second > 0) {
+ rate = StrCat(" ", HumanReadableNumber(result.bytes_per_second), "B/s");
+ }
+
+ // Format items per second
+ std::string items;
+ if (result.items_per_second > 0) {
+ items =
+ StrCat(" ", HumanReadableNumber(result.items_per_second), " items/s");
+ }
+
+ const double real_time = result.GetAdjustedRealTime();
+ const double cpu_time = result.GetAdjustedCPUTime();
+
+ if (result.report_big_o) {
+ std::string big_o = GetBigOString(result.complexity);
+ printer(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time, big_o.c_str(),
+ cpu_time, big_o.c_str());
+ } else if (result.report_rms) {
+ printer(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100,
+ cpu_time * 100);
+ } else {
+ const char* timeLabel = GetTimeUnitString(result.time_unit);
+ printer(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel,
+ cpu_time, timeLabel);
+ }
+
+ if (!result.report_big_o && !result.report_rms) {
+    printer(Out, COLOR_CYAN, "%10lld",
+            static_cast<long long>(result.iterations));
+ }
+
+ for (auto& c : result.counters) {
+ auto const& s = HumanReadableNumber(c.second.value);
+ if (output_options_ & OO_Tabular) {
+ if (c.second.flags & Counter::kIsRate) {
+ printer(Out, COLOR_DEFAULT, " %8s/s", s.c_str());
+ } else {
+ printer(Out, COLOR_DEFAULT, " %10s", s.c_str());
+ }
+ } else {
+ const char* unit = (c.second.flags & Counter::kIsRate) ? "/s" : "";
+ printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(),
+ unit);
+ }
+ }
+
+ if (!rate.empty()) {
+ printer(Out, COLOR_DEFAULT, " %*s", 13, rate.c_str());
+ }
+
+ if (!items.empty()) {
+ printer(Out, COLOR_DEFAULT, " %*s", 18, items.c_str());
+ }
+
+ if (!result.report_label.empty()) {
+ printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
+ }
+
+ printer(Out, COLOR_DEFAULT, "\n");
+}
+
+} // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/counter.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/counter.cc
new file mode 100644
index 00000000..ed1aa044
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/counter.cc
@@ -0,0 +1,68 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "counter.h"
+
+namespace benchmark {
+namespace internal {
+
+double Finish(Counter const& c, double cpu_time, double num_threads) {
+ double v = c.value;
+ if (c.flags & Counter::kIsRate) {
+ v /= cpu_time;
+ }
+ if (c.flags & Counter::kAvgThreads) {
+ v /= num_threads;
+ }
+ return v;
+}
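+
+// Illustrative example: a counter with value 8.0 flagged both kIsRate and
+// kAvgThreads, finished with cpu_time 2.0 and num_threads 4, yields
+// 8.0 / 2.0 / 4 == 1.0 (a per-second rate averaged over threads).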
+
+void Finish(UserCounters *l, double cpu_time, double num_threads) {
+ for (auto &c : *l) {
+ c.second.value = Finish(c.second, cpu_time, num_threads);
+ }
+}
+
+void Increment(UserCounters *l, UserCounters const& r) {
+ // add counters present in both or just in *l
+ for (auto &c : *l) {
+ auto it = r.find(c.first);
+ if (it != r.end()) {
+ c.second.value = c.second + it->second;
+ }
+ }
+ // add counters present in r, but not in *l
+ for (auto const &tc : r) {
+ auto it = l->find(tc.first);
+ if (it == l->end()) {
+ (*l)[tc.first] = tc.second;
+ }
+ }
+}
+
+bool SameNames(UserCounters const& l, UserCounters const& r) {
+ if (&l == &r) return true;
+ if (l.size() != r.size()) {
+ return false;
+ }
+ for (auto const& c : l) {
+ if (r.find(c.first) == r.end()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // end namespace internal
+} // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h
new file mode 100644
index 00000000..dd6865a3
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h
@@ -0,0 +1,26 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+
+// These counter-related functions are hidden to reduce the API surface.
+namespace internal {
+void Finish(UserCounters *l, double time, double num_threads);
+void Increment(UserCounters *l, UserCounters const& r);
+bool SameNames(UserCounters const& l, UserCounters const& r);
+} // end namespace internal
+
+} // end namespace benchmark
+
+#endif  // BENCHMARK_COUNTER_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc
new file mode 100644
index 00000000..35510645
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc
@@ -0,0 +1,149 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "complexity.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <iostream>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "string_util.h"
+#include "timers.h"
+#include "check.h"
+
+// File format reference: http://edoceo.com/utilitas/csv-file-format.
+
+namespace benchmark {
+
+namespace {
+std::vector<std::string> elements = {
+ "name", "iterations", "real_time", "cpu_time",
+ "time_unit", "bytes_per_second", "items_per_second", "label",
+ "error_occurred", "error_message"};
+} // namespace
+
+bool CSVReporter::ReportContext(const Context& context) {
+ PrintBasicContext(&GetErrorStream(), context);
+ return true;
+}
+
+void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
+ std::ostream& Out = GetOutputStream();
+
+ if (!printed_header_) {
+ // save the names of all the user counters
+ for (const auto& run : reports) {
+ for (const auto& cnt : run.counters) {
+ user_counter_names_.insert(cnt.first);
+ }
+ }
+
+ // print the header
+ for (auto B = elements.begin(); B != elements.end();) {
+ Out << *B++;
+ if (B != elements.end()) Out << ",";
+ }
+ for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();) {
+ Out << ",\"" << *B++ << "\"";
+ }
+ Out << "\n";
+
+ printed_header_ = true;
+ } else {
+ // check that all the current counters are saved in the name set
+ for (const auto& run : reports) {
+ for (const auto& cnt : run.counters) {
+ CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
+ << "All counters must be present in each run. "
+ << "Counter named \"" << cnt.first
+ << "\" was not in a run after being added to the header";
+ }
+ }
+ }
+
+ // print results for each run
+ for (const auto& run : reports) {
+ PrintRunData(run);
+ }
+
+}
+
+void CSVReporter::PrintRunData(const Run & run) {
+ std::ostream& Out = GetOutputStream();
+
+  // Fields with embedded double-quote characters must have the quote doubled
+  // and the field delimited with double-quotes.
+ std::string name = run.benchmark_name;
+ ReplaceAll(&name, "\"", "\"\"");
+ Out << '"' << name << "\",";
+ if (run.error_occurred) {
+ Out << std::string(elements.size() - 3, ',');
+ Out << "true,";
+ std::string msg = run.error_message;
+ ReplaceAll(&msg, "\"", "\"\"");
+ Out << '"' << msg << "\"\n";
+ return;
+ }
+
+ // Do not print iteration on bigO and RMS report
+ if (!run.report_big_o && !run.report_rms) {
+ Out << run.iterations;
+ }
+ Out << ",";
+
+ Out << run.GetAdjustedRealTime() << ",";
+ Out << run.GetAdjustedCPUTime() << ",";
+
+ // Do not print timeLabel on bigO and RMS report
+ if (run.report_big_o) {
+ Out << GetBigOString(run.complexity);
+ } else if (!run.report_rms) {
+ Out << GetTimeUnitString(run.time_unit);
+ }
+ Out << ",";
+
+ if (run.bytes_per_second > 0.0) {
+ Out << run.bytes_per_second;
+ }
+ Out << ",";
+ if (run.items_per_second > 0.0) {
+ Out << run.items_per_second;
+ }
+ Out << ",";
+ if (!run.report_label.empty()) {
+    // Fields with embedded double-quote characters must have the quote
+    // doubled and the field delimited with double-quotes.
+ std::string label = run.report_label;
+ ReplaceAll(&label, "\"", "\"\"");
+ Out << "\"" << label << "\"";
+ }
+ Out << ",,"; // for error_occurred and error_message
+
+ // Print user counters
+ for (const auto &ucn : user_counter_names_) {
+ auto it = run.counters.find(ucn);
+ if(it == run.counters.end()) {
+ Out << ",";
+ } else {
+ Out << "," << it->second;
+ }
+ }
+ Out << '\n';
+}
+
+} // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h
new file mode 100644
index 00000000..4251fe4c
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h
@@ -0,0 +1,172 @@
+// ----------------------------------------------------------------------
+// CycleClock
+// A CycleClock tells you the current time in Cycles. The "time"
+// is actually time since power-on. This is like time() but doesn't
+// involve a system call and is much more precise.
+//
+// NOTE: Not all cpu/platform/kernel combinations guarantee that this
+// clock increments at a constant rate or is synchronized across all logical
+// cpus in a system.
+//
+// If you need the above guarantees, please consider using a different
+// API. There are efforts to provide an interface with millisecond
+// granularity, implemented as a memory read. A memory read is generally
+// cheaper than the CycleClock for many architectures.
+//
+// Also, in some out of order CPU implementations, the CycleClock is not
+// serializing. So if you're trying to count at cycles granularity, your
+// data might be inaccurate due to out of order instruction execution.
+// ----------------------------------------------------------------------
+
+#ifndef BENCHMARK_CYCLECLOCK_H_
+#define BENCHMARK_CYCLECLOCK_H_
+
+#include <cstdint>
+
+#include "benchmark/benchmark.h"
+#include "internal_macros.h"
+
+#if defined(BENCHMARK_OS_MACOSX)
+#include <mach/mach_time.h>
+#endif
+// For MSVC, we want to use '_asm rdtsc' when possible (since it works
+// with even ancient MSVC compilers), and when not possible the
+// __rdtsc intrinsic, declared in <intrin.h>. Unfortunately, in some
+// environments, <windows.h> and <intrin.h> have conflicting
+// declarations of some other intrinsics, breaking compilation.
+// Therefore, we simply declare __rdtsc ourselves. See also
+// http://connect.microsoft.com/VisualStudio/feedback/details/262047
+#if defined(COMPILER_MSVC) && !defined(_M_IX86)
+extern "C" uint64_t __rdtsc();
+#pragma intrinsic(__rdtsc)
+#endif
+
+#ifndef BENCHMARK_OS_WINDOWS
+#include <sys/time.h>
+#include <time.h>
+#endif
+
+#ifdef BENCHMARK_OS_EMSCRIPTEN
+#include <emscripten.h>
+#endif
+
+namespace benchmark {
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+// http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b. See also
+// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+namespace cycleclock {
+// This should return the number of cycles since power-on. Thread-safe.
+inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
+#if defined(BENCHMARK_OS_MACOSX)
+ // this goes at the top because we need ALL Macs, regardless of
+ // architecture, to return the number of "mach time units" that
+ // have passed since startup. See sysinfo.cc where
+ // InitializeSystemInfo() sets the supposed cpu clock frequency of
+ // macs to the number of mach time units per second, not actual
+ // CPU clock frequency (which can change in the face of CPU
+ // frequency scaling). Also note that when the Mac sleeps, this
+ // counter pauses; it does not continue counting, nor does it
+ // reset to zero.
+ return mach_absolute_time();
+#elif defined(BENCHMARK_OS_EMSCRIPTEN)
+ // this goes above x86-specific code because old versions of Emscripten
+ // define __x86_64__, although they have nothing to do with it.
+ return static_cast<int64_t>(emscripten_get_now() * 1e+6);
+#elif defined(__i386__)
+ int64_t ret;
+ __asm__ volatile("rdtsc" : "=A"(ret));
+ return ret;
+#elif defined(__x86_64__) || defined(__amd64__)
+ uint64_t low, high;
+ __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+ return (high << 32) | low;
+#elif defined(__powerpc__) || defined(__ppc__)
+ // This returns a time-base, which is not always precisely a cycle-count.
+ int64_t tbl, tbu0, tbu1;
+ asm("mftbu %0" : "=r"(tbu0));
+ asm("mftb %0" : "=r"(tbl));
+ asm("mftbu %0" : "=r"(tbu1));
+ tbl &= -static_cast<int64_t>(tbu0 == tbu1);
+ // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is garbage)
+ return (tbu1 << 32) | tbl;
+#elif defined(__sparc__)
+ int64_t tick;
+ asm(".byte 0x83, 0x41, 0x00, 0x00");
+ asm("mov %%g1, %0" : "=r"(tick));
+ return tick;
+#elif defined(__ia64__)
+ int64_t itc;
+ asm("mov %0 = ar.itc" : "=r"(itc));
+ return itc;
+#elif defined(COMPILER_MSVC) && defined(_M_IX86)
+ // Older MSVC compilers (like 7.x) don't seem to support the
+ // __rdtsc intrinsic properly, so I prefer to use _asm instead
+ // when I know it will work. Otherwise, I'll use __rdtsc and hope
+ // the code is being compiled with a non-ancient compiler.
+ _asm rdtsc
+#elif defined(COMPILER_MSVC)
+ return __rdtsc();
+#elif defined(BENCHMARK_OS_NACL)
+ // Native Client validator on x86/x86-64 allows RDTSC instructions,
+ // and this case is handled above. Native Client validator on ARM
+ // rejects MRC instructions (used in the ARM-specific sequence below),
+ // so we handle it here. Portable Native Client compiles to
+ // architecture-agnostic bytecode, which doesn't provide any
+ // cycle counter access mnemonics.
+
+ // Native Client does not provide any API to access cycle counter.
+ // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
+  // because it provides nanosecond resolution (which is noticeable at
+ // least for PNaCl modules running on x86 Mac & Linux).
+ // Initialize to always return 0 if clock_gettime fails.
+ struct timespec ts = { 0, 0 };
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
+#elif defined(__aarch64__)
+ // System timer of ARMv8 runs at a different frequency than the CPU's.
+ // The frequency is fixed, typically in the range 1-50MHz. It can be
+ // read at CNTFRQ special register. We assume the OS has set up
+ // the virtual timer properly.
+ int64_t virtual_timer_value;
+ asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
+ return virtual_timer_value;
+#elif defined(__ARM_ARCH)
+ // V6 is the earliest arch that has a standard cyclecount
+ // Native Client validator doesn't allow MRC instructions.
+#if (__ARM_ARCH >= 6)
+ uint32_t pmccntr;
+ uint32_t pmuseren;
+ uint32_t pmcntenset;
+ // Read the user mode perf monitor counter access permissions.
+ asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
+ if (pmuseren & 1) { // Allows reading perfmon counters for user mode code.
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
+ if (pmcntenset & 0x80000000ul) { // Is it counting?
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
+ // The counter is set up to count every 64th cycle
+ return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
+ }
+ }
+#endif
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#elif defined(__mips__)
+ // mips apparently only allows rdtsc for superusers, so we fall
+ // back to gettimeofday. It's possible clock_gettime would be better.
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#else
+// The soft failover to a generic implementation is automatic only for ARM.
+// For other platforms the developer is expected to make an attempt to create
+// a fast implementation and use generic version if nothing better is available.
+#error You need to define CycleTimer for your OS and CPU
+#endif
+}
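+
+// Illustrative usage sketch: measuring an interval in cycles (or time-base
+// units, depending on the platform; see the notes above).
+//
+//   int64_t start = benchmark::cycleclock::Now();
+//   // ... code under measurement ...
+//   int64_t elapsed = benchmark::cycleclock::Now() - start;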
+} // end namespace cycleclock
+} // end namespace benchmark
+
+#endif // BENCHMARK_CYCLECLOCK_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h
new file mode 100644
index 00000000..94288745
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h
@@ -0,0 +1,57 @@
+#ifndef BENCHMARK_INTERNAL_MACROS_H_
+#define BENCHMARK_INTERNAL_MACROS_H_
+
+#include "benchmark/benchmark.h"
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+#if defined(__clang__)
+#define COMPILER_CLANG
+#elif defined(_MSC_VER)
+#define COMPILER_MSVC
+#elif defined(__GNUC__)
+#define COMPILER_GCC
+#endif
+
+#if __has_feature(cxx_attributes)
+#define BENCHMARK_NORETURN [[noreturn]]
+#elif defined(__GNUC__)
+#define BENCHMARK_NORETURN __attribute__((noreturn))
+#elif defined(COMPILER_MSVC)
+#define BENCHMARK_NORETURN __declspec(noreturn)
+#else
+#define BENCHMARK_NORETURN
+#endif
+
+#if defined(__CYGWIN__)
+#define BENCHMARK_OS_CYGWIN 1
+#elif defined(_WIN32)
+#define BENCHMARK_OS_WINDOWS 1
+#elif defined(__APPLE__)
+#include "TargetConditionals.h"
+ #if defined(TARGET_OS_MAC)
+ #define BENCHMARK_OS_MACOSX 1
+ #if defined(TARGET_OS_IPHONE)
+ #define BENCHMARK_OS_IOS 1
+ #endif
+ #endif
+#elif defined(__FreeBSD__)
+#define BENCHMARK_OS_FREEBSD 1
+#elif defined(__linux__)
+#define BENCHMARK_OS_LINUX 1
+#elif defined(__native_client__)
+#define BENCHMARK_OS_NACL 1
+#elif defined(EMSCRIPTEN)
+#define BENCHMARK_OS_EMSCRIPTEN 1
+#elif defined(__rtems__)
+#define BENCHMARK_OS_RTEMS 1
+#endif
+
+#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
+ && !defined(__EXCEPTIONS)
+#define BENCHMARK_HAS_NO_EXCEPTIONS
+#endif
+
+#endif // BENCHMARK_INTERNAL_MACROS_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/json_reporter.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/json_reporter.cc
new file mode 100644
index 00000000..edf6ecc8
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/json_reporter.cc
@@ -0,0 +1,168 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "complexity.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "string_util.h"
+#include "timers.h"
+
+namespace benchmark {
+
+namespace {
+
+std::string FormatKV(std::string const& key, std::string const& value) {
+ return StringPrintF("\"%s\": \"%s\"", key.c_str(), value.c_str());
+}
+
+std::string FormatKV(std::string const& key, const char* value) {
+ return StringPrintF("\"%s\": \"%s\"", key.c_str(), value);
+}
+
+std::string FormatKV(std::string const& key, bool value) {
+ return StringPrintF("\"%s\": %s", key.c_str(), value ? "true" : "false");
+}
+
+std::string FormatKV(std::string const& key, int64_t value) {
+ std::stringstream ss;
+ ss << '"' << key << "\": " << value;
+ return ss.str();
+}
+
+std::string FormatKV(std::string const& key, double value) {
+ return StringPrintF("\"%s\": %.2f", key.c_str(), value);
+}
+
+int64_t RoundDouble(double v) { return static_cast<int64_t>(v + 0.5); }
+
+} // end namespace
+
+bool JSONReporter::ReportContext(const Context& context) {
+ std::ostream& out = GetOutputStream();
+
+ out << "{\n";
+ std::string inner_indent(2, ' ');
+
+ // Open context block and print context information.
+ out << inner_indent << "\"context\": {\n";
+ std::string indent(4, ' ');
+
+ std::string walltime_value = LocalDateTimeString();
+ out << indent << FormatKV("date", walltime_value) << ",\n";
+
+ out << indent << FormatKV("num_cpus", static_cast<int64_t>(context.num_cpus))
+ << ",\n";
+ out << indent << FormatKV("mhz_per_cpu", RoundDouble(context.mhz_per_cpu))
+ << ",\n";
+ out << indent << FormatKV("cpu_scaling_enabled", context.cpu_scaling_enabled)
+ << ",\n";
+
+#if defined(NDEBUG)
+ const char build_type[] = "release";
+#else
+ const char build_type[] = "debug";
+#endif
+ out << indent << FormatKV("library_build_type", build_type) << "\n";
+ // Close context block and open the list of benchmarks.
+ out << inner_indent << "},\n";
+ out << inner_indent << "\"benchmarks\": [\n";
+ return true;
+}
+
+void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
+ if (reports.empty()) {
+ return;
+ }
+ std::string indent(4, ' ');
+ std::ostream& out = GetOutputStream();
+ if (!first_report_) {
+ out << ",\n";
+ }
+ first_report_ = false;
+
+ for (auto it = reports.begin(); it != reports.end(); ++it) {
+ out << indent << "{\n";
+ PrintRunData(*it);
+ out << indent << '}';
+ auto it_cp = it;
+ if (++it_cp != reports.end()) {
+ out << ",\n";
+ }
+ }
+}
+
+void JSONReporter::Finalize() {
+ // Close the list of benchmarks and the top level object.
+ GetOutputStream() << "\n ]\n}\n";
+}
+
+void JSONReporter::PrintRunData(Run const& run) {
+ std::string indent(6, ' ');
+ std::ostream& out = GetOutputStream();
+ out << indent << FormatKV("name", run.benchmark_name) << ",\n";
+ if (run.error_occurred) {
+ out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
+ out << indent << FormatKV("error_message", run.error_message) << ",\n";
+ }
+ if (!run.report_big_o && !run.report_rms) {
+ out << indent << FormatKV("iterations", run.iterations) << ",\n";
+ out << indent
+ << FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime()))
+ << ",\n";
+ out << indent
+ << FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
+ out << ",\n"
+ << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
+ } else if (run.report_big_o) {
+ out << indent
+ << FormatKV("cpu_coefficient", RoundDouble(run.GetAdjustedCPUTime()))
+ << ",\n";
+ out << indent
+ << FormatKV("real_coefficient", RoundDouble(run.GetAdjustedRealTime()))
+ << ",\n";
+ out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
+ out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
+ } else if (run.report_rms) {
+ out << indent
+ << FormatKV("rms", run.GetAdjustedCPUTime());
+ }
+ if (run.bytes_per_second > 0.0) {
+ out << ",\n"
+ << indent
+ << FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second));
+ }
+ if (run.items_per_second > 0.0) {
+ out << ",\n"
+ << indent
+ << FormatKV("items_per_second", RoundDouble(run.items_per_second));
+ }
+ for(auto &c : run.counters) {
+ out << ",\n"
+ << indent
+ << FormatKV(c.first, RoundDouble(c.second));
+ }
+ if (!run.report_label.empty()) {
+ out << ",\n" << indent << FormatKV("label", run.report_label);
+ }
+ out << '\n';
+}
+
+} // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/log.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/log.h
new file mode 100644
index 00000000..d06e1031
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/log.h
@@ -0,0 +1,73 @@
+#ifndef BENCHMARK_LOG_H_
+#define BENCHMARK_LOG_H_
+
+#include <iostream>
+#include <ostream>
+
+#include "benchmark/benchmark.h"
+
+namespace benchmark {
+namespace internal {
+
+typedef std::basic_ostream<char>&(EndLType)(std::basic_ostream<char>&);
+
+class LogType {
+ friend LogType& GetNullLogInstance();
+ friend LogType& GetErrorLogInstance();
+
+ // FIXME: Add locking to output.
+ template <class Tp>
+ friend LogType& operator<<(LogType&, Tp const&);
+ friend LogType& operator<<(LogType&, EndLType*);
+
+ private:
+ LogType(std::ostream* out) : out_(out) {}
+ std::ostream* out_;
+ BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
+};
+
+template <class Tp>
+LogType& operator<<(LogType& log, Tp const& value) {
+ if (log.out_) {
+ *log.out_ << value;
+ }
+ return log;
+}
+
+inline LogType& operator<<(LogType& log, EndLType* m) {
+ if (log.out_) {
+ *log.out_ << m;
+ }
+ return log;
+}
+
+inline int& LogLevel() {
+ static int log_level = 0;
+ return log_level;
+}
+
+inline LogType& GetNullLogInstance() {
+ static LogType log(nullptr);
+ return log;
+}
+
+inline LogType& GetErrorLogInstance() {
+ static LogType log(&std::clog);
+ return log;
+}
+
+inline LogType& GetLogInstanceForLevel(int level) {
+ if (level <= LogLevel()) {
+ return GetErrorLogInstance();
+ }
+ return GetNullLogInstance();
+}
+
+} // end namespace internal
+} // end namespace benchmark
+
+#define VLOG(x) \
+ (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \
+ " ")
+
+#endif
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/mutex.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/mutex.h
new file mode 100644
index 00000000..5f461d05
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/mutex.h
@@ -0,0 +1,155 @@
+#ifndef BENCHMARK_MUTEX_H_
+#define BENCHMARK_MUTEX_H_
+
+#include <condition_variable>
+#include <mutex>
+
+#include "check.h"
+
+// Enable thread safety attributes only with clang.
+// The attributes can be safely erased when compiling with other compilers.
+#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#endif
+
+#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
+
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define REQUIRES(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
+
+#define REQUIRES_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
+
+#define ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
+
+#define ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
+
+#define RELEASE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
+
+#define RELEASE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
+
+#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
+
+#define ASSERT_SHARED_CAPABILITY(x) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
+
+#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+namespace benchmark {
+
+typedef std::condition_variable Condition;
+
+// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
+// we can annotate them with thread safety attributes and use the
+// -Wthread-safety warning with clang. The standard library types cannot be
+// used directly because they do not provide the required annotations.
+class CAPABILITY("mutex") Mutex {
+ public:
+ Mutex() {}
+
+ void lock() ACQUIRE() { mut_.lock(); }
+ void unlock() RELEASE() { mut_.unlock(); }
+ std::mutex& native_handle() { return mut_; }
+
+ private:
+ std::mutex mut_;
+};
+
+class SCOPED_CAPABILITY MutexLock {
+ typedef std::unique_lock<std::mutex> MutexLockImp;
+
+ public:
+ MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {}
+ ~MutexLock() RELEASE() {}
+ MutexLockImp& native_handle() { return ml_; }
+
+ private:
+ MutexLockImp ml_;
+};
+
+class Barrier {
+ public:
+ Barrier(int num_threads) : running_threads_(num_threads) {}
+
+ // Called by each thread
+ bool wait() EXCLUDES(lock_) {
+ bool last_thread = false;
+ {
+ MutexLock ml(lock_);
+ last_thread = createBarrier(ml);
+ }
+ if (last_thread) phase_condition_.notify_all();
+ return last_thread;
+ }
+
+ void removeThread() EXCLUDES(lock_) {
+ MutexLock ml(lock_);
+ --running_threads_;
+ if (entered_ != 0) phase_condition_.notify_all();
+ }
+
+ private:
+ Mutex lock_;
+ Condition phase_condition_;
+ int running_threads_;
+
+ // State for barrier management
+ int phase_number_ = 0;
+ int entered_ = 0; // Number of threads that have entered this barrier
+
+ // Enter the barrier and wait until all other threads have also
+  // entered the barrier. Returns true iff this is the last thread to
+ // enter the barrier.
+ bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
+ CHECK_LT(entered_, running_threads_);
+ entered_++;
+ if (entered_ < running_threads_) {
+ // Wait for all threads to enter
+ int phase_number_cp = phase_number_;
+ auto cb = [this, phase_number_cp]() {
+ return this->phase_number_ > phase_number_cp ||
+ entered_ == running_threads_; // A thread has aborted in error
+ };
+ phase_condition_.wait(ml.native_handle(), cb);
+ if (phase_number_ > phase_number_cp) return false;
+ // else (running_threads_ == entered_) and we are the last thread.
+ }
+ // Last thread has reached the barrier
+ phase_number_++;
+ entered_ = 0;
+ return true;
+ }
+};
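+
+// Illustrative usage sketch: each worker thread calls wait() once per phase;
+// the last thread to arrive sees wait() return true and can do serial work.
+//
+//   Barrier barrier(/*num_threads=*/4);
+//   // in each of the 4 threads:
+//   if (barrier.wait()) {
+//     // last thread to arrive: per-phase serial bookkeeping goes here
+//   }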
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_MUTEX_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/re.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/re.h
new file mode 100644
index 00000000..01e97365
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/re.h
@@ -0,0 +1,140 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef BENCHMARK_RE_H_
+#define BENCHMARK_RE_H_
+
+#include "internal_macros.h"
+
+// Prefer C regex libraries when compiling w/o exceptions so that we can
+// correctly report errors.
+#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && defined(HAVE_STD_REGEX) && \
+ (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX))
+#undef HAVE_STD_REGEX
+#endif
+
+#if defined(HAVE_STD_REGEX)
+#include <regex>
+#elif defined(HAVE_GNU_POSIX_REGEX)
+#include <gnuregex.h>
+#elif defined(HAVE_POSIX_REGEX)
+#include <regex.h>
+#else
+#error No regular expression backend was found!
+#endif
+#include <string>
+
+#include "check.h"
+
+namespace benchmark {
+
+// A wrapper around the POSIX regular expression API that provides automatic
+// cleanup
+class Regex {
+ public:
+ Regex() : init_(false) {}
+
+ ~Regex();
+
+ // Compile a regular expression matcher from spec. Returns true on success.
+ //
+  // On failure (and if error is not nullptr), error is populated with a
+  // human-readable error message.
+ bool Init(const std::string& spec, std::string* error);
+
+ // Returns whether str matches the compiled regular expression.
+ bool Match(const std::string& str);
+
+ private:
+ bool init_;
+// Underlying regular expression object
+#if defined(HAVE_STD_REGEX)
+ std::regex re_;
+#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
+ regex_t re_;
+#else
+#error No regular expression backend implementation available
+#endif
+};
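+
+// Illustrative usage sketch (not part of the library): compile a pattern
+// once with Init, then reuse it with Match. The pattern and strings below
+// are arbitrary examples.
+//
+//   Regex re;
+//   std::string err;
+//   if (!re.Init("^BM_.*", &err)) {
+//     // handle the error described in err
+//   } else if (re.Match("BM_StringCopy")) {
+//     // the name matches the filter
+//   }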
+
+#if defined(HAVE_STD_REGEX)
+
+inline bool Regex::Init(const std::string& spec, std::string* error) {
+#ifdef BENCHMARK_HAS_NO_EXCEPTIONS
+ ((void)error); // suppress unused warning
+#else
+ try {
+#endif
+ re_ = std::regex(spec, std::regex_constants::extended);
+ init_ = true;
+#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
+ } catch (const std::regex_error& e) {
+ if (error) {
+ *error = e.what();
+ }
+ }
+#endif
+ return init_;
+}
+
+inline Regex::~Regex() {}
+
+inline bool Regex::Match(const std::string& str) {
+ if (!init_) {
+ return false;
+ }
+ return std::regex_search(str, re_);
+}
+
+#else
+inline bool Regex::Init(const std::string& spec, std::string* error) {
+ int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
+ if (ec != 0) {
+ if (error) {
+ size_t needed = regerror(ec, &re_, nullptr, 0);
+ char* errbuf = new char[needed];
+ regerror(ec, &re_, errbuf, needed);
+
+      // regerror returns the number of bytes necessary to null terminate
+      // the string, so we drop the terminator when assigning to error.
+ CHECK_NE(needed, 0);
+ error->assign(errbuf, needed - 1);
+
+ delete[] errbuf;
+ }
+
+ return false;
+ }
+
+ init_ = true;
+ return true;
+}
+
+inline Regex::~Regex() {
+ if (init_) {
+ regfree(&re_);
+ }
+}
+
+inline bool Regex::Match(const std::string& str) {
+ if (!init_) {
+ return false;
+ }
+ return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
+}
+#endif
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_RE_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/reporter.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/reporter.cc
new file mode 100644
index 00000000..aacd4531
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/reporter.cc
@@ -0,0 +1,68 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "timers.h"
+
+#include <cstdlib>
+
+#include <iostream>
+#include <tuple>
+#include <vector>
+
+#include "check.h"
+#include "stat.h"
+
+namespace benchmark {
+
+BenchmarkReporter::BenchmarkReporter()
+ : output_stream_(&std::cout), error_stream_(&std::cerr) {}
+
+BenchmarkReporter::~BenchmarkReporter() {}
+
+void BenchmarkReporter::PrintBasicContext(std::ostream *out,
+ Context const &context) {
+ CHECK(out) << "cannot be null";
+ auto &Out = *out;
+
+ Out << "Run on (" << context.num_cpus << " X " << context.mhz_per_cpu
+ << " MHz CPU " << ((context.num_cpus > 1) ? "s" : "") << ")\n";
+
+ Out << LocalDateTimeString() << "\n";
+
+ if (context.cpu_scaling_enabled) {
+ Out << "***WARNING*** CPU scaling is enabled, the benchmark "
+ "real time measurements may be noisy and will incur extra "
+ "overhead.\n";
+ }
+
+#ifndef NDEBUG
+ Out << "***WARNING*** Library was built as DEBUG. Timings may be "
+ "affected.\n";
+#endif
+}
+
+double BenchmarkReporter::Run::GetAdjustedRealTime() const {
+ double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
+ if (iterations != 0) new_time /= static_cast<double>(iterations);
+ return new_time;
+}
+
+double BenchmarkReporter::Run::GetAdjustedCPUTime() const {
+ double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
+ if (iterations != 0) new_time /= static_cast<double>(iterations);
+ return new_time;
+}
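+
+// Worked example (illustrative): with real_accumulated_time = 2.5e-3 s,
+// time_unit = kMicrosecond (multiplier 1e6) and iterations = 1000,
+// GetAdjustedRealTime() returns 2.5e-3 * 1e6 / 1000 = 2.5 microseconds per
+// iteration; GetAdjustedCPUTime() applies the same formula to
+// cpu_accumulated_time.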
+
+} // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc
new file mode 100644
index 00000000..54aa04a4
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc
@@ -0,0 +1,51 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "sleep.h"
+
+#include <cerrno>
+#include <cstdlib>
+#include <ctime>
+
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <Windows.h>
+#endif
+
+namespace benchmark {
+#ifdef BENCHMARK_OS_WINDOWS
+// Windows' Sleep takes a milliseconds argument.
+void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); }
+void SleepForSeconds(double seconds) {
+ SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
+}
+#else // BENCHMARK_OS_WINDOWS
+void SleepForMicroseconds(int microseconds) {
+ struct timespec sleep_time;
+ sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
+ sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
+ while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
+ ; // Ignore signals and wait for the full interval to elapse.
+}
+
+void SleepForMilliseconds(int milliseconds) {
+ SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
+}
+
+void SleepForSeconds(double seconds) {
+ SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
+}
+#endif // BENCHMARK_OS_WINDOWS
+} // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.h
new file mode 100644
index 00000000..f98551af
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.h
@@ -0,0 +1,15 @@
+#ifndef BENCHMARK_SLEEP_H_
+#define BENCHMARK_SLEEP_H_
+
+namespace benchmark {
+const int kNumMillisPerSecond = 1000;
+const int kNumMicrosPerMilli = 1000;
+const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;
+const int kNumNanosPerMicro = 1000;
+const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;
+
+void SleepForMilliseconds(int milliseconds);
+void SleepForSeconds(double seconds);
+} // end namespace benchmark
+
+#endif // BENCHMARK_SLEEP_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h
new file mode 100644
index 00000000..d356875b
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h
@@ -0,0 +1,310 @@
+#ifndef BENCHMARK_STAT_H_
+#define BENCHMARK_STAT_H_
+
+#include <cmath>
+#include <limits>
+#include <ostream>
+#include <type_traits>
+
+namespace benchmark {
+
+template <typename VType, typename NumType>
+class Stat1;
+
+template <typename VType, typename NumType>
+class Stat1MinMax;
+
+typedef Stat1<float, int64_t> Stat1_f;
+typedef Stat1<double, int64_t> Stat1_d;
+typedef Stat1MinMax<float, int64_t> Stat1MinMax_f;
+typedef Stat1MinMax<double, int64_t> Stat1MinMax_d;
+
+template <typename VType>
+class Vector2;
+template <typename VType>
+class Vector3;
+template <typename VType>
+class Vector4;
+
+template <typename VType, typename NumType>
+class Stat1 {
+ public:
+ typedef Stat1<VType, NumType> Self;
+
+ Stat1() { Clear(); }
+ // Create a sample of value dat and weight 1
+ explicit Stat1(const VType &dat) {
+ sum_ = dat;
+ sum_squares_ = Sqr(dat);
+ numsamples_ = 1;
+ }
+ // Create statistics for all the samples between begin (included)
+  // and end (excluded)
+ explicit Stat1(const VType *begin, const VType *end) {
+ Clear();
+ for (const VType *item = begin; item < end; ++item) {
+ (*this) += Stat1(*item);
+ }
+ }
+ // Create a sample of value dat and weight w
+ Stat1(const VType &dat, const NumType &w) {
+ sum_ = w * dat;
+ sum_squares_ = w * Sqr(dat);
+ numsamples_ = w;
+ }
+  // Copy constructor
+ Stat1(const Self &stat) {
+ sum_ = stat.sum_;
+ sum_squares_ = stat.sum_squares_;
+ numsamples_ = stat.numsamples_;
+ }
+
+ void Clear() {
+ numsamples_ = NumType();
+ sum_squares_ = sum_ = VType();
+ }
+
+ Self &operator=(const Self &stat) {
+ sum_ = stat.sum_;
+ sum_squares_ = stat.sum_squares_;
+ numsamples_ = stat.numsamples_;
+ return (*this);
+ }
+ // Merge statistics from two sample sets.
+ Self &operator+=(const Self &stat) {
+ sum_ += stat.sum_;
+ sum_squares_ += stat.sum_squares_;
+ numsamples_ += stat.numsamples_;
+ return (*this);
+ }
+ // The operation opposite to +=
+ Self &operator-=(const Self &stat) {
+ sum_ -= stat.sum_;
+ sum_squares_ -= stat.sum_squares_;
+ numsamples_ -= stat.numsamples_;
+ return (*this);
+ }
+ // Multiply the weight of the set of samples by a factor k
+ Self &operator*=(const VType &k) {
+ sum_ *= k;
+ sum_squares_ *= k;
+ numsamples_ *= k;
+ return (*this);
+ }
+
+ // Merge statistics from two sample sets.
+ Self operator+(const Self &stat) const { return Self(*this) += stat; }
+
+ // The operation opposite to +
+ Self operator-(const Self &stat) const { return Self(*this) -= stat; }
+
+ // Multiply the weight of the set of samples by a factor k
+ Self operator*(const VType &k) const { return Self(*this) *= k; }
+
+ // Return the total weight of this sample set
+ NumType numSamples() const { return numsamples_; }
+
+ // Return the sum of this sample set
+ VType Sum() const { return sum_; }
+
+ // Return the mean of this sample set
+ VType Mean() const {
+ if (numsamples_ == 0) return VType();
+ return sum_ * (1.0 / numsamples_);
+ }
+
+ // Return the mean of this sample set and compute the standard deviation at
+ // the same time.
+ VType Mean(VType *stddev) const {
+ if (numsamples_ == 0) return VType();
+ VType mean = sum_ * (1.0 / numsamples_);
+ if (stddev) {
+ // Sample standard deviation is undefined for n = 1
+ if (numsamples_ == 1) {
+ *stddev = VType();
+ } else {
+ VType avg_squares = sum_squares_ * (1.0 / numsamples_);
+ *stddev = Sqrt(numsamples_ / (numsamples_ - 1.0) * (avg_squares - Sqr(mean)));
+ }
+ }
+ return mean;
+ }
+
+ // Return the standard deviation of the sample set
+ VType StdDev() const {
+ VType stddev = VType();
+ Mean(&stddev);
+ return stddev;
+ }
+
+ private:
+ static_assert(std::is_integral<NumType>::value &&
+ !std::is_same<NumType, bool>::value,
+ "NumType must be an integral type that is not bool.");
+ // Let i be the index of the samples provided (using +=)
+ // and weight[i],value[i] be the data of sample #i
+ // then the variables have the following meaning:
+ NumType numsamples_; // sum of weight[i];
+ VType sum_; // sum of weight[i]*value[i];
+ VType sum_squares_; // sum of weight[i]*value[i]^2;
+
+ // Template function used to square a number.
+ // For a vector we square all components
+ template <typename SType>
+ static inline SType Sqr(const SType &dat) {
+ return dat * dat;
+ }
+
+ template <typename SType>
+ static inline Vector2<SType> Sqr(const Vector2<SType> &dat) {
+ return dat.MulComponents(dat);
+ }
+
+ template <typename SType>
+ static inline Vector3<SType> Sqr(const Vector3<SType> &dat) {
+ return dat.MulComponents(dat);
+ }
+
+ template <typename SType>
+ static inline Vector4<SType> Sqr(const Vector4<SType> &dat) {
+ return dat.MulComponents(dat);
+ }
+
+ // Template function used to take the square root of a number.
+ // For a vector we square all components
+ template <typename SType>
+ static inline SType Sqrt(const SType &dat) {
+ // Avoid NaN due to imprecision in the calculations
+ if (dat < 0) return 0;
+ return sqrt(dat);
+ }
+
+ template <typename SType>
+ static inline Vector2<SType> Sqrt(const Vector2<SType> &dat) {
+ // Avoid NaN due to imprecision in the calculations
+ return Max(dat, Vector2<SType>()).Sqrt();
+ }
+
+ template <typename SType>
+ static inline Vector3<SType> Sqrt(const Vector3<SType> &dat) {
+ // Avoid NaN due to imprecision in the calculations
+ return Max(dat, Vector3<SType>()).Sqrt();
+ }
+
+ template <typename SType>
+ static inline Vector4<SType> Sqrt(const Vector4<SType> &dat) {
+ // Avoid NaN due to imprecision in the calculations
+ return Max(dat, Vector4<SType>()).Sqrt();
+ }
+};
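+
+// Illustrative usage sketch (not part of the library): accumulate three
+// samples and read back the aggregate statistics.
+//
+//   Stat1_d s;
+//   s += Stat1_d(1.0);
+//   s += Stat1_d(2.0);
+//   s += Stat1_d(3.0);
+//   // s.Mean() == 2.0, s.numSamples() == 3, and
+//   // s.StdDev() == 1.0 (sample stddev: sqrt(3/2.0 * (14/3.0 - 4)))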
+
+// Useful printing function
+template <typename VType, typename NumType>
+std::ostream &operator<<(std::ostream &out, const Stat1<VType, NumType> &s) {
+ out << "{ avg = " << s.Mean() << " std = " << s.StdDev()
+ << " nsamples = " << s.NumSamples() << "}";
+ return out;
+}
+
+// Stat1MinMax: same as Stat1, but it also
+// keeps the Min and Max values; the "-"
+// operator is disabled because it cannot be implemented
+// efficiently
+template <typename VType, typename NumType>
+class Stat1MinMax : public Stat1<VType, NumType> {
+ public:
+ typedef Stat1MinMax<VType, NumType> Self;
+
+ Stat1MinMax() { Clear(); }
+ // Create a sample of value dat and weight 1
+ explicit Stat1MinMax(const VType &dat) : Stat1<VType, NumType>(dat) {
+ max_ = dat;
+ min_ = dat;
+ }
+ // Create statistics for all the samples between begin (included)
+  // and end (excluded)
+ explicit Stat1MinMax(const VType *begin, const VType *end) {
+ Clear();
+ for (const VType *item = begin; item < end; ++item) {
+ (*this) += Stat1MinMax(*item);
+ }
+ }
+ // Create a sample of value dat and weight w
+ Stat1MinMax(const VType &dat, const NumType &w)
+ : Stat1<VType, NumType>(dat, w) {
+ max_ = dat;
+ min_ = dat;
+ }
+  // Copy constructor
+ Stat1MinMax(const Self &stat) : Stat1<VType, NumType>(stat) {
+ max_ = stat.max_;
+ min_ = stat.min_;
+ }
+
+ void Clear() {
+ Stat1<VType, NumType>::Clear();
+ if (std::numeric_limits<VType>::has_infinity) {
+ min_ = std::numeric_limits<VType>::infinity();
+ max_ = -std::numeric_limits<VType>::infinity();
+ } else {
+ min_ = std::numeric_limits<VType>::max();
+ max_ = std::numeric_limits<VType>::min();
+ }
+ }
+
+ Self &operator=(const Self &stat) {
+ this->Stat1<VType, NumType>::operator=(stat);
+ max_ = stat.max_;
+ min_ = stat.min_;
+ return (*this);
+ }
+ // Merge statistics from two sample sets.
+ Self &operator+=(const Self &stat) {
+ this->Stat1<VType, NumType>::operator+=(stat);
+ if (stat.max_ > max_) max_ = stat.max_;
+ if (stat.min_ < min_) min_ = stat.min_;
+ return (*this);
+ }
+ // Multiply the weight of the set of samples by a factor k
+ Self &operator*=(const VType &stat) {
+ this->Stat1<VType, NumType>::operator*=(stat);
+ return (*this);
+ }
+ // Merge statistics from two sample sets.
+ Self operator+(const Self &stat) const { return Self(*this) += stat; }
+ // Multiply the weight of the set of samples by a factor k
+ Self operator*(const VType &k) const { return Self(*this) *= k; }
+
+ // Return the maximal value in this sample set
+ VType Max() const { return max_; }
+ // Return the minimal value in this sample set
+ VType Min() const { return min_; }
+
+ private:
+  // The - operation makes no sense with Min/Max unless we keep the
+  // full list of values (but we don't), so it is private and left
+  // undefined so nobody can call it.
+  Self &operator-=(const Self &stat);  // senseless; left undefined.
+
+  // The operation opposite to -
+  Self operator-(const Self &stat) const;  // senseless; left undefined.
+
+ // Let i be the index of the samples provided (using +=)
+ // and weight[i],value[i] be the data of sample #i
+ // then the variables have the following meaning:
+ VType max_; // max of value[i]
+ VType min_; // min of value[i]
+};
+
+// Useful printing function
+template <typename VType, typename NumType>
+std::ostream &operator<<(std::ostream &out,
+ const Stat1MinMax<VType, NumType> &s) {
+ out << "{ avg = " << s.Mean() << " std = " << s.StdDev()
+ << " nsamples = " << s.NumSamples() << " min = " << s.Min()
+ << " max = " << s.Max() << "}";
+ return out;
+}
+} // end namespace benchmark
+
+#endif // BENCHMARK_STAT_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.cc
new file mode 100644
index 00000000..cd4e7cfd
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.cc
@@ -0,0 +1,172 @@
+#include "string_util.h"
+
+#include <array>
+#include <cmath>
+#include <cstdarg>
+#include <cstdio>
+#include <memory>
+#include <sstream>
+
+#include "arraysize.h"
+
+namespace benchmark {
+namespace {
+
+// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
+const char kBigSIUnits[] = "kMGTPEZY";
+// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi.
+const char kBigIECUnits[] = "KMGTPEZY";
+// milli, micro, nano, pico, femto, atto, zepto, yocto.
+const char kSmallSIUnits[] = "munpfazy";
+
+// We require that all three arrays have the same size.
+static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits),
+ "SI and IEC unit arrays must be the same size");
+static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
+ "Small SI and Big SI unit arrays must be the same size");
+
+static const int64_t kUnitsSize = arraysize(kBigSIUnits);
+
+} // end anonymous namespace
+
+void ToExponentAndMantissa(double val, double thresh, int precision,
+ double one_k, std::string* mantissa,
+ int64_t* exponent) {
+ std::stringstream mantissa_stream;
+
+ if (val < 0) {
+ mantissa_stream << "-";
+ val = -val;
+ }
+
+ // Adjust threshold so that it never excludes things which can't be rendered
+ // in 'precision' digits.
+ const double adjusted_threshold =
+ std::max(thresh, 1.0 / std::pow(10.0, precision));
+ const double big_threshold = adjusted_threshold * one_k;
+ const double small_threshold = adjusted_threshold;
+  // Values in (simple_threshold, small_threshold) will be printed as-is
+ const double simple_threshold = 0.01;
+
+ if (val > big_threshold) {
+ // Positive powers
+ double scaled = val;
+ for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) {
+ scaled /= one_k;
+ if (scaled <= big_threshold) {
+ mantissa_stream << scaled;
+ *exponent = i + 1;
+ *mantissa = mantissa_stream.str();
+ return;
+ }
+ }
+ mantissa_stream << val;
+ *exponent = 0;
+ } else if (val < small_threshold) {
+ // Negative powers
+ if (val < simple_threshold) {
+ double scaled = val;
+ for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) {
+ scaled *= one_k;
+ if (scaled >= small_threshold) {
+ mantissa_stream << scaled;
+ *exponent = -static_cast<int64_t>(i + 1);
+ *mantissa = mantissa_stream.str();
+ return;
+ }
+ }
+ }
+ mantissa_stream << val;
+ *exponent = 0;
+ } else {
+ mantissa_stream << val;
+ *exponent = 0;
+ }
+ *mantissa = mantissa_stream.str();
+}
+
+std::string ExponentToPrefix(int64_t exponent, bool iec) {
+ if (exponent == 0) return "";
+
+ const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
+ if (index >= kUnitsSize) return "";
+
+ const char* array =
+ (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
+ if (iec)
+ return array[index] + std::string("i");
+ else
+ return std::string(1, array[index]);
+}
+
+std::string ToBinaryStringFullySpecified(double value, double threshold,
+ int precision) {
+ std::string mantissa;
+ int64_t exponent;
+ ToExponentAndMantissa(value, threshold, precision, 1024.0, &mantissa,
+ &exponent);
+ return mantissa + ExponentToPrefix(exponent, false);
+}
+
+void AppendHumanReadable(int n, std::string* str) {
+ std::stringstream ss;
+ // Round down to the nearest SI prefix.
+ ss << ToBinaryStringFullySpecified(n, 1.0, 0);
+ *str += ss.str();
+}
+
+std::string HumanReadableNumber(double n) {
+ // 1.1 means that figures up to 1.1k should be shown with the next unit down;
+ // this softens edge effects.
+ // 1 means that we should show one decimal place of precision.
+ return ToBinaryStringFullySpecified(n, 1.1, 1);
+}
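+
+// Worked example (illustrative): HumanReadableNumber(1536.0) scales by
+// 1024 once (1536 / 1024 = 1.5) and appends the "k" prefix, yielding
+// "1.5k". Note the scaling factor is binary (1024) even though SI prefix
+// letters are used.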
+
+std::string StringPrintFImp(const char* msg, va_list args) {
+  // we might need a second shot at this, so pre-emptively make a copy
+ va_list args_cp;
+ va_copy(args_cp, args);
+
+  // TODO(ericwf): use std::array for the first attempt to avoid one memory
+  // allocation; guess what the size might be.
+ std::array<char, 256> local_buff;
+ std::size_t size = local_buff.size();
+  // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a
+  // limitation in the android-ndk
+ auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
+
+ va_end(args_cp);
+
+ // handle empty expansion
+ if (ret == 0) return std::string{};
+ if (static_cast<std::size_t>(ret) < size)
+ return std::string(local_buff.data());
+
+ // we did not provide a long enough buffer on our first attempt.
+ // add 1 to size to account for null-byte in size cast to prevent overflow
+ size = static_cast<std::size_t>(ret) + 1;
+ auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
+  // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a
+  // limitation in the android-ndk
+ ret = vsnprintf(buff_ptr.get(), size, msg, args);
+ return std::string(buff_ptr.get());
+}
+
+std::string StringPrintF(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ std::string tmp = StringPrintFImp(format, args);
+ va_end(args);
+ return tmp;
+}
+
+void ReplaceAll(std::string* str, const std::string& from,
+ const std::string& to) {
+ std::size_t start = 0;
+ while ((start = str->find(from, start)) != std::string::npos) {
+ str->replace(start, from.length(), to);
+ start += to.length();
+ }
+}
+
+} // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.h
new file mode 100644
index 00000000..0b190b91
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.h
@@ -0,0 +1,40 @@
+#ifndef BENCHMARK_STRING_UTIL_H_
+#define BENCHMARK_STRING_UTIL_H_
+
+#include <sstream>
+#include <string>
+#include <utility>
+#include "internal_macros.h"
+
+namespace benchmark {
+
+void AppendHumanReadable(int n, std::string* str);
+
+std::string HumanReadableNumber(double n);
+
+std::string StringPrintF(const char* format, ...);
+
+inline std::ostream& StringCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
+ return out;
+}
+
+template <class First, class... Rest>
+inline std::ostream& StringCatImp(std::ostream& out, First&& f,
+ Rest&&... rest) {
+ out << std::forward<First>(f);
+ return StringCatImp(out, std::forward<Rest>(rest)...);
+}
+
+template <class... Args>
+inline std::string StrCat(Args&&... args) {
+ std::ostringstream ss;
+ StringCatImp(ss, std::forward<Args>(args)...);
+ return ss.str();
+}
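+
+// Illustrative usage sketch: StrCat streams each argument into an
+// ostringstream, so any type with an operator<< works, e.g.
+//   StrCat("cpu", 4, " @ ", 2.5, " GHz") == "cpu4 @ 2.5 GHz"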
+
+void ReplaceAll(std::string* str, const std::string& from,
+ const std::string& to);
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_STRING_UTIL_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc
new file mode 100644
index 00000000..7feb79e6
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc
@@ -0,0 +1,355 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "sysinfo.h"
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <Shlwapi.h>
+#include <VersionHelpers.h>
+#include <Windows.h>
+#else
+#include <fcntl.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
+#include <unistd.h>
+#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
+#include <sys/sysctl.h>
+#endif
+#endif
+
+#include <cerrno>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <limits>
+#include <mutex>
+
+#include "arraysize.h"
+#include "check.h"
+#include "cycleclock.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "sleep.h"
+#include "string_util.h"
+
+namespace benchmark {
+namespace {
+std::once_flag cpuinfo_init;
+double cpuinfo_cycles_per_second = 1.0;
+int cpuinfo_num_cpus = 1; // Conservative guess
+
+#if !defined BENCHMARK_OS_MACOSX
+const int64_t estimate_time_ms = 1000;
+
+// Helper function that estimates cycles/sec by observing the cycles elapsed
+// during sleep(). Using a small sleep time decreases accuracy significantly.
+int64_t EstimateCyclesPerSecond() {
+ const int64_t start_ticks = cycleclock::Now();
+ SleepForMilliseconds(estimate_time_ms);
+ return cycleclock::Now() - start_ticks;
+}
+#endif
+
+#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
+// Helper function for reading a long from a file. Returns true if successful
+// and the memory location pointed to by value is set to the value read.
+bool ReadIntFromFile(const char* file, long* value) {
+ bool ret = false;
+ int fd = open(file, O_RDONLY);
+ if (fd != -1) {
+ char line[1024];
+ char* err;
+ memset(line, '\0', sizeof(line));
+ ssize_t read_err = read(fd, line, sizeof(line) - 1);
+ ((void)read_err); // prevent unused warning
+ CHECK(read_err >= 0);
+ const long temp_value = strtol(line, &err, 10);
+ if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
+ *value = temp_value;
+ ret = true;
+ }
+ close(fd);
+ }
+ return ret;
+}
+#endif
+
+#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
+static std::string convertToLowerCase(std::string s) {
+ for (auto& ch : s)
+ ch = std::tolower(ch);
+ return s;
+}
+static bool startsWithKey(std::string Value, std::string Key,
+ bool IgnoreCase = true) {
+ if (IgnoreCase) {
+ Key = convertToLowerCase(std::move(Key));
+ Value = convertToLowerCase(std::move(Value));
+ }
+ return Value.compare(0, Key.size(), Key) == 0;
+}
+#endif
+
+void InitializeSystemInfo() {
+#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
+ char line[1024];
+ char* err;
+ long freq;
+
+ bool saw_mhz = false;
+
+ // If the kernel is exporting the tsc frequency use that. There are issues
+ // where cpuinfo_max_freq cannot be relied on because the BIOS may be
+// exporting an invalid p-state (on x86) or p-states may be used to put the
+ // processor in a new mode (turbo mode). Essentially, those frequencies
+ // cannot always be relied upon. The same reasons apply to /proc/cpuinfo as
+ // well.
+ if (!saw_mhz &&
+ ReadIntFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
+ // The value is in kHz (as the file name suggests). For example, on a
+ // 2GHz warpstation, the file contains the value "2000000".
+ cpuinfo_cycles_per_second = freq * 1000.0;
+ saw_mhz = true;
+ }
+
+ // If CPU scaling is in effect, we want to use the *maximum* frequency,
+ // not whatever CPU speed some random processor happens to be using now.
+ if (!saw_mhz &&
+ ReadIntFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+ &freq)) {
+ // The value is in kHz. For example, on a 2GHz warpstation, the file
+ // contains the value "2000000".
+ cpuinfo_cycles_per_second = freq * 1000.0;
+ saw_mhz = true;
+ }
+
+  // Read /proc/cpuinfo for other values, and for the frequency if there is
+  // no cpuinfo_max_freq.
+ const char* pname = "/proc/cpuinfo";
+ int fd = open(pname, O_RDONLY);
+ if (fd == -1) {
+ perror(pname);
+ if (!saw_mhz) {
+ cpuinfo_cycles_per_second =
+ static_cast<double>(EstimateCyclesPerSecond());
+ }
+ return;
+ }
+
+ double bogo_clock = 1.0;
+ bool saw_bogo = false;
+ long max_cpu_id = 0;
+ int num_cpus = 0;
+ line[0] = line[1] = '\0';
+ size_t chars_read = 0;
+ do { // we'll exit when the last read didn't read anything
+ // Move the next line to the beginning of the buffer
+ const size_t oldlinelen = strlen(line);
+ if (sizeof(line) == oldlinelen + 1) // oldlinelen took up entire line
+ line[0] = '\0';
+ else // still other lines left to save
+ memmove(line, line + oldlinelen + 1, sizeof(line) - (oldlinelen + 1));
+ // Terminate the new line, reading more if we can't find the newline
+ char* newline = strchr(line, '\n');
+ if (newline == nullptr) {
+ const size_t linelen = strlen(line);
+ const size_t bytes_to_read = sizeof(line) - 1 - linelen;
+ CHECK(bytes_to_read > 0); // because the memmove recovered >=1 bytes
+ chars_read = read(fd, line + linelen, bytes_to_read);
+ line[linelen + chars_read] = '\0';
+ newline = strchr(line, '\n');
+ }
+ if (newline != nullptr) *newline = '\0';
+
+ // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
+    // accept positive values. Some environments (virtual machines) report zero,
+ // which would cause infinite looping in WallTime_Init.
+ if (!saw_mhz && startsWithKey(line, "cpu MHz")) {
+ const char* freqstr = strchr(line, ':');
+ if (freqstr) {
+ cpuinfo_cycles_per_second = strtod(freqstr + 1, &err) * 1000000.0;
+ if (freqstr[1] != '\0' && *err == '\0' && cpuinfo_cycles_per_second > 0)
+ saw_mhz = true;
+ }
+ } else if (startsWithKey(line, "bogomips")) {
+ const char* freqstr = strchr(line, ':');
+ if (freqstr) {
+ bogo_clock = strtod(freqstr + 1, &err) * 1000000.0;
+ if (freqstr[1] != '\0' && *err == '\0' && bogo_clock > 0)
+ saw_bogo = true;
+ }
+ } else if (startsWithKey(line, "processor", /*IgnoreCase*/false)) {
+ // The above comparison is case-sensitive because ARM kernels often
+ // include a "Processor" line that tells you about the CPU, distinct
+ // from the usual "processor" lines that give you CPU ids. No current
+ // Linux architecture is using "Processor" for CPU ids.
+      num_cpus++;  // count up every time we see a "processor :" entry
+ const char* id_str = strchr(line, ':');
+ if (id_str) {
+ const long cpu_id = strtol(id_str + 1, &err, 10);
+ if (id_str[1] != '\0' && *err == '\0' && max_cpu_id < cpu_id)
+ max_cpu_id = cpu_id;
+ }
+ }
+ } while (chars_read > 0);
+ close(fd);
+
+ if (!saw_mhz) {
+ if (saw_bogo) {
+ // If we didn't find anything better, we'll use bogomips, but
+ // we're not happy about it.
+ cpuinfo_cycles_per_second = bogo_clock;
+ } else {
+ // If we don't even have bogomips, we'll use the slow estimation.
+ cpuinfo_cycles_per_second =
+ static_cast<double>(EstimateCyclesPerSecond());
+ }
+ }
+ if (num_cpus == 0) {
+ fprintf(stderr, "Failed to read num. CPUs correctly from /proc/cpuinfo\n");
+ } else {
+ if ((max_cpu_id + 1) != num_cpus) {
+ fprintf(stderr,
+ "CPU ID assignments in /proc/cpuinfo seem messed up."
+ " This is usually caused by a bad BIOS.\n");
+ }
+ cpuinfo_num_cpus = num_cpus;
+ }
+
+#elif defined BENCHMARK_OS_FREEBSD
+// For this sysctl to work, the machine must be configured without
+// SMP, APIC, or APM support. hz should be 64-bit in freebsd 7.0
+// and later. Before that, it's a 32-bit quantity (and gives the
+// wrong answer on machines faster than 2^32 Hz). See
+// http://lists.freebsd.org/pipermail/freebsd-i386/2004-November/001846.html
+// But also compare FreeBSD 7.0:
+// http://fxr.watson.org/fxr/source/i386/i386/tsc.c?v=RELENG70#L223
+// 231 error = sysctl_handle_quad(oidp, &freq, 0, req);
+// To FreeBSD 6.3 (it's the same in 6-STABLE):
+// http://fxr.watson.org/fxr/source/i386/i386/tsc.c?v=RELENG6#L131
+// 139 error = sysctl_handle_int(oidp, &freq, sizeof(freq), req);
+#if __FreeBSD__ >= 7
+ uint64_t hz = 0;
+#else
+ unsigned int hz = 0;
+#endif
+ size_t sz = sizeof(hz);
+ const char* sysctl_path = "machdep.tsc_freq";
+ if (sysctlbyname(sysctl_path, &hz, &sz, nullptr, 0) != 0) {
+ fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
+ sysctl_path, strerror(errno));
+ cpuinfo_cycles_per_second = static_cast<double>(EstimateCyclesPerSecond());
+ } else {
+ cpuinfo_cycles_per_second = hz;
+ }
+// TODO: also figure out cpuinfo_num_cpus
+
+#elif defined BENCHMARK_OS_WINDOWS
+ // In NT, read MHz from the registry. If we fail to do so or we're in win9x
+ // then make a crude estimate.
+ DWORD data, data_size = sizeof(data);
+ if (IsWindowsXPOrGreater() &&
+ SUCCEEDED(
+ SHGetValueA(HKEY_LOCAL_MACHINE,
+ "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
+ "~MHz", nullptr, &data, &data_size)))
+ cpuinfo_cycles_per_second =
+ static_cast<double>((int64_t)data * (int64_t)(1000 * 1000)); // was mhz
+ else
+ cpuinfo_cycles_per_second = static_cast<double>(EstimateCyclesPerSecond());
+
+ SYSTEM_INFO sysinfo;
+ // Use memset as opposed to = {} to avoid GCC missing initializer false
+ // positives.
+ std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
+ GetSystemInfo(&sysinfo);
+ cpuinfo_num_cpus = sysinfo.dwNumberOfProcessors; // number of logical
+ // processors in the current
+ // group
+
+#elif defined BENCHMARK_OS_MACOSX
+ int32_t num_cpus = 0;
+ size_t size = sizeof(num_cpus);
+ if (::sysctlbyname("hw.ncpu", &num_cpus, &size, nullptr, 0) == 0 &&
+ (size == sizeof(num_cpus))) {
+ cpuinfo_num_cpus = num_cpus;
+ } else {
+ fprintf(stderr, "%s\n", strerror(errno));
+ std::exit(EXIT_FAILURE);
+ }
+ int64_t cpu_freq = 0;
+ size = sizeof(cpu_freq);
+ if (::sysctlbyname("hw.cpufrequency", &cpu_freq, &size, nullptr, 0) == 0 &&
+ (size == sizeof(cpu_freq))) {
+ cpuinfo_cycles_per_second = cpu_freq;
+ } else {
+ #if defined BENCHMARK_OS_IOS
+ fprintf(stderr, "CPU frequency cannot be detected. \n");
+ cpuinfo_cycles_per_second = 0;
+ #else
+ fprintf(stderr, "%s\n", strerror(errno));
+ std::exit(EXIT_FAILURE);
+ #endif
+ }
+#else
+ // Generic cycles per second counter
+ cpuinfo_cycles_per_second = static_cast<double>(EstimateCyclesPerSecond());
+#endif
+}
+
+} // end namespace
+
+double CyclesPerSecond(void) {
+ std::call_once(cpuinfo_init, InitializeSystemInfo);
+ return cpuinfo_cycles_per_second;
+}
+
+int NumCPUs(void) {
+ std::call_once(cpuinfo_init, InitializeSystemInfo);
+ return cpuinfo_num_cpus;
+}
+
+// The ""'s catch people who don't pass in a literal for "str"
+#define strliterallen(str) (sizeof("" str "") - 1)
+
+// Must use a string literal for prefix.
+#define memprefix(str, len, prefix) \
+ ((((len) >= strliterallen(prefix)) && \
+ std::memcmp(str, prefix, strliterallen(prefix)) == 0) \
+ ? str + strliterallen(prefix) \
+ : nullptr)
+
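+// Worked example (illustrative): memprefix("performance\n", 12,
+// "performance") compares the first 11 bytes and returns a pointer to the
+// trailing "\n", while memprefix("powersave", 9, "performance") yields
+// nullptr because the length check fails.
+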
+bool CpuScalingEnabled() {
+#ifndef BENCHMARK_OS_WINDOWS
+ // On Linux, the CPUfreq subsystem exposes CPU information as files on the
+ // local file system. If reading the exported files fails, then we may not be
+ // running on Linux, so we silently ignore all the read errors.
+ for (int cpu = 0, num_cpus = NumCPUs(); cpu < num_cpus; ++cpu) {
+ std::string governor_file =
+ StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
+ FILE* file = fopen(governor_file.c_str(), "r");
+ if (!file) break;
+ char buff[16];
+ size_t bytes_read = fread(buff, 1, sizeof(buff), file);
+ fclose(file);
+ if (memprefix(buff, bytes_read, "performance") == nullptr) return true;
+ }
+#endif
+ return false;
+}
+
+} // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.h
new file mode 100644
index 00000000..c5d9916d
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.h
@@ -0,0 +1,10 @@
+#ifndef BENCHMARK_SYSINFO_H_
+#define BENCHMARK_SYSINFO_H_
+
+namespace benchmark {
+int NumCPUs();
+double CyclesPerSecond();
+bool CpuScalingEnabled();
+} // end namespace benchmark
+
+#endif // BENCHMARK_SYSINFO_H_
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc b/MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc
new file mode 100644
index 00000000..817272d0
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc
@@ -0,0 +1,212 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "timers.h"
+#include "internal_macros.h"
+
+#ifdef BENCHMARK_OS_WINDOWS
+#include <Shlwapi.h>
+#include <VersionHelpers.h>
+#include <Windows.h>
+#else
+#include <fcntl.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
+#include <unistd.h>
+#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
+#include <sys/sysctl.h>
+#endif
+#if defined(BENCHMARK_OS_MACOSX)
+#include <mach/mach_init.h>
+#include <mach/mach_port.h>
+#include <mach/thread_act.h>
+#endif
+#endif
+
+#ifdef BENCHMARK_OS_EMSCRIPTEN
+#include <emscripten.h>
+#endif
+
+#include <cerrno>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <iostream>
+#include <limits>
+#include <mutex>
+
+#include "check.h"
+#include "log.h"
+#include "sleep.h"
+#include "string_util.h"
+
+namespace benchmark {
+
+// Suppress unused warnings on helper functions.
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+
+namespace {
+#if defined(BENCHMARK_OS_WINDOWS)
+double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
+ ULARGE_INTEGER kernel;
+ ULARGE_INTEGER user;
+ kernel.HighPart = kernel_time.dwHighDateTime;
+ kernel.LowPart = kernel_time.dwLowDateTime;
+ user.HighPart = user_time.dwHighDateTime;
+ user.LowPart = user_time.dwLowDateTime;
+ return (static_cast<double>(kernel.QuadPart) +
+ static_cast<double>(user.QuadPart)) *
+ 1e-7;
+}
+#else
+double MakeTime(struct rusage const& ru) {
+ return (static_cast<double>(ru.ru_utime.tv_sec) +
+ static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
+ static_cast<double>(ru.ru_stime.tv_sec) +
+ static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
+}
+#endif
+#if defined(BENCHMARK_OS_MACOSX)
+double MakeTime(thread_basic_info_data_t const& info) {
+ return (static_cast<double>(info.user_time.seconds) +
+ static_cast<double>(info.user_time.microseconds) * 1e-6 +
+ static_cast<double>(info.system_time.seconds) +
+ static_cast<double>(info.system_time.microseconds) * 1e-6);
+}
+#endif
+#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
+double MakeTime(struct timespec const& ts) {
+ return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
+}
+#endif
+
+BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) {
+ std::cerr << "ERROR: " << msg << std::endl;
+ std::exit(EXIT_FAILURE);
+}
+
+} // end namespace
+
+double ProcessCPUUsage() {
+#if defined(BENCHMARK_OS_WINDOWS)
+ HANDLE proc = GetCurrentProcess();
+ FILETIME creation_time;
+ FILETIME exit_time;
+ FILETIME kernel_time;
+ FILETIME user_time;
+ if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time,
+ &user_time))
+ return MakeTime(kernel_time, user_time);
+ DiagnoseAndExit("GetProccessTimes() failed");
+#elif defined(BENCHMARK_OS_EMSCRIPTEN)
+ // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
+ // Use Emscripten-specific API. Reported CPU time would be exactly the
+ // same as total time, but this is ok because there aren't long-latency
+  // synchronous system calls in Emscripten.
+ return emscripten_get_now() * 1e-3;
+#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
+  // FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See
+ // https://github.com/google/benchmark/pull/292
+ struct timespec spec;
+ if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
+ return MakeTime(spec);
+ DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
+#else
+ struct rusage ru;
+ if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru);
+ DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
+#endif
+}
+
+double ThreadCPUUsage() {
+#if defined(BENCHMARK_OS_WINDOWS)
+ HANDLE this_thread = GetCurrentThread();
+ FILETIME creation_time;
+ FILETIME exit_time;
+ FILETIME kernel_time;
+ FILETIME user_time;
+ GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time,
+ &user_time);
+ return MakeTime(kernel_time, user_time);
+#elif defined(BENCHMARK_OS_MACOSX)
+  // FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See
+ // https://github.com/google/benchmark/pull/292
+ mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
+ thread_basic_info_data_t info;
+ mach_port_t thread = pthread_mach_thread_np(pthread_self());
+ if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) ==
+ KERN_SUCCESS) {
+ return MakeTime(info);
+ }
+ DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
+#elif defined(BENCHMARK_OS_EMSCRIPTEN)
+ // Emscripten doesn't support traditional threads
+ return ProcessCPUUsage();
+#elif defined(BENCHMARK_OS_RTEMS)
+ // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
+ // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
+ return ProcessCPUUsage();
+#elif defined(CLOCK_THREAD_CPUTIME_ID)
+ struct timespec ts;
+ if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
+ DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
+#else
+#error Per-thread timing is not available on your system.
+#endif
+}
+
+namespace {
+
+std::string DateTimeString(bool local) {
+ typedef std::chrono::system_clock Clock;
+ std::time_t now = Clock::to_time_t(Clock::now());
+ const std::size_t kStorageSize = 128;
+ char storage[kStorageSize];
+ std::size_t written;
+
+ if (local) {
+#if defined(BENCHMARK_OS_WINDOWS)
+ written =
+ std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
+#else
+ std::tm timeinfo;
+ std::memset(&timeinfo, 0, sizeof(std::tm));
+ ::localtime_r(&now, &timeinfo);
+ written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
+#endif
+ } else {
+#if defined(BENCHMARK_OS_WINDOWS)
+ written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
+#else
+ std::tm timeinfo;
+ std::memset(&timeinfo, 0, sizeof(std::tm));
+ ::gmtime_r(&now, &timeinfo);
+ written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
+#endif
+ }
+ CHECK(written < kStorageSize);
+ ((void)written); // prevent unused variable in optimized mode.
+ return std::string(storage);
+}
+
+} // end namespace
+
+std::string LocalDateTimeString() { return DateTimeString(true); }
+
+} // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark-1.2.0/src/timers.h b/MicroBenchmarks/libs/benchmark-1.2.0/src/timers.h
new file mode 100644
index 00000000..65606ccd
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark-1.2.0/src/timers.h
@@ -0,0 +1,48 @@
+#ifndef BENCHMARK_TIMERS_H
+#define BENCHMARK_TIMERS_H
+
+#include <chrono>
+#include <string>
+
+namespace benchmark {
+
+// Return the CPU usage of the current process
+double ProcessCPUUsage();
+
+// Return the CPU usage of the children of the current process
+double ChildrenCPUUsage();
+
+// Return the CPU usage of the current thread
+double ThreadCPUUsage();
+
+#if defined(HAVE_STEADY_CLOCK)
+template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady>
+struct ChooseSteadyClock {
+ typedef std::chrono::high_resolution_clock type;
+};
+
+template <>
+struct ChooseSteadyClock<false> {
+ typedef std::chrono::steady_clock type;
+};
+#endif
+
+struct ChooseClockType {
+#if defined(HAVE_STEADY_CLOCK)
+ typedef ChooseSteadyClock<>::type type;
+#else
+ typedef std::chrono::high_resolution_clock type;
+#endif
+};
+
+inline double ChronoClockNow() {
+ typedef ChooseClockType::type ClockType;
+ using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
+ return FpSeconds(ClockType::now().time_since_epoch()).count();
+}
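+
+// Illustrative usage sketch: ChronoClockNow() returns seconds as a double,
+// so wall-clock intervals are simple subtractions.
+//
+//   const double start = ChronoClockNow();
+//   // ... work to be timed ...
+//   const double elapsed_seconds = ChronoClockNow() - start;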
+
+std::string LocalDateTimeString();
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_TIMERS_H