Mirror of https://github.com/vgough/encfs.git (synced 2024-11-21 15:33:16 +01:00)

Update google vendors (#589)
parent 5203bdb474, commit 4a95d700e6
vendor/github.com/google/benchmark/.gitignore (generated, vendored), 11 changed lines

@@ -6,6 +6,7 @@
 *.dylib
 *.cmake
 !/cmake/*.cmake
+!/test/AssemblyTests.cmake
 *~
 *.pyc
 __pycache__
@@ -41,6 +42,16 @@ build.ninja
 install_manifest.txt
 rules.ninja
 
+# bazel output symlinks.
+bazel-*
+
 # out-of-source build top-level folders.
 build/
 _build/
+
+# in-source dependencies
+/googletest/
+
+# Visual Studio 2015/2017 cache/options directory
+.vs/
+CMakeSettings.json
vendor/github.com/google/benchmark/.travis.yml (generated, vendored), 61 changed lines

@@ -31,14 +31,10 @@ matrix:
          - g++-multilib
      env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release BUILD_32_BITS=ON
    - compiler: gcc
-      addons:
-        apt:
-          sources:
-            - ubuntu-toolchain-r-test
-          packages:
-            - g++-6
      env:
+        - INSTALL_GCC6_FROM_PPA=1
        - COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug
+        - ENABLE_SANITIZER=1
        - EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold"
    - compiler: clang
      env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug
@@ -96,6 +92,7 @@ matrix:
      env:
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
        - LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address"
+        - ENABLE_SANITIZER=1
        - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all"
        - UBSAN_OPTIONS=print_stacktrace=1
    # Clang w/ libc++ and MSAN
@@ -107,6 +104,7 @@ matrix:
      env:
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
        - LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins
+        - ENABLE_SANITIZER=1
        - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins"
    # Clang w/ libc++ and MSAN
    - compiler: clang
@@ -117,8 +115,8 @@ matrix:
      env:
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo
        - LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread
+        - ENABLE_SANITIZER=1
        - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all"
-
    - os: osx
      osx_image: xcode8.3
      compiler: clang
@@ -129,27 +127,66 @@ matrix:
      compiler: clang
      env:
        - COMPILER=clang++ BUILD_TYPE=Release
+    - os: osx
+      osx_image: xcode8.3
+      compiler: gcc
+      env:
+        - COMPILER=g++-7 C_COMPILER=gcc-7 BUILD_TYPE=Debug
 
 before_script:
-  - if [ -z "$BUILD_32_BITS" ]; then
-      export BUILD_32_BITS=OFF && echo disabling 32 bit build;
-    fi
   - if [ -n "${LIBCXX_BUILD}" ]; then
      source .travis-libcxx-setup.sh;
    fi
-  - mkdir build && cd build
+  - if [ -n "${ENABLE_SANITIZER}" ]; then
+      export EXTRA_OPTIONS="-DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF";
+    else
+      export EXTRA_OPTIONS="";
+    fi
+  - mkdir -p build && cd build
 
+before_install:
+  - if [ -z "$BUILD_32_BITS" ]; then
+      export BUILD_32_BITS=OFF && echo disabling 32 bit build;
+    fi
+  - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then
+      sudo add-apt-repository -y "ppa:ubuntu-toolchain-r/test";
+      sudo apt-get update --option Acquire::Retries=100 --option Acquire::http::Timeout="60";
+    fi
+
 install:
+  - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then
+      sudo -E apt-get -yq --no-install-suggests --no-install-recommends install g++-6;
+    fi
+  - if [ "${TRAVIS_OS_NAME}" == "linux" -a "${BUILD_32_BITS}" == "OFF" ]; then
+      sudo -E apt-get -y --no-install-suggests --no-install-recommends install llvm-3.9-tools;
+      sudo cp /usr/lib/llvm-3.9/bin/FileCheck /usr/local/bin/;
+    fi
  - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
      PATH=~/.local/bin:${PATH};
      pip install --user --upgrade pip;
      pip install --user cpp-coveralls;
    fi
+  - if [ "${C_COMPILER}" == "gcc-7" -a "${TRAVIS_OS_NAME}" == "osx" ]; then
+      rm -f /usr/local/include/c++;
+      brew update;
+      brew install gcc@7;
+    fi
+  - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
+      sudo apt-get update -qq;
+      sudo apt-get install -qq unzip;
+      wget https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-linux-x86_64.sh --output-document bazel-installer.sh;
+      sudo bash bazel-installer.sh;
+    fi
+  - if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
+      curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-darwin-x86_64.sh;
+      sudo bash bazel-installer.sh;
+    fi
 
 script:
-  - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS}" -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ..
+  - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS}" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ${EXTRA_OPTIONS} ..
   - make
   - ctest -C ${BUILD_TYPE} --output-on-failure
+  - bazel test -c dbg --define google_benchmark.have_regex=posix --announce_rc --verbose_failures --test_output=errors --keep_going //test/...
 
 after_success:
  - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
vendor/github.com/google/benchmark/.ycm_extra_conf.py (generated, vendored), 2 changed lines

@@ -7,7 +7,7 @@ import ycm_core
 flags = [
 '-Wall',
 '-Werror',
-'-pendantic-errors',
+'-pedantic-errors',
 '-std=c++0x',
 '-fno-strict-aliasing',
 '-O3',
vendor/github.com/google/benchmark/AUTHORS (generated, vendored), 14 changed lines

@@ -10,9 +10,12 @@
 
 Albert Pretorius <pretoalb@gmail.com>
 Arne Beer <arne@twobeer.de>
+Carto
 Christopher Seymour <chris.j.seymour@hotmail.com>
 David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
-Dominic Hamon <dma@stripysock.com>
+Deniz Evrenci <denizevrenci@gmail.com>
+Dirac Research
+Dominik Czarnota <dominik.b.czarnota@gmail.com>
 Eric Fiselier <eric@efcs.ca>
 Eugene Zhuk <eugene.zhuk@gmail.com>
 Evgeny Safronov <division494@gmail.com>
@@ -21,20 +24,23 @@ Google Inc.
 International Business Machines Corporation
 Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
 Jern-Kuan Leong <jernkuan@gmail.com>
-Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
 JianXiong Zhou <zhoujianxiong2@gmail.com>
+Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
 Jussi Knuuttila <jussi.knuuttila@gmail.com>
 Kaito Udagawa <umireon@gmail.com>
+Kishan Kumar <kumar.kishan@outlook.com>
 Lei Xu <eddyxu@gmail.com>
 Matt Clarkson <mattyclarkson@gmail.com>
 Maxim Vafin <maxvafin@gmail.com>
+MongoDB Inc.
 Nick Hutchinson <nshutchinson@gmail.com>
 Oleksandr Sochka <sasha.sochka@gmail.com>
 Paul Redmond <paul.redmond@gmail.com>
 Radoslav Yovchev <radoslav.tm@gmail.com>
+Roman Lebedev <lebedev.ri@gmail.com>
 Shuo Chen <chenshuo@chenshuo.com>
+Steinar H. Gunderson <sgunderson@bigfoot.com>
+Stripe, Inc.
 Yixuan Qiu <yixuanq@gmail.com>
 Yusuke Suzuki <utatane.tea@gmail.com>
-Dirac Research
 Zbigniew Skowron <zbychs@gmail.com>
-Dominik Czarnota <dominik.b.czarnota@gmail.com>
vendor/github.com/google/benchmark/BUILD.bazel (generated, vendored, new file), 42 added lines

@@ -0,0 +1,42 @@
+licenses(["notice"])
+
+config_setting(
+    name = "windows",
+    values = {
+        "cpu": "x64_windows",
+    },
+    visibility = [":__subpackages__"],
+)
+
+cc_library(
+    name = "benchmark",
+    srcs = glob(
+        [
+            "src/*.cc",
+            "src/*.h",
+        ],
+        exclude = ["src/benchmark_main.cc"],
+    ),
+    hdrs = ["include/benchmark/benchmark.h"],
+    linkopts = select({
+        ":windows": ["-DEFAULTLIB:shlwapi.lib"],
+        "//conditions:default": ["-pthread"],
+    }),
+    strip_include_prefix = "include",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "benchmark_main",
+    srcs = ["src/benchmark_main.cc"],
+    hdrs = ["include/benchmark/benchmark.h"],
+    strip_include_prefix = "include",
+    visibility = ["//visibility:public"],
+    deps = [":benchmark"],
+)
+
+cc_library(
+    name = "benchmark_internal_headers",
+    hdrs = glob(["src/*.h"]),
+    visibility = ["//test:__pkg__"],
+)
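Aside, not part of the commit: the new `benchmark_main` target above wraps `src/benchmark_main.cc`, which supplies the library's default `main()` (the same role `-lbenchmark_main` plays in the README changes below). A minimal sketch of a file that would depend on it; the file name `example_bench.cc` is hypothetical, and the benchmark body mirrors the README's own `BM_StringCopy` example:

```c++
// example_bench.cc (hypothetical) -- built with deps = [":benchmark_main"],
// which already provides main(), so no BENCHMARK_MAIN(); is needed here.
#include <benchmark/benchmark.h>

#include <string>

static void BM_StringCopy(benchmark::State& state) {
  std::string x = "hello";
  for (auto _ : state)  // C++11 benchmark loop; only this body is timed
    std::string copy(x);
}
BENCHMARK(BM_StringCopy);
```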
vendor/github.com/google/benchmark/CMakeLists.txt (generated, vendored), 85 changed lines

@@ -5,6 +5,7 @@ project (benchmark)
 foreach(p
   CMP0054 # CMake 3.1
   CMP0056 # export EXE_LINKER_FLAGS to try_run
+  CMP0057 # Support no if() IN_LIST operator
   )
   if(POLICY ${p})
     cmake_policy(SET ${p} NEW)
@@ -15,11 +16,59 @@ option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
 option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
 option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
 option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
-option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library" OFF)
+option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
+option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON)
+
+# Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which
+# may require downloading the source code.
+option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree building of unmet dependencies" OFF)
+
+# This option can be used to disable building and running unit tests which depend on gtest
+# in cases where it is not possible to build or find a valid version of gtest.
+option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON)
+
+set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF)
+function(should_enable_assembly_tests)
+  if(CMAKE_BUILD_TYPE)
+    string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
+    if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
+      # FIXME: The --coverage flag needs to be removed when building assembly
+      # tests for this to work.
+      return()
+    endif()
+  endif()
+  if (MSVC)
+    return()
+  elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
+    return()
+  elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
+    # FIXME: Make these work on 32 bit builds
+    return()
+  elseif(BENCHMARK_BUILD_32_BITS)
+    # FIXME: Make these work on 32 bit builds
+    return()
+  endif()
+  find_program(LLVM_FILECHECK_EXE FileCheck)
+  if (LLVM_FILECHECK_EXE)
+    set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm filecheck" FORCE)
+    message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}")
+  else()
+    message(STATUS "Failed to find LLVM FileCheck")
+    return()
+  endif()
+  set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE)
+endfunction()
+should_enable_assembly_tests()
+
+# This option disables the building and running of the assembly verification tests
+option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the assembly tests"
+    ${ENABLE_ASSEMBLY_TESTS_DEFAULT})
+
 # Make sure we can import out CMake functions
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules")
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
 
 
 # Read the git tags to determine the project version
 include(GetGitVersion)
 get_git_version(GIT_VERSION)
@@ -95,9 +144,7 @@ else()
   if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
     add_cxx_compiler_flag(-fno-exceptions)
   endif()
-  if (NOT BENCHMARK_USE_LIBCXX)
-    add_cxx_compiler_flag(-Wzero-as-null-pointer-constant)
-  endif()
   if (HAVE_CXX_FLAG_FSTRICT_ALIASING)
     if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel") #ICC17u2: Many false positives for Wstrict-aliasing
       add_cxx_compiler_flag(-Wstrict-aliasing)
@@ -131,28 +178,27 @@ else()
     if (GCC_RANLIB)
       set(CMAKE_RANLIB ${GCC_RANLIB})
     endif()
+  elseif("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")
+    include(llvm-toolchain)
   endif()
 endif()
+
 # Coverage build type
-set(CMAKE_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}" CACHE STRING
-  "Flags used by the C++ compiler during coverage builds."
+set(BENCHMARK_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}"
+  CACHE STRING "Flags used by the C++ compiler during coverage builds."
   FORCE)
-set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
-  "${CMAKE_EXE_LINKER_FLAGS_DEBUG}" CACHE STRING
-  "Flags used for linking binaries during coverage builds."
+set(BENCHMARK_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG}"
+  CACHE STRING "Flags used for linking binaries during coverage builds."
   FORCE)
-set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
-  "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" CACHE STRING
-  "Flags used by the shared libraries linker during coverage builds."
+set(BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}"
+  CACHE STRING "Flags used by the shared libraries linker during coverage builds."
   FORCE)
 mark_as_advanced(
-  CMAKE_CXX_FLAGS_COVERAGE
-  CMAKE_EXE_LINKER_FLAGS_COVERAGE
-  CMAKE_SHARED_LINKER_FLAGS_COVERAGE)
+  BENCHMARK_CXX_FLAGS_COVERAGE
+  BENCHMARK_EXE_LINKER_FLAGS_COVERAGE
+  BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE)
 set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING
-  "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage."
-  FORCE)
+  "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.")
 add_cxx_compiler_flag(--coverage COVERAGE)
 endif()
@@ -170,7 +216,7 @@ if (BENCHMARK_USE_LIBCXX)
   # linker flags appear before all linker inputs and -lc++ must appear after.
   list(APPEND BENCHMARK_CXX_LIBRARIES c++)
 else()
-  message(FATAL "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler")
+  message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler")
 endif()
 endif(BENCHMARK_USE_LIBCXX)
@@ -198,5 +244,8 @@ add_subdirectory(src)
 
 if (BENCHMARK_ENABLE_TESTING)
   enable_testing()
+  if (BENCHMARK_ENABLE_GTEST_TESTS)
+    include(HandleGTest)
+  endif()
   add_subdirectory(test)
 endif()
vendor/github.com/google/benchmark/CONTRIBUTORS (generated, vendored), 16 changed lines

@@ -28,18 +28,22 @@ Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
 Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
 Christopher Seymour <chris.j.seymour@hotmail.com>
 David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
-Dominic Hamon <dma@stripysock.com>
+Deniz Evrenci <denizevrenci@gmail.com>
+Dominic Hamon <dma@stripysock.com> <dominic@google.com>
+Dominik Czarnota <dominik.b.czarnota@gmail.com>
 Eric Fiselier <eric@efcs.ca>
 Eugene Zhuk <eugene.zhuk@gmail.com>
 Evgeny Safronov <division494@gmail.com>
 Felix Homann <linuxaudio@showlabor.de>
 Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
 Jern-Kuan Leong <jernkuan@gmail.com>
-Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
 JianXiong Zhou <zhoujianxiong2@gmail.com>
+Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
+John Millikin <jmillikin@stripe.com>
 Jussi Knuuttila <jussi.knuuttila@gmail.com>
-Kaito Udagawa <umireon@gmail.com>
 Kai Wolf <kai.wolf@gmail.com>
+Kishan Kumar <kumar.kishan@outlook.com>
+Kaito Udagawa <umireon@gmail.com>
 Lei Xu <eddyxu@gmail.com>
 Matt Clarkson <mattyclarkson@gmail.com>
 Maxim Vafin <maxvafin@gmail.com>
@@ -49,11 +53,13 @@ Pascal Leroy <phl@google.com>
 Paul Redmond <paul.redmond@gmail.com>
 Pierre Phaneuf <pphaneuf@google.com>
 Radoslav Yovchev <radoslav.tm@gmail.com>
+Raul Marin <rmrodriguez@cartodb.com>
 Ray Glover <ray.glover@uk.ibm.com>
+Robert Guo <robert.guo@mongodb.com>
+Roman Lebedev <lebedev.ri@gmail.com>
 Shuo Chen <chenshuo@chenshuo.com>
+Tobias Ulvgård <tobias.ulvgard@dirac.se>
 Tom Madams <tom.ej.madams@gmail.com> <tmadams@google.com>
 Yixuan Qiu <yixuanq@gmail.com>
 Yusuke Suzuki <utatane.tea@gmail.com>
-Tobias Ulvgård <tobias.ulvgard@dirac.se>
 Zbigniew Skowron <zbychs@gmail.com>
-Dominik Czarnota <dominik.b.czarnota@gmail.com>
vendor/github.com/google/benchmark/README.md (generated, vendored), 314 changed lines

@@ -2,6 +2,7 @@
 [![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
 [![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
 [![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
+[![slackin](https://slackin-iqtfqnpzxd.now.sh/badge.svg)](https://slackin-iqtfqnpzxd.now.sh/)
 
 A library to support the benchmarking of functions, similar to unit-tests.
 
@@ -13,13 +14,94 @@ IRC channel: https://freenode.net #googlebenchmark
 
 [Additional Tooling Documentation](docs/tools.md)
 
+[Assembly Testing Documentation](docs/AssemblyTests.md)
+
+
+## Building
+
+The basic steps for configuring and building the library look like this:
+
+```bash
+$ git clone https://github.com/google/benchmark.git
+# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory.
+$ git clone https://github.com/google/googletest.git benchmark/googletest
+$ mkdir build && cd build
+$ cmake -G <generator> [options] ../benchmark
+# Assuming a makefile generator was used
+$ make
+```
+
+Note that Google Benchmark requires Google Test to build and run the tests. This
+dependency can be provided two ways:
+
+* Checkout the Google Test sources into `benchmark/googletest` as above.
+* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
+configuration, the library will automatically download and build any required
+dependencies.
+
+If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
+to `CMAKE_ARGS`.
+
+
+## Installation Guide
+
+For Ubuntu and Debian Based System
+
+First make sure you have git and cmake installed (If not please install it)
+
+```
+sudo apt-get install git
+sudo apt-get install cmake
+```
+
+Now, let's clone the repository and build it
+
+```
+git clone https://github.com/google/benchmark.git
+cd benchmark
+git clone https://github.com/google/googletest.git
+mkdir build
+cd build
+cmake .. -DCMAKE_BUILD_TYPE=RELEASE
+make
+```
+
+We need to install the library globally now
+
+```
+sudo make install
+```
+
+Now you have google/benchmark installed in your machine
+Note: Don't forget to link to pthread library while building
+
+## Stable and Experimental Library Versions
+
+The main branch contains the latest stable version of the benchmarking library;
+the API of which can be considered largely stable, with source breaking changes
+being made only upon the release of a new major version.
+
+Newer, experimental, features are implemented and tested on the
+[`v2` branch](https://github.com/google/benchmark/tree/v2). Users who wish
+to use, test, and provide feedback on the new features are encouraged to try
+this branch. However, this branch provides no stability guarantees and reserves
+the right to change and break the API at any time.
+
+##Prerequisite knowledge
+
+Before attempting to understand this framework one should ideally have some familiarity with the structure and format of the Google Test framework, upon which it is based. Documentation for Google Test, including a "Getting Started" (primer) guide, is available here:
+https://github.com/google/googletest/blob/master/googletest/docs/Documentation.md
+
+
 ## Example usage
 ### Basic usage
 Define a function that executes the code to be measured.
+
 ```c++
+#include <benchmark/benchmark.h>
+
 static void BM_StringCreation(benchmark::State& state) {
-  while (state.KeepRunning())
+  for (auto _ : state)
     std::string empty_string;
 }
 // Register the function as a benchmark
@@ -28,7 +110,7 @@ BENCHMARK(BM_StringCreation);
 // Define another benchmark
 static void BM_StringCopy(benchmark::State& state) {
   std::string x = "hello";
-  while (state.KeepRunning())
+  for (auto _ : state)
     std::string copy(x);
 }
 BENCHMARK(BM_StringCopy);
@@ -36,6 +118,13 @@ BENCHMARK(BM_StringCopy);
 BENCHMARK_MAIN();
 ```
 
+Don't forget to inform your linker to add benchmark library e.g. through
+`-lbenchmark` compilation flag. Alternatively, you may leave out the
+`BENCHMARK_MAIN();` at the end of the source file and link against
+`-lbenchmark_main` to get the same default behavior.
+
+The benchmark library will reporting the timing for the code within the `for(...)` loop.
+
 ### Passing arguments
 Sometimes a family of benchmarks can be implemented with just one routine that
 takes an extra argument to specify which one of the family of benchmarks to
@@ -47,7 +136,7 @@ static void BM_memcpy(benchmark::State& state) {
   char* src = new char[state.range(0)];
   char* dst = new char[state.range(0)];
   memset(src, 'x', state.range(0));
-  while (state.KeepRunning())
+  for (auto _ : state)
     memcpy(dst, src, state.range(0));
   state.SetBytesProcessed(int64_t(state.iterations()) *
                           int64_t(state.range(0)));
@@ -80,22 +169,23 @@ insertion.
 
 ```c++
 static void BM_SetInsert(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  std::set<int> data;
+  for (auto _ : state) {
     state.PauseTiming();
-    std::set<int> data = ConstructRandomSet(state.range(0));
+    data = ConstructRandomSet(state.range(0));
     state.ResumeTiming();
     for (int j = 0; j < state.range(1); ++j)
       data.insert(RandomNumber());
   }
 }
 BENCHMARK(BM_SetInsert)
-    ->Args({1<<10, 1})
-    ->Args({1<<10, 8})
-    ->Args({1<<10, 64})
+    ->Args({1<<10, 128})
+    ->Args({2<<10, 128})
+    ->Args({4<<10, 128})
+    ->Args({8<<10, 128})
     ->Args({1<<10, 512})
-    ->Args({8<<10, 1})
-    ->Args({8<<10, 8})
-    ->Args({8<<10, 64})
+    ->Args({2<<10, 512})
+    ->Args({4<<10, 512})
     ->Args({8<<10, 512});
 ```
 
@@ -105,7 +195,7 @@ product of the two specified ranges and will generate a benchmark for each such
 pair.
 
 ```c++
-BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {1, 512}});
+BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
 ```
 
 For more complex patterns of inputs, passing a custom function to `Apply` allows
@@ -131,7 +221,7 @@ running time and the normalized root-mean square error of string comparison.
 static void BM_StringCompare(benchmark::State& state) {
   std::string s1(state.range(0), '-');
   std::string s2(state.range(0), '-');
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
    benchmark::DoNotOptimize(s1.compare(s2));
  }
  state.SetComplexityN(state.range(0));
@@ -165,7 +255,7 @@ absence of multiprogramming.
 template <class Q> int BM_Sequential(benchmark::State& state) {
   Q q;
   typename Q::value_type v;
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
    for (int i = state.range(0); i--; )
      q.push(v);
    for (int e = state.range(0); e--; )
@@ -181,7 +271,7 @@ BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
 Three macros are provided for adding benchmark templates.
 
 ```c++
-#if __cplusplus >= 201103L // C++11 and greater.
+#ifdef BENCHMARK_HAS_CXX11
 #define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
 #else // C++ < C++11
 #define BENCHMARK_TEMPLATE(func, arg1)
@@ -190,6 +280,62 @@ Three macros are provided for adding benchmark templates.
 #define BENCHMARK_TEMPLATE2(func, arg1, arg2)
 ```
 
+### A Faster KeepRunning loop
+
+In C++11 mode, a ranged-based for loop should be used in preference to
+the `KeepRunning` loop for running the benchmarks. For example:
+
+```c++
+static void BM_Fast(benchmark::State &state) {
+  for (auto _ : state) {
+    FastOperation();
+  }
+}
+BENCHMARK(BM_Fast);
+```
+
+The reason the ranged-for loop is faster than using `KeepRunning`, is
+because `KeepRunning` requires a memory load and store of the iteration count
+ever iteration, whereas the ranged-for variant is able to keep the iteration count
+in a register.
+
+For example, an empty inner loop of using the ranged-based for method looks like:
+
+```asm
+# Loop Init
+  mov rbx, qword ptr [r14 + 104]
+  call benchmark::State::StartKeepRunning()
+  test rbx, rbx
+  je .LoopEnd
+.LoopHeader: # =>This Inner Loop Header: Depth=1
+  add rbx, -1
+  jne .LoopHeader
+.LoopEnd:
+```
+
+Compared to an empty `KeepRunning` loop, which looks like:
+
+```asm
+.LoopHeader: # in Loop: Header=BB0_3 Depth=1
+  cmp byte ptr [rbx], 1
+  jne .LoopInit
+.LoopBody: # =>This Inner Loop Header: Depth=1
+  mov rax, qword ptr [rbx + 8]
+  lea rcx, [rax + 1]
+  mov qword ptr [rbx + 8], rcx
+  cmp rax, qword ptr [rbx + 104]
+  jb .LoopHeader
+  jmp .LoopEnd
+.LoopInit:
+  mov rdi, rbx
+  call benchmark::State::StartKeepRunning()
+  jmp .LoopBody
+.LoopEnd:
+```
+
+Unless C++03 compatibility is required, the ranged-for variant of writing
+the benchmark loop should be preferred.
+
 ## Passing arbitrary arguments to a benchmark
 In C++11 it is possible to define a benchmark that takes an arbitrary number
 of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
@@ -199,11 +345,11 @@ The `test_case_name` is appended to the name of the benchmark and
 should describe the values passed.
 
 ```c++
-template <class ...ExtraArgs>`
+template <class ...ExtraArgs>
 void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
  [...]
 }
-// Registers a benchmark named "BM_takes_args/int_string_test` that passes
+// Registers a benchmark named "BM_takes_args/int_string_test" that passes
 // the specified values to `extra_args`.
 BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
 ```
@@ -223,8 +369,7 @@ scope, the `RegisterBenchmark` can be called anywhere. This allows for
 benchmark tests to be registered programmatically.
 
 Additionally `RegisterBenchmark` allows any callable object to be registered
-as a benchmark. Including capturing lambdas and function objects. This
-allows the creation
+as a benchmark. Including capturing lambdas and function objects.
 
 For Example:
 ```c++
@@ -240,9 +385,10 @@ int main(int argc, char** argv) {
 
 ### Multithreaded benchmarks
 In a multithreaded test (benchmark invoked by multiple threads simultaneously),
-it is guaranteed that none of the threads will start until all have called
-`KeepRunning`, and all will have finished before KeepRunning returns false. As
-such, any global setup or teardown can be wrapped in a check against the thread
+it is guaranteed that none of the threads will start until all have reached
+the start of the benchmark loop, and all will have finished before any thread
+exits the benchmark loop. (This behavior is also provided by the `KeepRunning()`
+API) As such, any global setup or teardown can be wrapped in a check against the thread
 index:
 
 ```c++
@@ -250,7 +396,7 @@ static void BM_MultiThreaded(benchmark::State& state) {
   if (state.thread_index == 0) {
     // Setup code here.
   }
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
    // Run the test as normal.
  }
  if (state.thread_index == 0) {
@@ -274,10 +420,10 @@ Without `UseRealTime`, CPU time is used by default.
 ## Manual timing
 For benchmarking something for which neither CPU time nor real-time are
 correct or accurate enough, completely manual timing is supported using
 the `UseManualTime` function.
 
 When `UseManualTime` is used, the benchmarked code must call
-`SetIterationTime` once per iteration of the `KeepRunning` loop to
+`SetIterationTime` once per iteration of the benchmark loop to
 report the manually measured time.
 
 An example use case for this is benchmarking GPU execution (e.g. OpenCL
@@ -293,7 +439,7 @@ static void BM_ManualTiming(benchmark::State& state) {
      static_cast<double>(microseconds)
  };
 
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
    auto start = std::chrono::high_resolution_clock::now();
    // Simulate some useful workload with a sleep
    std::this_thread::sleep_for(sleep_duration);
@@ -316,7 +462,7 @@ functions can be used.
 
 ```c++
 static void BM_test(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
    int x = 0;
    for (int i=0; i < 64; ++i) {
      benchmark::DoNotOptimize(x += i);
@@ -355,7 +501,7 @@ away.
 
 ```c++
 static void BM_vector_push_back(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
    std::vector<int> v;
    v.reserve(1);
    benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
@@ -384,7 +530,7 @@ the minimum time, or the wallclock time is 5x minimum time. The minimum time is
 set as a flag `--benchmark_min_time` or per benchmark by calling `MinTime` on
 the registered benchmark object.
 
-## Reporting the mean and standard devation by repeated benchmarks
+## Reporting the mean, median and standard deviation by repeated benchmarks
 By default each benchmark is run once and that single result is reported.
 However benchmarks are often noisy and a single result may not be representative
 of the overall behavior. For this reason it's possible to repeatedly rerun the
@@ -392,19 +538,42 @@ benchmark.
 
 The number of runs of each benchmark is specified globally by the
 `--benchmark_repetitions` flag or on a per benchmark basis by calling
-`Repetitions` on the registered benchmark object. When a benchmark is run
-more than once the mean and standard deviation of the runs will be reported.
+`Repetitions` on the registered benchmark object. When a benchmark is run more
+than once the mean, median and standard deviation of the runs will be reported.
 
 Additionally the `--benchmark_report_aggregates_only={true|false}` flag or
 `ReportAggregatesOnly(bool)` function can be used to change how repeated tests
 are reported. By default the result of each repeated run is reported. When this
-option is 'true' only the mean and standard deviation of the runs is reported.
+option is `true` only the mean, median and standard deviation of the runs is reported.
 Calling `ReportAggregatesOnly(bool)` on a registered benchmark object overrides
 the value of the flag for that benchmark.
+
+## User-defined statistics for repeated benchmarks
+While having mean, median and standard deviation is nice, this may not be
+enough for everyone. For example you may want to know what is the largest
+observation, e.g. because you have some real-time constraints. This is easy.
+The following code will specify a custom statistic to be calculated, defined
+by a lambda function.
+
+```c++
+void BM_spin_empty(benchmark::State& state) {
+  for (auto _ : state) {
+    for (int x = 0; x < state.range(0); ++x) {
+      benchmark::DoNotOptimize(x);
+    }
+  }
+}
+
+BENCHMARK(BM_spin_empty)
+  ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
+    return *(std::max_element(std::begin(v), std::end(v)));
+  })
+  ->Arg(512);
+```
+
 ## Fixtures
 Fixture tests are created by
-first defining a type that derives from ::benchmark::Fixture and then
+first defining a type that derives from `::benchmark::Fixture` and then
 creating/registering the tests using the following macros:
 
 * `BENCHMARK_F(ClassName, Method)`
@@ -417,13 +586,13 @@ For Example:
 class MyFixture : public benchmark::Fixture {};
 
 BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
-  while (st.KeepRunning()) {
+  for (auto _ : st) {
    ...
  }
 }
 
 BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
-  while (st.KeepRunning()) {
+  for (auto _ : st) {
    ...
  }
 }
@@ -432,6 +601,31 @@ BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
 /* BarTest is now registered */
 ```
 
+### Templated fixtures
+Also you can create templated fixture by using the following macros:
+
+* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
+* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
+
+For example:
+```c++
+template<typename T>
+class MyFixture : public benchmark::Fixture {};
+
+BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
+  for (auto _ : st) {
+    ...
+  }
+}
+
+BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
+  for (auto _ : st) {
+    ...
+  }
+}
+
+BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
+```
+
 ## User-defined counters
 
@@ -441,7 +635,7 @@ will add columns "Foo", "Bar" and "Baz" in its output:
 ```c++
 static void UserCountersExample1(benchmark::State& state) {
   double numFoos = 0, numBars = 0, numBazs = 0;
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
    // ... count Foo,Bar,Baz events
  }
  state.counters["Foo"] = numFoos;
@@ -564,11 +758,12 @@ When errors caused by external influences, such as file I/O and network
 communication, occur within a benchmark the
 `State::SkipWithError(const char* msg)` function can be used to skip that run
 of benchmark and report the error. Note that only future iterations of the
-`KeepRunning()` are skipped. Users may explicitly return to exit the
-benchmark immediately.
+`KeepRunning()` are skipped. For the ranged-for version of the benchmark loop
+Users must explicitly exit the loop, otherwise all iterations will be performed.
+Users may explicitly return to exit the benchmark immediately.
 
 The `SkipWithError(...)` function may be used at any point within the benchmark,
-including before and after the `KeepRunning()` loop.
+including before and after the benchmark loop.
 
 For example:
 
@@ -579,7 +774,7 @@ static void BM_test(benchmark::State& state) {
    state.SkipWithError("Resource is not good!");
    // KeepRunning() loop will not be entered.
  }
-  while (state.KeepRunning()) {
+  for (state.KeepRunning()) {
    auto data = resource.read_data();
    if (!resource.good()) {
      state.SkipWithError("Failed to read data!");
@@ -588,6 +783,14 @@ static void BM_test(benchmark::State& state) {
    do_stuff(data);
  }
 }
+
+static void BM_test_ranged_fo(benchmark::State & state) {
+  state.SkipWithError("test will not be entered");
+  for (auto _ : state) {
+    state.SkipWithError("Failed!");
+    break; // REQUIRED to prevent all further iterations.
+  }
+}
 ```
 
 ## Running a subset of the benchmarks
@@ -614,7 +817,7 @@ The library supports multiple output formats. Use the
 is the default format.
 
 The Console format is intended to be a human readable format. By default
 the format generates color output. Context is output on stderr and the
 tabular data on stdout. Example tabular output looks like:
 ```
 Benchmark                Time(ns)    CPU(ns) Iterations
@@ -627,7 +830,7 @@ BM_SetInsert/1024/10 33157 33648 21431 1.13369M
 The JSON format outputs human readable json split into two top level attributes.
 The `context` attribute contains information about the run in general, including
 information about the CPU and the date.
-The `benchmarks` attribute contains a list of ever benchmark run. Example json
+The `benchmarks` attribute contains a list of every benchmark run. Example json
 output looks like:
 ```json
 {
@@ -695,9 +898,15 @@ To enable link-time optimisation, use
 cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
 ```
 
+If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake cache variables, if autodetection fails.
+If you are using clang, you may need to set `LLVMAR_EXECUTABLE`, `LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
+
 ## Linking against the library
-When using gcc, it is necessary to link against pthread to avoid runtime exceptions.
-This is due to how gcc implements std::thread.
+When the library is built using GCC it is necessary to link with `-pthread`,
+due to how GCC implements `std::thread`.
+
+For GCC 4.x failing to link to pthreads will lead to runtime exceptions, not linker errors.
 See [issue #67](https://github.com/google/benchmark/issues/67) for more details.
 
 ## Compiler Support
@@ -717,10 +926,25 @@ Anything older *may* work.
 Note: Using the library and its headers in C++03 is supported. C++11 is only
 required to build the library.
 
+## Disable CPU frequency scaling
+If you see this error:
+```
+***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
+```
+you might want to disable the CPU frequency scaling while running the benchmark:
+```bash
+sudo cpupower frequency-set --governor performance
+./mybench
+sudo cpupower frequency-set --governor powersave
+```
+
 # Known Issues
 
-### Windows
+### Windows with CMake
 
 * Users must manually link `shlwapi.lib`. Failure to do so may result
   in unresolved symbols.
+
+### Solaris
+
+* Users must explicitly link with kstat library (-lkstat compilation flag).
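Aside, not part of the commit: the README hunks above migrate every example from `while (state.KeepRunning())` to the C++11 ranged-for loop while keeping `KeepRunning()` available for C++03 users. A sketch of how one source file could support both modes, relying only on the `BENCHMARK_HAS_CXX11` feature macro that the updated README itself references; the benchmark name `BM_Accumulate` is hypothetical:

```c++
#include <benchmark/benchmark.h>

static void BM_Accumulate(benchmark::State& state) {
#ifdef BENCHMARK_HAS_CXX11
  // Preferred form: the iteration count can stay in a register.
  for (auto _ : state) {
    int x = 0;
    for (int i = 0; i < 64; ++i) benchmark::DoNotOptimize(x += i);
  }
#else
  // C++03 fallback: KeepRunning() loads and stores the count each pass.
  while (state.KeepRunning()) {
    int x = 0;
    for (int i = 0; i < 64; ++i) benchmark::DoNotOptimize(x += i);
  }
#endif
}
BENCHMARK(BM_Accumulate);
BENCHMARK_MAIN();
```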
7
vendor/github.com/google/benchmark/WORKSPACE
generated
vendored
Normal file
7
vendor/github.com/google/benchmark/WORKSPACE
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
workspace(name = "com_github_google_benchmark")
|
||||||
|
|
||||||
|
http_archive(
|
||||||
|
name = "com_google_googletest",
|
||||||
|
urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
|
||||||
|
strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
|
||||||
|
)
|
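With this WORKSPACE in place, Bazel can fetch the pinned googletest archive itself at build time. A minimal sketch of the workflow, run from the repository root (`//...` just means "all targets" and assumes nothing beyond the checked-in BUILD files):

```bash
# Sketch: let Bazel download the googletest revision pinned above and
# build every target in the workspace.
bazel build //...
```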
2 vendor/github.com/google/benchmark/appveyor.yml generated vendored
@@ -43,7 +43,7 @@ build_script:
   - md _build -Force
   - cd _build
   - echo %configuration%
-  - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" ..
+  - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON ..
   - cmake --build . --config %configuration%
 
 test_script:
12 vendor/github.com/google/benchmark/cmake/AddCXXCompilerFlag.cmake generated vendored
@@ -38,7 +38,7 @@ function(add_cxx_compiler_flag FLAG)
     if(ARGV1)
       string(TOUPPER "_${VARIANT}" VARIANT)
     endif()
-    set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
+    set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
   endif()
 endfunction()
 
@@ -62,3 +62,13 @@ function(add_required_cxx_compiler_flag FLAG)
     message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler")
   endif()
 endfunction()
+
+function(check_cxx_warning_flag FLAG)
+  mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
+  set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
+  # Add -Werror to ensure the compiler generates an error if the warning flag
+  # doesn't exist.
+  set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}")
+  check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
+  set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
+endfunction()
34 vendor/github.com/google/benchmark/cmake/CXXFeatureCheck.cmake generated vendored
@@ -22,18 +22,37 @@ function(cxx_feature_check FILE)
   string(TOUPPER ${FILE} VAR)
   string(TOUPPER "HAVE_${VAR}" FEATURE)
   if (DEFINED HAVE_${VAR})
-    set(HAVE_${VAR} 1 CACHE INTERNAL "Feature test for ${FILE}" PARENT_SCOPE)
+    set(HAVE_${VAR} 1 PARENT_SCOPE)
     add_definitions(-DHAVE_${VAR})
     return()
   endif()
-  message("-- Performing Test ${FEATURE}")
-  try_run(RUN_${FEATURE} COMPILE_${FEATURE}
-          ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
-          CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
-          LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
+
+  if (NOT DEFINED COMPILE_${FEATURE})
+    message("-- Performing Test ${FEATURE}")
+    if(CMAKE_CROSSCOMPILING)
+      try_compile(COMPILE_${FEATURE}
+              ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
+              CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
+              LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
+      if(COMPILE_${FEATURE})
+        message(WARNING
+              "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
+        set(RUN_${FEATURE} 0)
+      else()
+        set(RUN_${FEATURE} 1)
+      endif()
+    else()
+      message("-- Performing Test ${FEATURE}")
+      try_run(RUN_${FEATURE} COMPILE_${FEATURE}
+              ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
+              CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
+              LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
+    endif()
+  endif()
+
   if(RUN_${FEATURE} EQUAL 0)
     message("-- Performing Test ${FEATURE} -- success")
-    set(HAVE_${VAR} 1 CACHE INTERNAL "Feature test for ${FILE}" PARENT_SCOPE)
+    set(HAVE_${VAR} 1 PARENT_SCOPE)
     add_definitions(-DHAVE_${VAR})
   else()
     if(NOT COMPILE_${FEATURE})
@@ -43,4 +62,3 @@ function(cxx_feature_check FILE)
     endif()
   endif()
 endfunction()
-
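The warning in the cross-compiling branch points at the practical workaround: pre-seed the feature-test result on the configure line. A hedged sketch (`HAVE_STD_REGEX` is one of the feature probes in this repository, used here for illustration; the toolchain file name is a placeholder):

```bash
# Sketch: cross compiling with one feature test forced off, as the
# message(WARNING ...) branch above suggests.
cmake -DCMAKE_TOOLCHAIN_FILE=my-cross-toolchain.cmake -DHAVE_STD_REGEX=0 ..
```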
3 vendor/github.com/google/benchmark/cmake/GetGitVersion.cmake generated vendored
@@ -21,6 +21,7 @@ set(__get_git_version INCLUDED)
 function(get_git_version var)
   if(GIT_EXECUTABLE)
       execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
+          WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
           RESULT_VARIABLE status
           OUTPUT_VARIABLE GIT_VERSION
           ERROR_QUIET)
@@ -33,9 +34,11 @@ function(get_git_version var)
 
   # Work out if the repository is dirty
   execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh
+      WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
       OUTPUT_QUIET
       ERROR_QUIET)
   execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD --
+      WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
       OUTPUT_VARIABLE GIT_DIFF_INDEX
       ERROR_QUIET)
   string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)
113 vendor/github.com/google/benchmark/cmake/HandleGTest.cmake generated vendored Normal file
@@ -0,0 +1,113 @@

include(split_list)

macro(build_external_gtest)
  include(ExternalProject)
  set(GTEST_FLAGS "")
  if (BENCHMARK_USE_LIBCXX)
    if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
      list(APPEND GTEST_FLAGS -stdlib=libc++)
    else()
      message(WARNING "Unsupported compiler (${CMAKE_CXX_COMPILER}) when using libc++")
    endif()
  endif()
  if (BENCHMARK_BUILD_32_BITS)
    list(APPEND GTEST_FLAGS -m32)
  endif()
  if (NOT "${CMAKE_CXX_FLAGS}" STREQUAL "")
    list(APPEND GTEST_FLAGS ${CMAKE_CXX_FLAGS})
  endif()
  string(TOUPPER "${CMAKE_BUILD_TYPE}" GTEST_BUILD_TYPE)
  if ("${GTEST_BUILD_TYPE}" STREQUAL "COVERAGE")
    set(GTEST_BUILD_TYPE "DEBUG")
  endif()
  # FIXME: Since 10/Feb/2017 the googletest trunk has had a bug where
  # -Werror=unused-function fires during the build on OS X. This is a temporary
  # workaround to keep our travis bots from failing. It should be removed
  # once gtest is fixed.
  if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
    list(APPEND GTEST_FLAGS "-Wno-unused-function")
  endif()
  split_list(GTEST_FLAGS)
  set(EXCLUDE_FROM_ALL_OPT "")
  set(EXCLUDE_FROM_ALL_VALUE "")
  if (${CMAKE_VERSION} VERSION_GREATER "3.0.99")
    set(EXCLUDE_FROM_ALL_OPT "EXCLUDE_FROM_ALL")
    set(EXCLUDE_FROM_ALL_VALUE "ON")
  endif()
  ExternalProject_Add(googletest
      ${EXCLUDE_FROM_ALL_OPT} ${EXCLUDE_FROM_ALL_VALUE}
      GIT_REPOSITORY https://github.com/google/googletest.git
      GIT_TAG master
      PREFIX "${CMAKE_BINARY_DIR}/googletest"
      INSTALL_DIR "${CMAKE_BINARY_DIR}/googletest"
      CMAKE_CACHE_ARGS
        -DCMAKE_BUILD_TYPE:STRING=${GTEST_BUILD_TYPE}
        -DCMAKE_C_COMPILER:STRING=${CMAKE_C_COMPILER}
        -DCMAKE_CXX_COMPILER:STRING=${CMAKE_CXX_COMPILER}
        -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
        -DCMAKE_INSTALL_LIBDIR:PATH=<INSTALL_DIR>/lib
        -DCMAKE_CXX_FLAGS:STRING=${GTEST_FLAGS}
        -Dgtest_force_shared_crt:BOOL=ON
      )

  ExternalProject_Get_Property(googletest install_dir)
  set(GTEST_INCLUDE_DIRS ${install_dir}/include)
  file(MAKE_DIRECTORY ${GTEST_INCLUDE_DIRS})

  set(LIB_SUFFIX "${CMAKE_STATIC_LIBRARY_SUFFIX}")
  set(LIB_PREFIX "${CMAKE_STATIC_LIBRARY_PREFIX}")
  if("${GTEST_BUILD_TYPE}" STREQUAL "DEBUG")
    set(LIB_SUFFIX "d${CMAKE_STATIC_LIBRARY_SUFFIX}")
  endif()

  # Use gmock_main instead of gtest_main because it initializes gtest as well.
  # Note: The libraries are listed in reverse order of their dependencies.
  foreach(LIB gtest gmock gmock_main)
    add_library(${LIB} UNKNOWN IMPORTED)
    set_target_properties(${LIB} PROPERTIES
      IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}${LIB}${LIB_SUFFIX}
      INTERFACE_INCLUDE_DIRECTORIES ${GTEST_INCLUDE_DIRS}
      INTERFACE_LINK_LIBRARIES "${GTEST_BOTH_LIBRARIES}"
    )
    add_dependencies(${LIB} googletest)
    list(APPEND GTEST_BOTH_LIBRARIES ${LIB})
  endforeach()
endmacro(build_external_gtest)

if (BENCHMARK_ENABLE_GTEST_TESTS)
  if (IS_DIRECTORY ${CMAKE_SOURCE_DIR}/googletest)
    set(GTEST_ROOT "${CMAKE_SOURCE_DIR}/googletest")
    set(INSTALL_GTEST OFF CACHE INTERNAL "")
    set(INSTALL_GMOCK OFF CACHE INTERNAL "")
    add_subdirectory(${CMAKE_SOURCE_DIR}/googletest)
    set(GTEST_BOTH_LIBRARIES gtest gmock gmock_main)
    foreach(HEADER test mock)
      # CMake 2.8 and older don't respect INTERFACE_INCLUDE_DIRECTORIES, so we
      # have to add the paths ourselves.
      set(HFILE g${HEADER}/g${HEADER}.h)
      set(HPATH ${GTEST_ROOT}/google${HEADER}/include)
      find_path(HEADER_PATH_${HEADER} ${HFILE}
          NO_DEFAULT_PATHS
          HINTS ${HPATH}
      )
      if (NOT HEADER_PATH_${HEADER})
        message(FATAL_ERROR "Failed to find header ${HFILE} in ${HPATH}")
      endif()
      list(APPEND GTEST_INCLUDE_DIRS ${HEADER_PATH_${HEADER}})
    endforeach()
  elseif(BENCHMARK_DOWNLOAD_DEPENDENCIES)
    build_external_gtest()
  else()
    find_package(GTest REQUIRED)
    find_path(GMOCK_INCLUDE_DIRS gmock/gmock.h
        HINTS ${GTEST_INCLUDE_DIRS})
    if (NOT GMOCK_INCLUDE_DIRS)
      message(FATAL_ERROR "Failed to find header gmock/gmock.h with hint ${GTEST_INCLUDE_DIRS}")
    endif()
    set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIRS} ${GMOCK_INCLUDE_DIRS})
    # FIXME: We don't currently require the gmock library to build the tests,
    # and it's likely we won't find it, so we don't try. As long as we've
    # found the gmock/gmock.h header and gtest_main that should be good enough.
  endif()
endif()
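The three branches above (in-source copy, ExternalProject download, installed package) are selected entirely from the configure line. A sketch of the download path, using only the two options that appear in the file above and assuming an out-of-source build directory:

```bash
# Sketch: build the gtest-based tests, letting ExternalProject fetch
# googletest instead of relying on an in-source copy or find_package().
cmake -DBENCHMARK_ENABLE_GTEST_TESTS=ON -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON ..
```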
16 vendor/github.com/google/benchmark/cmake/Modules/FindLLVMAr.cmake generated vendored Normal file
@@ -0,0 +1,16 @@
include(FeatureSummary)

find_program(LLVMAR_EXECUTABLE
  NAMES llvm-ar
  DOC "The llvm-ar executable"
  )

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LLVMAr
  DEFAULT_MSG
  LLVMAR_EXECUTABLE)

SET_PACKAGE_PROPERTIES(LLVMAr PROPERTIES
  URL https://llvm.org/docs/CommandGuide/llvm-ar.html
  DESCRIPTION "create, modify, and extract from archives"
  )
16 vendor/github.com/google/benchmark/cmake/Modules/FindLLVMNm.cmake generated vendored Normal file
@@ -0,0 +1,16 @@
include(FeatureSummary)

find_program(LLVMNM_EXECUTABLE
  NAMES llvm-nm
  DOC "The llvm-nm executable"
  )

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LLVMNm
  DEFAULT_MSG
  LLVMNM_EXECUTABLE)

SET_PACKAGE_PROPERTIES(LLVMNm PROPERTIES
  URL https://llvm.org/docs/CommandGuide/llvm-nm.html
  DESCRIPTION "list LLVM bitcode and object file's symbol table"
  )
15 vendor/github.com/google/benchmark/cmake/Modules/FindLLVMRanLib.cmake generated vendored Normal file
@@ -0,0 +1,15 @@
include(FeatureSummary)

find_program(LLVMRANLIB_EXECUTABLE
  NAMES llvm-ranlib
  DOC "The llvm-ranlib executable"
  )

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LLVMRanLib
  DEFAULT_MSG
  LLVMRANLIB_EXECUTABLE)

SET_PACKAGE_PROPERTIES(LLVMRanLib PROPERTIES
  DESCRIPTION "generate index for LLVM archive"
  )
11 vendor/github.com/google/benchmark/cmake/benchmark.pc.in generated vendored Normal file
@@ -0,0 +1,11 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
libdir=${prefix}/lib
includedir=${prefix}/include

Name: @PROJECT_NAME@
Description: Google microbenchmark framework
Version: @VERSION@

Libs: -L${libdir} -lbenchmark
Cflags: -I${includedir}
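Once the library is installed and this `.pc` file lands on pkg-config's search path, consumers can pick up the flags without knowing the install prefix. A minimal sketch (the source file name is a placeholder):

```bash
# Sketch: compile and link against an installed libbenchmark via the
# pkg-config metadata generated from benchmark.pc.in above.
g++ -std=c++11 mybench.cc $(pkg-config --cflags --libs benchmark) -o mybench
```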
8 vendor/github.com/google/benchmark/cmake/llvm-toolchain.cmake generated vendored Normal file
@@ -0,0 +1,8 @@
find_package(LLVMAr REQUIRED)
set(CMAKE_AR "${LLVMAR_EXECUTABLE}" CACHE FILEPATH "" FORCE)

find_package(LLVMNm REQUIRED)
set(CMAKE_NM "${LLVMNM_EXECUTABLE}" CACHE FILEPATH "" FORCE)

find_package(LLVMRanLib REQUIRED)
set(CMAKE_RANLIB "${LLVMRANLIB_EXECUTABLE}" CACHE FILEPATH "" FORCE)
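This file lets an LTO build with Clang swap in the LLVM binutils wholesale via the Find modules above. A hedged sketch of one way to use it (the relative path assumes you configure from the source root; the option combination is illustrative):

```bash
# Sketch: a Clang LTO build that picks up llvm-ar/llvm-nm/llvm-ranlib
# through the toolchain file above.
cmake -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ \
      -DCMAKE_TOOLCHAIN_FILE=cmake/llvm-toolchain.cmake \
      -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true ..
```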
3 vendor/github.com/google/benchmark/cmake/split_list.cmake generated vendored Normal file
@@ -0,0 +1,3 @@
macro(split_list listname)
  string(REPLACE ";" " " ${listname} "${${listname}}")
endmacro()
147 vendor/github.com/google/benchmark/docs/AssemblyTests.md generated vendored Normal file
@@ -0,0 +1,147 @@
# Assembly Tests

The Benchmark library provides a number of functions whose primary
purpose is to affect assembly generation, including `DoNotOptimize`
and `ClobberMemory`. In addition there are other functions,
such as `KeepRunning`, for which generating good assembly is paramount.

For these functions it's important to have tests that verify the
correctness and quality of the implementation. This requires testing
the code generated by the compiler.

This document describes how the Benchmark library tests compiler output,
as well as how to properly write new tests.

## Anatomy of a Test

Writing a test has two steps:

* Write the code you want to generate assembly for.
* Add `// CHECK` lines to match against the verified assembly.

Example:
```c++
// CHECK-LABEL: test_add:
extern "C" int test_add() {
  extern int ExternInt;
  return ExternInt + 1;

  // CHECK: movl ExternInt(%rip), %eax
  // CHECK: addl %eax
  // CHECK: ret
}
```

#### LLVM Filecheck

[LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html)
is used to test the generated assembly against the `// CHECK` lines
specified in the test's source file. Please see the documentation
linked above for information on how to write `CHECK` directives.

#### Tips and Tricks:

* Tests should match the minimal amount of output required to establish
correctness. `CHECK` directives don't have to match on the exact next line
after the previous match, so tests should omit checks for unimportant
bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive)
can be used to ensure a match occurs exactly after the previous match).

* The tests are compiled with `-O3 -g0`. So we're only testing the
optimized output.

* The assembly output is further cleaned up using `tools/strip_asm.py`.
This removes comments, assembler directives, and unused labels before
the test is run.

* The generated and stripped assembly file for a test is output under
`<build-directory>/test/<test-name>.s`

* Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes)
to specify lines that should only match in certain situations.
The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that
are only expected to match Clang's or GCC's output respectively. Normal
`CHECK` lines match against all compilers. (Note: `CHECK-NOT` and
`CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed
`CHECK` lines.)

* Use `extern "C"` to disable name mangling for specific functions. This
makes them easier to name in the `CHECK` lines.

## Problems Writing Portable Tests

Writing tests which check the code generated by a compiler is
inherently non-portable. Different compilers and even different compiler
versions may generate entirely different code. The Benchmark tests
must tolerate this.

LLVM Filecheck provides a number of mechanisms to help write
"more portable" tests, including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax),
allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables)
for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive).

#### Capturing Variables

For example, say GCC stores a variable in a register but Clang stores
it in memory. To write a test that tolerates both cases we "capture"
the destination of the store, and then use the captured expression
to write the remainder of the test.

```c++
// CHECK-LABEL: test_div_no_op_into_shr:
extern "C" int test_div_no_op_into_shr(int value) {
  int divisor = 2;
  benchmark::DoNotOptimize(divisor); // hide the value from the optimizer
  return value / divisor;

  // CHECK: movl $2, [[DEST:.*]]
  // CHECK: idivl [[DEST]]
  // CHECK: ret
}
```

#### Using Regular Expressions to Match Differing Output

Often tests require testing assembly lines which may subtly differ
between compilers or compiler versions. A common example of this
is matching stack frame addresses. In this case regular expressions
can be used to match the differing bits of output. For example:

```c++
int ExternInt;
struct Point { int x, y, z; };

// CHECK-LABEL: test_store_point:
extern "C" void test_store_point() {
  Point p{ExternInt, ExternInt, ExternInt};
  benchmark::DoNotOptimize(p);

  // CHECK: movl ExternInt(%rip), %eax
  // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
  // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
  // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
  // CHECK: ret
}
```

## Current Requirements and Limitations

The tests require Filecheck to be installed along the `PATH` of the
build machine. Otherwise the tests will be disabled.

Additionally, as mentioned in the previous section, codegen tests are
inherently non-portable. Currently the tests are limited to:

* x86_64 targets.
* Compiled with GCC or Clang

Further work could be done, at least on a limited basis, to extend the
tests to other architectures and compilers (using `CHECK` prefixes).

Furthermore, the tests fail for builds which specify additional flags
that modify code generation, including `--coverage` or `-fsanitize=`.
243 vendor/github.com/google/benchmark/docs/tools.md generated vendored
@@ -11,49 +11,232 @@ $ compare_bench.py <old-benchmark> <new-benchmark> [benchmark options]...
 
 Where `<old-benchmark>` and `<new-benchmark>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
 
+`[benchmark options]` will be passed to the benchmark invocations. They can be anything that the binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
+
 The sample output using the JSON test files under `Inputs/` gives:
 
 ``` bash
 $ ./compare_bench.py ./gbench/Inputs/test1_run1.json ./gbench/Inputs/test1_run2.json
 Comparing ./gbench/Inputs/test1_run1.json to ./gbench/Inputs/test1_run2.json
-Benchmark                   Time           CPU
-----------------------------------------------
-BM_SameTimes               +0.00         +0.00
-BM_2xFaster                -0.50         -0.50
-BM_2xSlower                +1.00         +1.00
-BM_10PercentFaster         -0.10         -0.10
-BM_10PercentSlower         +0.10         +0.10
+Benchmark                 Time       CPU   Time Old   Time New   CPU Old   CPU New
+----------------------------------------------------------------------------------
+BM_SameTimes           +0.0000   +0.0000         10         10        10        10
+BM_2xFaster            -0.5000   -0.5000         50         25        50        25
+BM_2xSlower            +1.0000   +1.0000         50        100        50       100
+BM_1PercentFaster      -0.0100   -0.0100        100         99       100        99
+BM_1PercentSlower      +0.0100   +0.0100        100        101       100       101
+BM_10PercentFaster     -0.1000   -0.1000        100         90       100        90
+BM_10PercentSlower     +0.1000   +0.1000        100        110       100       110
+BM_100xSlower         +99.0000  +99.0000        100      10000       100     10000
+BM_100xFaster          -0.9900   -0.9900      10000        100     10000       100
+BM_10PercentCPUToTime  +0.1000   -0.1000        100        110       100        90
+BM_ThirdFaster         -0.3333   -0.3334        100         67       100        67
+BM_BadTimeUnit         -0.9000   +0.2000          0          0         0         1
 ```
+
+As you can note, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
 
 When a benchmark executable is run, the raw output from the benchmark is printed in real time to stdout. The sample output using `benchmark/basic_test` for both arguments looks like:
 
 ```
 ./compare_bench.py test/basic_test test/basic_test --benchmark_filter=BM_empty.*
-RUNNING: test/basic_test --benchmark_filter=BM_empty.*
-Run on (4 X 4228.32 MHz CPU s)
-2016-08-02 19:21:33
+RUNNING: test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmpN7LF3a
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 23:28:36
+---------------------------------------------------------------------
 Benchmark                      Time           CPU Iterations
---------------------------------------------------------------------
-BM_empty                          9 ns          9 ns   79545455
-BM_empty/threads:4                4 ns          9 ns   75268816
-BM_empty_stop_start               8 ns          8 ns   83333333
-BM_empty_stop_start/threads:4     3 ns          8 ns   83333332
-RUNNING: test/basic_test --benchmark_filter=BM_empty.*
-Run on (4 X 4228.32 MHz CPU s)
-2016-08-02 19:21:35
+---------------------------------------------------------------------
+BM_empty                          4 ns          4 ns  170178757
+BM_empty/threads:8                1 ns          7 ns  103868920
+BM_empty_stop_start               0 ns          0 ns 1000000000
+BM_empty_stop_start/threads:8     0 ns          0 ns 1403031720
+RUNNING: /test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmplvrIp8
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 23:28:38
+---------------------------------------------------------------------
 Benchmark                      Time           CPU Iterations
---------------------------------------------------------------------
-BM_empty                          9 ns          9 ns   76086957
-BM_empty/threads:4                4 ns          9 ns   76086956
-BM_empty_stop_start               8 ns          8 ns   87500000
-BM_empty_stop_start/threads:4     3 ns          8 ns   88607596
-Comparing test/basic_test to test/basic_test
-Benchmark                       Time          CPU
----------------------------------------------------------
-BM_empty                       +0.00        +0.00
-BM_empty/threads:4             +0.00        +0.00
-BM_empty_stop_start            +0.00        +0.00
-BM_empty_stop_start/threads:4  +0.00        +0.00
+---------------------------------------------------------------------
+BM_empty                          4 ns          4 ns  169534855
+BM_empty/threads:8                1 ns          7 ns  104188776
+BM_empty_stop_start               0 ns          0 ns 1000000000
+BM_empty_stop_start/threads:8     0 ns          0 ns 1404159424
+Comparing ../build/test/basic_test to ../build/test/basic_test
+Benchmark                          Time       CPU   Time Old   Time New   CPU Old   CPU New
+-------------------------------------------------------------------------------------------
+BM_empty                        -0.0048   -0.0049          4          4         4         4
+BM_empty/threads:8              -0.0123   -0.0054          1          1         7         7
+BM_empty_stop_start             -0.0000   -0.0000          0          0         0         0
+BM_empty_stop_start/threads:8   -0.0029   +0.0001          0          0         0         0
+
 ```
+
+As you can note, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
+
 Obviously this example doesn't give any useful output, but it's intended to show the output format when 'compare_bench.py' needs to run benchmarks.
+
+## compare.py
+
+The `compare.py` script can be used to compare the results of benchmarks.
+There are three modes of operation:
+
+1. Just compare two benchmarks (what `compare_bench.py` did).
+The program is invoked like:
+
+``` bash
+$ compare.py benchmarks <benchmark_baseline> <benchmark_contender> [benchmark options]...
+```
+Where `<benchmark_baseline>` and `<benchmark_contender>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
+
+`[benchmark options]` will be passed to the benchmark invocations. They can be anything that the binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
+
+Example output:
+```
+$ ./compare.py benchmarks ./a.out ./a.out
+RUNNING: ./a.out --benchmark_out=/tmp/tmprBT5nW
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 21:16:44
+------------------------------------------------------
+Benchmark               Time           CPU Iterations
+------------------------------------------------------
+BM_memcpy/8            36 ns         36 ns   19101577   211.669MB/s
+BM_memcpy/64           76 ns         76 ns    9412571   800.199MB/s
+BM_memcpy/512          84 ns         84 ns    8249070   5.64771GB/s
+BM_memcpy/1024        116 ns        116 ns    6181763   8.19505GB/s
+BM_memcpy/8192        643 ns        643 ns    1062855   11.8636GB/s
+BM_copy/8             222 ns        222 ns    3137987   34.3772MB/s
+BM_copy/64           1608 ns       1608 ns     432758   37.9501MB/s
+BM_copy/512         12589 ns      12589 ns      54806   38.7867MB/s
+BM_copy/1024        25169 ns      25169 ns      27713   38.8003MB/s
+BM_copy/8192       201165 ns     201112 ns       3486   38.8466MB/s
+RUNNING: ./a.out --benchmark_out=/tmp/tmpt1wwG_
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 21:16:53
+------------------------------------------------------
+Benchmark               Time           CPU Iterations
+------------------------------------------------------
+BM_memcpy/8            36 ns         36 ns   19397903   211.255MB/s
+BM_memcpy/64           73 ns         73 ns    9691174   839.635MB/s
+BM_memcpy/512          85 ns         85 ns    8312329   5.60101GB/s
+BM_memcpy/1024        118 ns        118 ns    6438774   8.11608GB/s
+BM_memcpy/8192        656 ns        656 ns    1068644   11.6277GB/s
+BM_copy/8             223 ns        223 ns    3146977   34.2338MB/s
+BM_copy/64           1611 ns       1611 ns     435340   37.8751MB/s
+BM_copy/512         12622 ns      12622 ns      54818   38.6844MB/s
+BM_copy/1024        25257 ns      25239 ns      27779   38.6927MB/s
+BM_copy/8192       205013 ns     205010 ns       3479    38.108MB/s
+Comparing ./a.out to ./a.out
+Benchmark             Time       CPU   Time Old   Time New   CPU Old   CPU New
+------------------------------------------------------------------------------
+BM_memcpy/8        +0.0020   +0.0020         36         36        36        36
+BM_memcpy/64       -0.0468   -0.0470         76         73        76        73
+BM_memcpy/512      +0.0081   +0.0083         84         85        84        85
+BM_memcpy/1024     +0.0098   +0.0097        116        118       116       118
+BM_memcpy/8192     +0.0200   +0.0203        643        656       643       656
+BM_copy/8          +0.0046   +0.0042        222        223       222       223
+BM_copy/64         +0.0020   +0.0020       1608       1611      1608      1611
+BM_copy/512        +0.0027   +0.0026      12589      12622     12589     12622
+BM_copy/1024       +0.0035   +0.0028      25169      25257     25169     25239
+BM_copy/8192       +0.0191   +0.0194     201165     205013    201112    205010
+```
+
+For every benchmark from the first run it looks for the benchmark with exactly the same name in the second run, and then compares the results. If the names differ, the benchmark is omitted from the diff.
+As you can note, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
+
+2. Compare two different filters of one benchmark.
+The program is invoked like:
+
+``` bash
+$ compare.py filters <benchmark> <filter_baseline> <filter_contender> [benchmark options]...
+```
+Where `<benchmark>` either specifies a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
+
+Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
+
+`[benchmark options]` will be passed to the benchmark invocations. They can be anything that the binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
+
+Example output:
+```
+$ ./compare.py filters ./a.out BM_memcpy BM_copy
+RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmpBWKk0k
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 21:37:28
+------------------------------------------------------
+Benchmark               Time           CPU Iterations
+------------------------------------------------------
+BM_memcpy/8            36 ns         36 ns   17891491   211.215MB/s
+BM_memcpy/64           74 ns         74 ns    9400999   825.646MB/s
+BM_memcpy/512          87 ns         87 ns    8027453   5.46126GB/s
+BM_memcpy/1024        111 ns        111 ns    6116853    8.5648GB/s
+BM_memcpy/8192        657 ns        656 ns    1064679   11.6247GB/s
+RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpAvWcOM
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 21:37:33
+----------------------------------------------------
+Benchmark             Time           CPU Iterations
+----------------------------------------------------
+BM_copy/8            227 ns        227 ns    3038700   33.6264MB/s
+BM_copy/64          1640 ns       1640 ns     426893   37.2154MB/s
+BM_copy/512        12804 ns      12801 ns      55417   38.1444MB/s
+BM_copy/1024       25409 ns      25407 ns      27516   38.4365MB/s
+BM_copy/8192      202986 ns     202990 ns       3454   38.4871MB/s
+Comparing BM_memcpy to BM_copy (from ./a.out)
+Benchmark                            Time        CPU   Time Old   Time New   CPU Old   CPU New
+----------------------------------------------------------------------------------------------
+[BM_memcpy vs. BM_copy]/8         +5.2829    +5.2812         36        227        36       227
+[BM_memcpy vs. BM_copy]/64       +21.1719   +21.1856         74       1640        74      1640
+[BM_memcpy vs. BM_copy]/512     +145.6487  +145.6097         87      12804        87     12801
+[BM_memcpy vs. BM_copy]/1024    +227.1860  +227.1776        111      25409       111     25407
+[BM_memcpy vs. BM_copy]/8192    +308.1664  +308.2898        657     202986       656    202990
+```
+
+As you can see, it applies the filter to the benchmarks, both when running the benchmark and before doing the diff. And to make the diff work, the matches are replaced with some common string. Thus, you can compare two different benchmark families within one benchmark binary.
+As you can note, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
+
+3. Compare filter one from benchmark one to filter two from benchmark two.
+The program is invoked like:
+
+``` bash
+$ compare.py benchmarksfiltered <benchmark_baseline> <filter_baseline> <benchmark_contender> <filter_contender> [benchmark options]...
+```
+
+Where `<benchmark_baseline>` and `<benchmark_contender>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
+
+Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
+
+`[benchmark options]` will be passed to the benchmark invocations. They can be anything that the binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
+
+Example output:
+```
+$ ./compare.py benchmarksfiltered ./a.out BM_memcpy ./a.out BM_copy
+RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmp_FvbYg
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 21:38:27
+------------------------------------------------------
+Benchmark               Time           CPU Iterations
+------------------------------------------------------
+BM_memcpy/8            37 ns         37 ns   18953482   204.118MB/s
+BM_memcpy/64           74 ns         74 ns    9206578   828.245MB/s
+BM_memcpy/512          91 ns         91 ns    8086195   5.25476GB/s
+BM_memcpy/1024        120 ns        120 ns    5804513   7.95662GB/s
+BM_memcpy/8192        664 ns        664 ns    1028363   11.4948GB/s
+RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpDfL5iE
+Run on (8 X 4000 MHz CPU s)
+2017-11-07 21:38:32
+----------------------------------------------------
+Benchmark             Time           CPU Iterations
+----------------------------------------------------
+BM_copy/8            230 ns        230 ns    2985909   33.1161MB/s
+BM_copy/64          1654 ns       1653 ns     419408   36.9137MB/s
+BM_copy/512        13122 ns      13120 ns      53403   37.2156MB/s
+BM_copy/1024       26679 ns      26666 ns      26575   36.6218MB/s
+BM_copy/8192      215068 ns     215053 ns       3221   36.3283MB/s
+Comparing BM_memcpy (from ./a.out) to BM_copy (from ./a.out)
+Benchmark                            Time        CPU   Time Old   Time New   CPU Old   CPU New
+----------------------------------------------------------------------------------------------
+[BM_memcpy vs. BM_copy]/8         +5.1649    +5.1637         37        230        37       230
+[BM_memcpy vs. BM_copy]/64       +21.4352   +21.4374         74       1654        74      1653
+[BM_memcpy vs. BM_copy]/512     +143.6022  +143.5865         91      13122        91     13120
+[BM_memcpy vs. BM_copy]/1024    +221.5903  +221.4790        120      26679       120     26666
+[BM_memcpy vs. BM_copy]/8192    +322.9059  +323.0096        664     215068       664    215053
+```
+This is a mix of the previous two modes: two (potentially different) benchmark binaries are run, and a different filter is applied to each one.
+As you can note, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
420 vendor/github.com/google/benchmark/include/benchmark/benchmark.h generated vendored
@@ -18,7 +18,7 @@
 // Define a function that executes the code to be measured a
 // specified number of times:
 static void BM_StringCreation(benchmark::State& state) {
-  while (state.KeepRunning())
+  for (auto _ : state)
     std::string empty_string;
 }
 
@@ -28,7 +28,7 @@ BENCHMARK(BM_StringCreation);
 // Define another benchmark
 static void BM_StringCopy(benchmark::State& state) {
   std::string x = "hello";
-  while (state.KeepRunning())
+  for (auto _ : state)
     std::string copy(x);
 }
 BENCHMARK(BM_StringCopy);
@@ -54,7 +54,7 @@ int main(int argc, char** argv) {
 static void BM_memcpy(benchmark::State& state) {
   char* src = new char[state.range(0)]; char* dst = new char[state.range(0)];
   memset(src, 'x', state.range(0));
-  while (state.KeepRunning())
+  for (auto _ : state)
     memcpy(dst, src, state.range(0));
   state.SetBytesProcessed(int64_t(state.iterations()) *
                           int64_t(state.range(0)));
@@ -72,29 +72,30 @@ BENCHMARK(BM_memcpy)->Range(8, 8<<10);
 // example, the following code defines a family of microbenchmarks for
 // measuring the speed of set insertion.
 static void BM_SetInsert(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  set<int> data;
+  for (auto _ : state) {
     state.PauseTiming();
-    set<int> data = ConstructRandomSet(state.range(0));
+    data = ConstructRandomSet(state.range(0));
     state.ResumeTiming();
     for (int j = 0; j < state.range(1); ++j)
      data.insert(RandomNumber());
   }
 }
 BENCHMARK(BM_SetInsert)
-    ->Args({1<<10, 1})
-    ->Args({1<<10, 8})
-    ->Args({1<<10, 64})
+    ->Args({1<<10, 128})
+    ->Args({2<<10, 128})
+    ->Args({4<<10, 128})
+    ->Args({8<<10, 128})
     ->Args({1<<10, 512})
-    ->Args({8<<10, 1})
-    ->Args({8<<10, 8})
-    ->Args({8<<10, 64})
+    ->Args({2<<10, 512})
+    ->Args({4<<10, 512})
     ->Args({8<<10, 512});
 
 // The preceding code is quite repetitive, and can be replaced with
 // the following short-hand. The following macro will pick a few
 // appropriate arguments in the product of the two specified ranges
 // and will generate a microbenchmark for each such pair.
-BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {1, 512}});
+BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
 
 // For more complex patterns of inputs, passing a custom function
 // to Apply allows programmatic specification of an
@@ -114,7 +115,7 @@ BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
 template <class Q> int BM_Sequential(benchmark::State& state) {
   Q q;
   typename Q::value_type v;
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = state.range(0); i--; )
       q.push(v);
     for (int e = state.range(0); e--; )
@@ -135,15 +136,15 @@ void BM_test(benchmark::State& state) {
 BENCHMARK(BM_test)->MinTime(2.0); // Run for at least 2 seconds.
 
 In a multithreaded test, it is guaranteed that none of the threads will start
-until all have called KeepRunning, and all will have finished before KeepRunning
-returns false. As such, any global setup or teardown you want to do can be
-wrapped in a check against the thread index:
+until all have reached the loop start, and all will have finished before any
+thread exits the loop body. As such, any global setup or teardown you want to
+do can be wrapped in a check against the thread index:
 
 static void BM_MultiThreaded(benchmark::State& state) {
   if (state.thread_index == 0) {
     // Setup code here.
   }
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     // Run the test as normal.
   }
   if (state.thread_index == 0) {
@@ -164,12 +165,14 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
 #define BENCHMARK_BENCHMARK_H_
 
-#if __cplusplus >= 201103L
+// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
+#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
 #define BENCHMARK_HAS_CXX11
 #endif
 
 #include <stdint.h>
 
+#include <algorithm>
 #include <cassert>
 #include <cstddef>
 #include <iosfwd>
@@ -237,7 +240,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
 #define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
 #endif
 
-
 namespace benchmark {
 class BenchmarkReporter;
 
@@ -289,25 +291,32 @@ BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
 }  // namespace internal
 
-#if !defined(__GNUC__) || defined(__pnacl__) || defined(EMSCRIPTN)
+#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
+    defined(__EMSCRIPTEN__)
 # define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
 #endif
 
 // The DoNotOptimize(...) function can be used to prevent a value or
 // expression from being optimized away by the compiler. This function is
 // intended to add little to no overhead.
 // See: https://youtu.be/nXaxk27zwlk?t=2441
 #ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
 template <class Tp>
-inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
-  // Clang doesn't like the 'X' constraint on `value` and certain GCC versions
-  // don't like the 'g' constraint. Attempt to placate them both.
+inline BENCHMARK_ALWAYS_INLINE
+void DoNotOptimize(Tp const& value) {
+  asm volatile("" : : "r,m"(value) : "memory");
+}
+
+template <class Tp>
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
 #if defined(__clang__)
-  asm volatile("" : : "g"(value) : "memory");
+  asm volatile("" : "+r,m"(value) : : "memory");
 #else
-  asm volatile("" : : "i,r,m"(value) : "memory");
+  asm volatile("" : "+m,r"(value) : : "memory");
 #endif
 }
 
 // Force the compiler to flush pending writes to global memory. Acts as an
 // effective read/write barrier
 inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
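The new `DoNotOptimize` overloads and `ClobberMemory` are used together in benchmark bodies. A minimal sketch of the intended pattern (the push_back example is illustrative, not taken from this header):

```c++
// Sketch: keeping otherwise-dead work alive under -O3. DoNotOptimize()
// makes the vector's buffer "observable" so the allocation is not elided;
// ClobberMemory() forces the write to land before the iteration ends.
#include <benchmark/benchmark.h>
#include <vector>

static void BM_VectorPushBack(benchmark::State& state) {
  for (auto _ : state) {
    std::vector<int> v;
    v.reserve(1);
    benchmark::DoNotOptimize(v.data());  // pointer escapes: alloc must happen
    v.push_back(42);
    benchmark::ClobberMemory();          // write must complete
  }
}
BENCHMARK(BM_VectorPushBack);
BENCHMARK_MAIN();
```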
@@ -376,7 +385,19 @@ enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
 
 // BigOFunc is passed to a benchmark in order to specify the asymptotic
 // computational complexity for the benchmark.
-typedef double(BigOFunc)(int);
+typedef double(BigOFunc)(int64_t);
+
+// StatisticsFunc is passed to a benchmark in order to compute some descriptive
+// statistics over all the measurements of some type
+typedef double(StatisticsFunc)(const std::vector<double>&);
+
+struct Statistics {
+  std::string name_;
+  StatisticsFunc* compute_;
+
+  Statistics(std::string name, StatisticsFunc* compute)
+      : name_(name), compute_(compute) {}
+};
 
 namespace internal {
 class ThreadTimer;
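A hedged sketch of how the `Statistics` machinery above is consumed. The registration hook is not shown in this hunk; it is assumed here to be a `ComputeStatistics()` method on the registered benchmark, and a captureless lambda converts to the `StatisticsFunc*` the struct stores:

```c++
// Sketch: attaching a custom "max" statistic across repetitions.
// ComputeStatistics() is the assumed registration hook, not part of
// this diff; the spin loop is a placeholder workload.
#include <benchmark/benchmark.h>
#include <algorithm>
#include <vector>

static void BM_Spin(benchmark::State& state) {
  for (auto _ : state) {
    for (volatile int i = 0; i < 1024; ++i) {
    }
  }
}
BENCHMARK(BM_Spin)
    ->Repetitions(10)
    ->ComputeStatistics("max", [](const std::vector<double>& v) {
      return *std::max_element(v.begin(), v.end());
    });
BENCHMARK_MAIN();
```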
@@ -398,24 +419,40 @@ enum ReportMode
 // benchmark to use.
 class State {
  public:
+  struct StateIterator;
+  friend struct StateIterator;
+
+  // Returns iterators used to run each iteration of a benchmark using a
+  // C++11 ranged-based for loop. These functions should not be called directly.
+  //
+  // REQUIRES: The benchmark has not started running yet. Neither begin nor end
+  // have been called previously.
+  //
+  // NOTE: KeepRunning may not be used after calling either of these functions.
+  BENCHMARK_ALWAYS_INLINE StateIterator begin();
+  BENCHMARK_ALWAYS_INLINE StateIterator end();
+
   // Returns true if the benchmark should continue through another iteration.
   // NOTE: A benchmark may not return from the test until KeepRunning() has
   // returned false.
-  bool KeepRunning() {
-    if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
-      StartKeepRunning();
-    }
-    bool const res = total_iterations_++ < max_iterations;
-    if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
-      FinishKeepRunning();
-    }
-    return res;
-  }
+  bool KeepRunning();
+
+  // Returns true iff the benchmark should run n more iterations.
+  // REQUIRES: 'n' > 0.
+  // NOTE: A benchmark must not return from the test until KeepRunningBatch()
+  // has returned false.
+  // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations.
+  //
+  // Intended usage:
+  //   while (state.KeepRunningBatch(1000)) {
+  //     // process 1000 elements
+  //   }
+  bool KeepRunningBatch(size_t n);
 
   // REQUIRES: timer is running and 'SkipWithError(...)' has not been called
   // by the current thread.
   // Stop the benchmark timer. If not called, the timer will be
-  // automatically stopped after KeepRunning() returns false for the first time.
+  // automatically stopped after the last iteration of the benchmark loop.
   //
   // For threaded benchmarks the PauseTiming() function only pauses the timing
   // for the current thread.
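Expanding the "Intended usage" comment above into a complete benchmark, as a sketch (the summing workload is a placeholder for "process one element"):

```c++
// Sketch: KeepRunningBatch() accounts for kBatch iterations per call,
// amortizing the loop bookkeeping when one pass naturally processes a
// whole batch of elements.
#include <benchmark/benchmark.h>

static void BM_SumBatch(benchmark::State& state) {
  static constexpr size_t kBatch = 1000;
  while (state.KeepRunningBatch(kBatch)) {
    long sum = 0;
    for (size_t i = 0; i < kBatch; ++i) {
      sum += static_cast<long>(i);  // stand-in for processing one element
    }
    benchmark::DoNotOptimize(sum);
  }
}
BENCHMARK(BM_SumBatch);
BENCHMARK_MAIN();
```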
@ -431,7 +468,8 @@ class State {
|
|||||||
// REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
|
// REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
|
||||||
// by the current thread.
|
// by the current thread.
|
||||||
// Start the benchmark timer. The timer is NOT running on entrance to the
|
// Start the benchmark timer. The timer is NOT running on entrance to the
|
||||||
// benchmark function. It begins running after the first call to KeepRunning()
|
// benchmark function. It begins running after control flow enters the
|
||||||
|
// benchmark loop.
|
||||||
//
|
//
|
||||||
// NOTE: PauseTiming()/ResumeTiming() are relatively
|
// NOTE: PauseTiming()/ResumeTiming() are relatively
|
||||||
// heavyweight, and so their use should generally be avoided
|
// heavyweight, and so their use should generally be avoided
|
||||||
@ -440,9 +478,13 @@ class State {
|
|||||||
|
|
||||||
// REQUIRES: 'SkipWithError(...)' has not been called previously by the
|
// REQUIRES: 'SkipWithError(...)' has not been called previously by the
|
||||||
// current thread.
|
// current thread.
|
||||||
// Skip any future iterations of the 'KeepRunning()' loop in the current
|
// Report the benchmark as resulting in an error with the specified 'msg'.
|
||||||
// thread and report an error with the specified 'msg'. After this call
|
// After this call the user may explicitly 'return' from the benchmark.
|
||||||
// the user may explicitly 'return' from the benchmark.
|
//
|
||||||
|
// If the ranged-for style of benchmark loop is used, the user must explicitly
|
||||||
|
// break from the loop, otherwise all future iterations will be run.
|
||||||
|
// If the 'KeepRunning()' loop is used the current thread will automatically
|
||||||
|
// exit the loop at the end of the current iteration.
|
||||||
//
|
//
|
||||||
// For threaded benchmarks only the current thread stops executing and future
|
// For threaded benchmarks only the current thread stops executing and future
|
||||||
// calls to `KeepRunning()` will block until all threads have completed
|
// calls to `KeepRunning()` will block until all threads have completed
|
||||||
@ -455,7 +497,7 @@ class State {
|
|||||||
// responsibility to exit the scope as needed.
|
// responsibility to exit the scope as needed.
|
||||||
void SkipWithError(const char* msg);
|
void SkipWithError(const char* msg);
|
||||||

-  // REQUIRES: called exactly once per iteration of the KeepRunning loop.
+  // REQUIRES: called exactly once per iteration of the benchmarking loop.
   // Set the manually measured time for this benchmark iteration, which
   // is used instead of automatically measured time if UseManualTime() was
   // specified.
@@ -470,12 +512,12 @@ class State {
   // value > 0, the report is printed in MB/sec instead of nanoseconds
   // per iteration.
   //
-  // REQUIRES: a benchmark has exited its KeepRunning loop.
+  // REQUIRES: a benchmark has exited its benchmarking loop.
   BENCHMARK_ALWAYS_INLINE
-  void SetBytesProcessed(size_t bytes) { bytes_processed_ = bytes; }
+  void SetBytesProcessed(int64_t bytes) { bytes_processed_ = bytes; }

   BENCHMARK_ALWAYS_INLINE
-  size_t bytes_processed() const { return bytes_processed_; }
+  int64_t bytes_processed() const { return bytes_processed_; }
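A sketch combining `SetIterationTime()` with the newly widened `SetBytesProcessed(int64_t)`; the memcpy workload is illustrative only, assuming nothing beyond the API shown above:

```cpp
#include <chrono>
#include <cstring>
#include <vector>
#include "benchmark/benchmark.h"

static void BM_Memcpy(benchmark::State& state) {
  const size_t size = static_cast<size_t>(state.range(0));
  std::vector<char> src(size, 'x'), dst(size);
  for (auto _ : state) {
    auto start = std::chrono::high_resolution_clock::now();
    std::memcpy(dst.data(), src.data(), size);
    auto end = std::chrono::high_resolution_clock::now();
    // Manually measured time, used because UseManualTime() is requested.
    state.SetIterationTime(std::chrono::duration<double>(end - start).count());
  }
  // Reported as MB/s; note the int64_t parameter introduced by this change.
  state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(size));
}
BENCHMARK(BM_Memcpy)->Arg(8 << 10)->UseManualTime();
```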

   // If this routine is called with complexity_n > 0 and complexity report is
   // requested for the
@@ -483,22 +525,22 @@ class State {
   // and complexity_n will
   // represent the length of N.
   BENCHMARK_ALWAYS_INLINE
-  void SetComplexityN(int complexity_n) { complexity_n_ = complexity_n; }
+  void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }

   BENCHMARK_ALWAYS_INLINE
-  int complexity_length_n() { return complexity_n_; }
+  int64_t complexity_length_n() { return complexity_n_; }

   // If this routine is called with items > 0, then an items/s
   // label is printed on the benchmark report line for the currently
   // executing benchmark. It is typically called at the end of a processing
   // benchmark where a processing items/second output is desired.
   //
-  // REQUIRES: a benchmark has exited its KeepRunning loop.
+  // REQUIRES: a benchmark has exited its benchmarking loop.
   BENCHMARK_ALWAYS_INLINE
-  void SetItemsProcessed(size_t items) { items_processed_ = items; }
+  void SetItemsProcessed(int64_t items) { items_processed_ = items; }

   BENCHMARK_ALWAYS_INLINE
-  size_t items_processed() const { return items_processed_; }
+  int64_t items_processed() const { return items_processed_; }
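A usage sketch for the widened `SetComplexityN(int64_t)`, paired with a complexity request at registration time; the sort workload is an assumption, not from this patch:

```cpp
#include <algorithm>
#include <vector>
#include "benchmark/benchmark.h"

static void BM_Sort(benchmark::State& state) {
  const int64_t n = state.range(0);
  std::vector<int> data(static_cast<size_t>(n));
  for (int64_t i = 0; i < n; ++i)
    data[static_cast<size_t>(i)] = static_cast<int>(n - i);
  for (auto _ : state) {
    std::vector<int> v = data;  // copy cost is included; fine for a sketch
    std::sort(v.begin(), v.end());
  }
  // Record N so the library can fit the asymptotic complexity curve.
  state.SetComplexityN(n);
}
BENCHMARK(BM_Sort)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oNLogN);
```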

   // If this routine is called, the specified label is printed at the
   // end of the benchmark report line for the currently executing
@@ -506,12 +548,12 @@ class State {
   //  static void BM_Compress(benchmark::State& state) {
   //    ...
   //    double compress = input_size / output_size;
-  //    state.SetLabel(StringPrintf("compress:%.1f%%", 100.0*compress));
+  //    state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compress));
   //  }
   // Produces output that looks like:
   //  BM_Compress   50   50   14115038  compress:27.3%
   //
-  // REQUIRES: a benchmark has exited its KeepRunning loop.
+  // REQUIRES: a benchmark has exited its benchmarking loop.
   void SetLabel(const char* label);

   void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) {
@@ -520,34 +562,52 @@ class State {

   // Range arguments for this run. CHECKs if the argument has been set.
   BENCHMARK_ALWAYS_INLINE
-  int range(std::size_t pos = 0) const {
+  int64_t range(std::size_t pos = 0) const {
     assert(range_.size() > pos);
     return range_[pos];
   }

   BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
-  int range_x() const { return range(0); }
+  int64_t range_x() const { return range(0); }

   BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
-  int range_y() const { return range(1); }
+  int64_t range_y() const { return range(1); }

   BENCHMARK_ALWAYS_INLINE
-  size_t iterations() const { return total_iterations_; }
+  size_t iterations() const {
+    if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
+      return 0;
+    }
+    return max_iterations - total_iterations_ + batch_leftover_;
+  }

- private:
-  bool started_;
-  bool finished_;
+ private:  // items we expect on the first cache line (ie 64 bytes of the struct)
+  // When total_iterations_ is 0, KeepRunning() and friends will return false.
+  // May be larger than max_iterations.
   size_t total_iterations_;

-  std::vector<int> range_;
+  // When using KeepRunningBatch(), batch_leftover_ holds the number of
+  // iterations beyond max_iters that were run. Used to track
+  // completed_iterations_ accurately.
+  size_t batch_leftover_;

-  size_t bytes_processed_;
-  size_t items_processed_;
+ public:
+  const size_t max_iterations;

-  int complexity_n_;
+ private:
+  bool started_;
+  bool finished_;
   bool error_occurred_;

+ private:  // items we don't need on the first cache line
+  std::vector<int64_t> range_;
+
+  int64_t bytes_processed_;
+  int64_t items_processed_;
+
+  int64_t complexity_n_;
+
  public:
   // Container for user-defined counters.
   UserCounters counters;
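The deprecated `range_x()`/`range_y()` accessors now forward to the widened `range()`; a sketch of the preferred style, with an illustrative string workload:

```cpp
#include <string>
#include "benchmark/benchmark.h"

static void BM_Join(benchmark::State& state) {
  // range(0) and range(1) now return int64_t; prefer them over the
  // deprecated range_x()/range_y() shown above.
  const int64_t count = state.range(0);
  const int64_t width = state.range(1);
  for (auto _ : state) {
    std::string s;
    for (int64_t i = 0; i < count; ++i)
      s.append(static_cast<size_t>(width), 'x');
    benchmark::DoNotOptimize(s.data());
  }
}
BENCHMARK(BM_Join)->Args({64, 16})->Args({1024, 16});
```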
@@ -555,21 +615,110 @@ class State {
   const int thread_index;
   // Number of threads concurrently executing the benchmark.
   const int threads;
-  const size_t max_iterations;

   // TODO(EricWF) make me private
-  State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
+  State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
         int n_threads, internal::ThreadTimer* timer,
         internal::ThreadManager* manager);

  private:
   void StartKeepRunning();
+  // Implementation of KeepRunning() and KeepRunningBatch().
+  // is_batch must be true unless n is 1.
+  bool KeepRunningInternal(size_t n, bool is_batch);
   void FinishKeepRunning();
   internal::ThreadTimer* timer_;
   internal::ThreadManager* manager_;
   BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State);
 };

+inline BENCHMARK_ALWAYS_INLINE
+bool State::KeepRunning() {
+  return KeepRunningInternal(1, /*is_batch=*/ false);
+}
+
+inline BENCHMARK_ALWAYS_INLINE
+bool State::KeepRunningBatch(size_t n) {
+  return KeepRunningInternal(n, /*is_batch=*/ true);
+}
+
+inline BENCHMARK_ALWAYS_INLINE
+bool State::KeepRunningInternal(size_t n, bool is_batch) {
+  // total_iterations_ is set to 0 by the constructor, and always set to a
+  // nonzero value by StartKeepRunning().
+  assert(n > 0);
+  // n must be 1 unless is_batch is true.
+  assert(is_batch || n == 1);
+  if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) {
+    total_iterations_ -= n;
+    return true;
+  }
+  if (!started_) {
+    StartKeepRunning();
+    if (!error_occurred_ && total_iterations_ >= n) {
+      total_iterations_ -= n;
+      return true;
+    }
+  }
+  // For non-batch runs, total_iterations_ must be 0 by now.
+  if (is_batch && total_iterations_ != 0) {
+    batch_leftover_ = n - total_iterations_;
+    total_iterations_ = 0;
+    return true;
+  }
+  FinishKeepRunning();
+  return false;
+}
+
+struct State::StateIterator {
+  struct BENCHMARK_UNUSED Value {};
+  typedef std::forward_iterator_tag iterator_category;
+  typedef Value value_type;
+  typedef Value reference;
+  typedef Value pointer;
+  typedef std::ptrdiff_t difference_type;
+
+ private:
+  friend class State;
+  BENCHMARK_ALWAYS_INLINE
+  StateIterator() : cached_(0), parent_() {}
+
+  BENCHMARK_ALWAYS_INLINE
+  explicit StateIterator(State* st)
+      : cached_(st->error_occurred_ ? 0 : st->max_iterations), parent_(st) {}
+
+ public:
+  BENCHMARK_ALWAYS_INLINE
+  Value operator*() const { return Value(); }
+
+  BENCHMARK_ALWAYS_INLINE
+  StateIterator& operator++() {
+    assert(cached_ > 0);
+    --cached_;
+    return *this;
+  }
+
+  BENCHMARK_ALWAYS_INLINE
+  bool operator!=(StateIterator const&) const {
+    if (BENCHMARK_BUILTIN_EXPECT(cached_ != 0, true)) return true;
+    parent_->FinishKeepRunning();
+    return false;
+  }
+
+ private:
+  size_t cached_;
+  State* const parent_;
+};
+
+inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::begin() {
+  return StateIterator(this);
+}
+inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::end() {
+  StartKeepRunning();
+  return StateIterator();
+}
+
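The inline definitions above drive both loop styles; a usage sketch, assuming nothing beyond the API shown in this patch:

```cpp
#include "benchmark/benchmark.h"

static void BM_RangedFor(benchmark::State& state) {
  // Preferred C++11 style: iterates exactly max_iterations times via the
  // StateIterator defined above.
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_RangedFor);

static void BM_Batched(benchmark::State& state) {
  // Each call accounts for 100 iterations at once, so the body should do
  // 100 units of work. A run may overshoot max_iterations; the
  // batch_leftover_ member reconciles the reported count.
  while (state.KeepRunningBatch(100)) {
    for (int i = 0; i < 100; ++i) benchmark::DoNotOptimize(i);
  }
}
BENCHMARK(BM_Batched);
```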
 namespace internal {

 typedef void(Function)(State&);
@@ -590,7 +739,7 @@ class Benchmark {
   // Run this benchmark once with "x" as the extra argument passed
   // to the function.
   // REQUIRES: The function passed to the constructor must accept an arg1.
-  Benchmark* Arg(int x);
+  Benchmark* Arg(int64_t x);

   // Run this benchmark with the given time unit for the generated output report
   Benchmark* Unit(TimeUnit unit);
@@ -598,23 +747,23 @@ class Benchmark {
   // Run this benchmark once for a number of values picked from the
   // range [start..limit]. (start and limit are always picked.)
   // REQUIRES: The function passed to the constructor must accept an arg1.
-  Benchmark* Range(int start, int limit);
+  Benchmark* Range(int64_t start, int64_t limit);

   // Run this benchmark once for all values in the range [start..limit] with
   // specific step
   // REQUIRES: The function passed to the constructor must accept an arg1.
-  Benchmark* DenseRange(int start, int limit, int step = 1);
+  Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);

   // Run this benchmark once with "args" as the extra arguments passed
   // to the function.
   // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
-  Benchmark* Args(const std::vector<int>& args);
+  Benchmark* Args(const std::vector<int64_t>& args);

   // Equivalent to Args({x, y})
   // NOTE: This is a legacy C++03 interface provided for compatibility only.
   //   New code should use 'Args'.
-  Benchmark* ArgPair(int x, int y) {
-    std::vector<int> args;
+  Benchmark* ArgPair(int64_t x, int64_t y) {
+    std::vector<int64_t> args;
     args.push_back(x);
     args.push_back(y);
     return Args(args);
@@ -623,7 +772,7 @@ class Benchmark {
   // Run this benchmark once for a number of values picked from the
   // ranges [start..limit]. (starts and limits are always picked.)
   // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
-  Benchmark* Ranges(const std::vector<std::pair<int, int> >& ranges);
+  Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);

   // Equivalent to ArgNames({name})
   Benchmark* ArgName(const std::string& name);
@@ -635,8 +784,8 @@ class Benchmark {
   // Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
   // NOTE: This is a legacy C++03 interface provided for compatibility only.
   //   New code should use 'Ranges'.
-  Benchmark* RangePair(int lo1, int hi1, int lo2, int hi2) {
-    std::vector<std::pair<int, int> > ranges;
+  Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
+    std::vector<std::pair<int64_t, int64_t> > ranges;
     ranges.push_back(std::make_pair(lo1, hi1));
     ranges.push_back(std::make_pair(lo2, hi2));
     return Ranges(ranges);
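All of these argument builders now take 64-bit values; a registration sketch using only the methods shown above (the fill workload is illustrative):

```cpp
#include <vector>
#include "benchmark/benchmark.h"

static void BM_Fill(benchmark::State& state) {
  for (auto _ : state) {
    std::vector<int> v(static_cast<size_t>(state.range(0)), 42);
    benchmark::DoNotOptimize(v.data());
  }
}
// Arg/Range/DenseRange/Args now accept int64_t arguments.
BENCHMARK(BM_Fill)->Arg(1 << 20);
BENCHMARK(BM_Fill)->Range(8, 8 << 10);         // multiplicative sweep
BENCHMARK(BM_Fill)->DenseRange(0, 1024, 128);  // additive sweep
```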
@@ -698,6 +847,9 @@ class Benchmark {
   // the asymptotic computational complexity will be shown on the output.
   Benchmark* Complexity(BigOFunc* complexity);

+  // Add this statistic to be computed over all the values of the benchmark run
+  Benchmark* ComputeStatistics(std::string name, StatisticsFunc* statistics);
+
   // Support for running multiple copies of the same benchmark concurrently
   // in multiple threads. This may be useful when measuring the scaling
   // of some piece of code.
@@ -740,15 +892,13 @@ class Benchmark {

   int ArgsCnt() const;

-  static void AddRange(std::vector<int>* dst, int lo, int hi, int mult);
-
  private:
   friend class BenchmarkFamilies;

   std::string name_;
   ReportMode report_mode_;
   std::vector<std::string> arg_names_;       // Args for all benchmark runs
-  std::vector<std::vector<int> > args_;      // Args for all benchmark runs
+  std::vector<std::vector<int64_t> > args_;  // Args for all benchmark runs
   TimeUnit time_unit_;
   int range_multiplier_;
   double min_time_;
@@ -758,6 +908,7 @@ class Benchmark {
   bool use_manual_time_;
   BigO complexity_;
   BigOFunc* complexity_lambda_;
+  std::vector<Statistics> statistics_;
   std::vector<int> thread_counts_;

   Benchmark& operator=(Benchmark const&);
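A sketch of the new `ComputeStatistics()` hook, assuming `StatisticsFunc` takes the per-repetition measurements and returns a single double, as its use here suggests:

```cpp
#include <algorithm>
#include <vector>
#include "benchmark/benchmark.h"

// Custom statistic: the maximum over all repetition measurements.
static double MaxOf(const std::vector<double>& v) {
  return *std::max_element(v.begin(), v.end());
}

static void BM_Spin(benchmark::State& state) {
  for (auto _ : state) {
    for (volatile int i = 0; i < 1000; ++i) {
    }
  }
}
// Adds a "max" row next to the mean/median/stddev rows in the report.
BENCHMARK(BM_Spin)->Repetitions(8)->ComputeStatistics("max", MaxOf);
```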
@@ -905,7 +1056,7 @@ class Fixture : public internal::Benchmark {
 #define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \
   BENCHMARK(n)->RangePair({{(l1), (h1)}, {(l2), (h2)}})

-#if __cplusplus >= 201103L
+#ifdef BENCHMARK_HAS_CXX11

 // Register a benchmark which invokes the function specified by `func`
 // with the additional arguments specified by `...`.
@@ -925,7 +1076,7 @@ class Fixture : public internal::Benchmark {
           #func "/" #test_case_name, \
           [](::benchmark::State& st) { func(st, __VA_ARGS__); })))

-#endif  // __cplusplus >= 11
+#endif  // BENCHMARK_HAS_CXX11

 // This will register a benchmark for a templatized function. For example:
 //
@@ -946,7 +1097,7 @@ class Fixture : public internal::Benchmark {
       new ::benchmark::internal::FunctionBenchmark(#n "<" #a "," #b ">", \
                                                    n<a, b>)))

-#if __cplusplus >= 201103L
+#ifdef BENCHMARK_HAS_CXX11
 #define BENCHMARK_TEMPLATE(n, ...) \
   BENCHMARK_PRIVATE_DECLARE(n) = \
       (::benchmark::internal::RegisterBenchmarkInternal( \
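Both macros guarded by `BENCHMARK_HAS_CXX11` above can be exercised as follows; the workloads are illustrative assumptions:

```cpp
#include <string>
#include <vector>
#include "benchmark/benchmark.h"

// BENCHMARK_CAPTURE forwards extra arguments to the benchmark function.
static void BM_TakesArgs(benchmark::State& state, std::string label, int n) {
  for (auto _ : state) {
    std::string s;
    for (int i = 0; i < n; ++i) s += label;
    benchmark::DoNotOptimize(s.data());
  }
}
BENCHMARK_CAPTURE(BM_TakesArgs, short_label, std::string("ab"), 4);

// BENCHMARK_TEMPLATE registers one instantiation of a templated function.
template <class Container>
static void BM_PushBack(benchmark::State& state) {
  for (auto _ : state) {
    Container c;
    c.push_back(typename Container::value_type());
    benchmark::DoNotOptimize(c);
  }
}
BENCHMARK_TEMPLATE(BM_PushBack, std::vector<int>);
```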
@@ -967,10 +1118,63 @@ class Fixture : public internal::Benchmark {
     virtual void BenchmarkCase(::benchmark::State&); \
   };

+#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+  class BaseClass##_##Method##_Benchmark : public BaseClass<a> { \
+   public: \
+    BaseClass##_##Method##_Benchmark() : BaseClass<a>() { \
+      this->SetName(#BaseClass"<" #a ">/" #Method); \
+    } \
+ \
+   protected: \
+    virtual void BenchmarkCase(::benchmark::State&); \
+  };
+
+#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+  class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> { \
+   public: \
+    BaseClass##_##Method##_Benchmark() : BaseClass<a, b>() { \
+      this->SetName(#BaseClass"<" #a "," #b ">/" #Method); \
+    } \
+ \
+   protected: \
+    virtual void BenchmarkCase(::benchmark::State&); \
+  };
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) \
+  class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \
+   public: \
+    BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \
+      this->SetName(#BaseClass"<" #__VA_ARGS__ ">/" #Method); \
+    } \
+ \
+   protected: \
+    virtual void BenchmarkCase(::benchmark::State&); \
+  };
+#else
+#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
+#endif
+
 #define BENCHMARK_DEFINE_F(BaseClass, Method) \
   BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
   void BaseClass##_##Method##_Benchmark::BenchmarkCase

+#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \
+  BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+  void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \
+  BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+  void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \
+  BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
+  void BaseClass##_##Method##_Benchmark::BenchmarkCase
+#else
+#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
+#endif
+
 #define BENCHMARK_REGISTER_F(BaseClass, Method) \
   BENCHMARK_PRIVATE_REGISTER_F(BaseClass##_##Method##_Benchmark)
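A sketch of the new templated-fixture macros declared above; it assumes the usual `benchmark::Fixture` interface (`SetUp`/`TearDown`), and the container workload is illustrative:

```cpp
#include <set>
#include "benchmark/benchmark.h"

template <typename T>
class SetFixture : public benchmark::Fixture {
 public:
  std::set<T> data;
  void SetUp(const ::benchmark::State&) override {
    for (int i = 0; i < 1024; ++i) data.insert(static_cast<T>(i));
  }
};

// Declares SetFixture_Lookup_Benchmark : public SetFixture<int> and opens
// the BenchmarkCase definition.
BENCHMARK_TEMPLATE1_DEFINE_F(SetFixture, Lookup, int)(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(data.find(512));
  }
}
BENCHMARK_REGISTER_F(SetFixture, Lookup);
```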
@@ -984,13 +1188,33 @@ class Fixture : public internal::Benchmark {
   BENCHMARK_REGISTER_F(BaseClass, Method); \
   void BaseClass##_##Method##_Benchmark::BenchmarkCase

+#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \
+  BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+  BENCHMARK_REGISTER_F(BaseClass, Method); \
+  void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \
+  BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+  BENCHMARK_REGISTER_F(BaseClass, Method); \
+  void BaseClass##_##Method##_Benchmark::BenchmarkCase
+
+#ifdef BENCHMARK_HAS_CXX11
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
+  BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
+  BENCHMARK_REGISTER_F(BaseClass, Method); \
+  void BaseClass##_##Method##_Benchmark::BenchmarkCase
+#else
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
+#endif
+
 // Helper macro to create a main routine in a test that runs the benchmarks
 #define BENCHMARK_MAIN() \
   int main(int argc, char** argv) { \
     ::benchmark::Initialize(&argc, argv); \
     if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \
     ::benchmark::RunSpecifiedBenchmarks(); \
-  }
+  } \
+  int main(int, char**)
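With the revised macro, `BENCHMARK_MAIN()` is written with a trailing semicolon, which the appended `int main(int, char**)` declaration is there to absorb; a minimal whole-file sketch:

```cpp
#include "benchmark/benchmark.h"

static void BM_Noop(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_Noop);

// The semicolon is now required (and harmless), unlike the old macro.
BENCHMARK_MAIN();
```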
 // ------------------------------------------------------
@@ -998,6 +1222,26 @@ class Fixture : public internal::Benchmark {

 namespace benchmark {

+struct CPUInfo {
+  struct CacheInfo {
+    std::string type;
+    int level;
+    int size;
+    int num_sharing;
+  };
+
+  int num_cpus;
+  double cycles_per_second;
+  std::vector<CacheInfo> caches;
+  bool scaling_enabled;
+
+  static const CPUInfo& Get();
+
+ private:
+  CPUInfo();
+  BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo);
+};
+
 // Interface for custom benchmark result printers.
 // By default, benchmark reports are printed to stdout. However an application
 // can control the destination of the reports by calling
@@ -1006,12 +1250,11 @@ namespace benchmark {
 class BenchmarkReporter {
  public:
   struct Context {
-    int num_cpus;
-    double mhz_per_cpu;
-    bool cpu_scaling_enabled;
+    CPUInfo const& cpu_info;

     // The number of chars in the longest benchmark name.
     size_t name_field_width;
+    static const char *executable_name;
+    Context();
   };

   struct Run {
@@ -1063,7 +1306,10 @@ class BenchmarkReporter {
     // Keep track of arguments to compute asymptotic complexity
     BigO complexity;
     BigOFunc* complexity_lambda;
-    int complexity_n;
+    int64_t complexity_n;
+
+    // what statistics to compute from the measurements
+    const std::vector<Statistics>* statistics;

     // Inform print function whether the current run is a complexity report
     bool report_big_o;
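A custom reporter against the revised `Context`, which now exposes a `CPUInfo` reference instead of the separate `num_cpus`/`mhz_per_cpu` fields; the `Run` field names used here (`benchmark_name`, `GetAdjustedRealTime()`) are assumed from the surrounding struct, not shown in this excerpt:

```cpp
#include <iostream>
#include "benchmark/benchmark.h"

class TerseReporter : public benchmark::BenchmarkReporter {
 public:
  bool ReportContext(const Context& context) override {
    std::cout << "cpus=" << context.cpu_info.num_cpus << " scaling="
              << (context.cpu_info.scaling_enabled ? "on" : "off") << "\n";
    return true;  // false would abort the run
  }
  void ReportRuns(const std::vector<Run>& runs) override {
    for (const Run& run : runs)
      std::cout << run.benchmark_name << " " << run.GetAdjustedRealTime()
                << "\n";
  }
};
// Usage sketch: TerseReporter rep; ::benchmark::RunSpecifiedBenchmarks(&rep);
```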
27 vendor/github.com/google/benchmark/include/benchmark/benchmark_api.h generated vendored
@@ -1,27 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#ifndef BENCHMARK_BENCHMARK_API_H_
-#define BENCHMARK_BENCHMARK_API_H_
-
-#ifdef __DEPRECATED
-# ifndef BENCHMARK_WARNING_MSG
-#  warning the benchmark_api.h header has been deprecated and will be removed, please include benchmark.h instead
-# else
-BENCHMARK_WARNING_MSG("the benchmark_api.h header has been deprecated and will be removed, please include benchmark.h instead")
-# endif
-#endif
-
-#include "benchmark.h"  // For forward declaration of BenchmarkReporter
-
-#endif  // BENCHMARK_BENCHMARK_API_H_
16 vendor/github.com/google/benchmark/releasing.md generated vendored Normal file
@@ -0,0 +1,16 @@
+# How to release
+
+* Make sure you're on master and synced to HEAD
+* Ensure the project builds and tests run (sanity check only, obviously)
+    * `parallel -j0 exec ::: test/*_test` can help ensure everything at least
+      passes
+* Prepare release notes
+    * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of
+      commits between the last annotated tag and HEAD
+    * Pick the most interesting.
+* Create a release through github's interface
+    * Note this will create a lightweight tag.
+    * Update this to an annotated tag:
+      * `git pull --tags`
+      * `git tag -a -f <tag> <tag>`
+      * `git push --force origin`
65 vendor/github.com/google/benchmark/src/CMakeLists.txt generated vendored
@@ -11,6 +11,7 @@ file(GLOB
     *.cc
     ${PROJECT_SOURCE_DIR}/include/benchmark/*.h
     ${CMAKE_CURRENT_SOURCE_DIR}/*.h)
+list(FILTER SOURCE_FILES EXCLUDE REGEX "benchmark_main\\.cc")

 add_library(benchmark ${SOURCE_FILES})
 set_target_properties(benchmark PROPERTIES
@@ -34,15 +35,34 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
   target_link_libraries(benchmark Shlwapi)
 endif()

+# We need extra libraries on Solaris
+if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
+  target_link_libraries(benchmark kstat)
+endif()
+
+# Benchmark main library
+add_library(benchmark_main "benchmark_main.cc")
+set_target_properties(benchmark_main PROPERTIES
+  OUTPUT_NAME "benchmark_main"
+  VERSION ${GENERIC_LIB_VERSION}
+  SOVERSION ${GENERIC_LIB_SOVERSION}
+)
+target_include_directories(benchmark PUBLIC
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
+    )
+target_link_libraries(benchmark_main benchmark)
+
 set(include_install_dir "include")
 set(lib_install_dir "lib/")
 set(bin_install_dir "bin/")
 set(config_install_dir "lib/cmake/${PROJECT_NAME}")
+set(pkgconfig_install_dir "lib/pkgconfig")

 set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")

 set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake")
 set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
+set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc")
 set(targets_export_name "${PROJECT_NAME}Targets")

 set(namespace "${PROJECT_NAME}::")
@@ -53,26 +73,33 @@ write_basic_package_version_file(
 )

 configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
+configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY)

-# Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
-install(
-  TARGETS benchmark
-  EXPORT ${targets_export_name}
-  ARCHIVE DESTINATION ${lib_install_dir}
-  LIBRARY DESTINATION ${lib_install_dir}
-  RUNTIME DESTINATION ${bin_install_dir}
-  INCLUDES DESTINATION ${include_install_dir})
+if (BENCHMARK_ENABLE_INSTALL)
+  # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
+  install(
+    TARGETS benchmark benchmark_main
+    EXPORT ${targets_export_name}
+    ARCHIVE DESTINATION ${lib_install_dir}
+    LIBRARY DESTINATION ${lib_install_dir}
+    RUNTIME DESTINATION ${bin_install_dir}
+    INCLUDES DESTINATION ${include_install_dir})

-install(
-  DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
-  DESTINATION ${include_install_dir}
-  FILES_MATCHING PATTERN "*.*h")
+  install(
+    DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
+    DESTINATION ${include_install_dir}
+    FILES_MATCHING PATTERN "*.*h")

-install(
-  FILES "${project_config}" "${version_config}"
-  DESTINATION "${config_install_dir}")
+  install(
+    FILES "${project_config}" "${version_config}"
+    DESTINATION "${config_install_dir}")

-install(
-  EXPORT "${targets_export_name}"
-  NAMESPACE "${namespace}"
-  DESTINATION "${config_install_dir}")
+  install(
+    FILES "${pkg_config}"
+    DESTINATION "${pkgconfig_install_dir}")
+
+  install(
+    EXPORT "${targets_export_name}"
+    NAMESPACE "${namespace}"
+    DESTINATION "${config_install_dir}")
+endif()
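With the `benchmark_main` library added by this CMake change, a benchmark translation unit can omit `main()` entirely and let the linked library supply it (how the consumer links it, e.g. `-lbenchmark_main -lbenchmark`, is an assumption of this sketch):

```cpp
// my_bench.cc: no main() required when linking against benchmark_main.
#include <string>
#include "benchmark/benchmark.h"

static void BM_StringCreation(benchmark::State& state) {
  for (auto _ : state) {
    std::string empty_string;
    benchmark::DoNotOptimize(empty_string);
  }
}
BENCHMARK(BM_StringCreation);
```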
193 vendor/github.com/google/benchmark/src/benchmark.cc generated vendored
@@ -17,7 +17,9 @@
 #include "internal_macros.h"

 #ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
 #include <sys/resource.h>
+#endif
 #include <sys/time.h>
 #include <unistd.h>
 #endif
@@ -27,10 +29,10 @@
 #include <condition_variable>
 #include <cstdio>
 #include <cstdlib>
-#include <cstring>
 #include <fstream>
 #include <iostream>
 #include <memory>
+#include <string>
 #include <thread>

 #include "check.h"
@@ -38,13 +40,14 @@
 #include "commandlineflags.h"
 #include "complexity.h"
 #include "counter.h"
+#include "internal_macros.h"
 #include "log.h"
 #include "mutex.h"
 #include "re.h"
-#include "stat.h"
+#include "statistics.h"
 #include "string_util.h"
-#include "sysinfo.h"
-#include "timers.h"
+#include "thread_manager.h"
+#include "thread_timer.h"

 DEFINE_bool(benchmark_list_tests, false,
             "Print a list of benchmarks. This option overrides all other "
@@ -82,7 +85,7 @@ DEFINE_string(benchmark_out_format, "json",
               "The format to use for file output. Valid values are "
               "'console', 'json', or 'csv'.");

-DEFINE_string(benchmark_out, "", "The file to write additonal output to");
+DEFINE_string(benchmark_out, "", "The file to write additional output to");

 DEFINE_string(benchmark_color, "auto",
               "Whether to use colors in the output. Valid values: "
@@ -99,130 +102,20 @@ DEFINE_bool(benchmark_counters_tabular, false,
 DEFINE_int32(v, 0, "The level of verbose logging to output");
 namespace benchmark {
-namespace internal {
-
-void UseCharPointer(char const volatile*) {}
-
-}  // end namespace internal

 namespace {

 static const size_t kMaxIterations = 1000000000;

 }  // end namespace

 namespace internal {

-class ThreadManager {
- public:
-  ThreadManager(int num_threads)
-      : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
-
-  Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
-    return benchmark_mutex_;
-  }
-
-  bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
-    return start_stop_barrier_.wait();
-  }
-
-  void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
-    start_stop_barrier_.removeThread();
-    if (--alive_threads_ == 0) {
-      MutexLock lock(end_cond_mutex_);
-      end_condition_.notify_all();
-    }
-  }
-
-  void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
-    MutexLock lock(end_cond_mutex_);
-    end_condition_.wait(lock.native_handle(),
-                        [this]() { return alive_threads_ == 0; });
-  }
-
- public:
-  struct Result {
-    double real_time_used = 0;
-    double cpu_time_used = 0;
-    double manual_time_used = 0;
-    int64_t bytes_processed = 0;
-    int64_t items_processed = 0;
-    int complexity_n = 0;
-    std::string report_label_;
-    std::string error_message_;
-    bool has_error_ = false;
-    UserCounters counters;
-  };
-  GUARDED_BY(GetBenchmarkMutex()) Result results;
-
- private:
-  mutable Mutex benchmark_mutex_;
-  std::atomic<int> alive_threads_;
-  Barrier start_stop_barrier_;
-  Mutex end_cond_mutex_;
-  Condition end_condition_;
-};
-
-// Timer management class
-class ThreadTimer {
- public:
-  ThreadTimer() = default;
-
-  // Called by each thread
-  void StartTimer() {
-    running_ = true;
-    start_real_time_ = ChronoClockNow();
-    start_cpu_time_ = ThreadCPUUsage();
-  }
-
-  // Called by each thread
-  void StopTimer() {
-    CHECK(running_);
-    running_ = false;
-    real_time_used_ += ChronoClockNow() - start_real_time_;
-    cpu_time_used_ += ThreadCPUUsage() - start_cpu_time_;
-  }
-
-  // Called by each thread
-  void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
-
-  bool running() const { return running_; }
-
-  // REQUIRES: timer is not running
-  double real_time_used() {
-    CHECK(!running_);
-    return real_time_used_;
-  }
-
-  // REQUIRES: timer is not running
-  double cpu_time_used() {
-    CHECK(!running_);
-    return cpu_time_used_;
-  }
-
-  // REQUIRES: timer is not running
-  double manual_time_used() {
-    CHECK(!running_);
-    return manual_time_used_;
-  }
-
- private:
-  bool running_ = false;        // Is the timer running
-  double start_real_time_ = 0;  // If running_
-  double start_cpu_time_ = 0;   // If running_
-
-  // Accumulated time so far (does not contain current slice if running_)
-  double real_time_used_ = 0;
-  double cpu_time_used_ = 0;
-  // Manually set iteration time. User sets this with SetIterationTime(seconds).
-  double manual_time_used_ = 0;
-};
+void UseCharPointer(char const volatile*) {}
 namespace {

 BenchmarkReporter::Run CreateRunReport(
     const benchmark::internal::Benchmark::Instance& b,
-    const internal::ThreadManager::Result& results, size_t iters,
+    const internal::ThreadManager::Result& results,
     double seconds) {
   // Create report about this benchmark run.
   BenchmarkReporter::Run report;
@@ -231,8 +124,8 @@ BenchmarkReporter::Run CreateRunReport(
   report.error_occurred = results.has_error_;
   report.error_message = results.error_message_;
   report.report_label = results.report_label_;
-  // Report the total iterations across all threads.
-  report.iterations = static_cast<int64_t>(iters) * b.threads;
+  // This is the total iterations across all threads.
+  report.iterations = results.iterations;
   report.time_unit = b.time_unit;

   if (!report.error_occurred) {
@@ -256,6 +149,7 @@ BenchmarkReporter::Run CreateRunReport(
   report.complexity_n = results.complexity_n;
   report.complexity = b.complexity;
   report.complexity_lambda = b.complexity_lambda;
+  report.statistics = b.statistics;
   report.counters = results.counters;
   internal::Finish(&report.counters, seconds, b.threads);
 }
@@ -270,11 +164,12 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b,
   internal::ThreadTimer timer;
   State st(iters, b->arg, thread_id, b->threads, &timer, manager);
   b->benchmark->Run(st);
-  CHECK(st.iterations() == st.max_iterations)
+  CHECK(st.iterations() >= st.max_iterations)
       << "Benchmark returned before State::KeepRunning() returned false!";
   {
     MutexLock l(manager->GetBenchmarkMutex());
     internal::ThreadManager::Result& results = manager->results;
+    results.iterations += st.iterations();
     results.cpu_time_used += timer.cpu_time_used();
     results.real_time_used += timer.real_time_used();
     results.manual_time_used += timer.manual_time_used();
@@ -342,18 +237,17 @@ std::vector<BenchmarkReporter::Run> RunBenchmark(
     // Determine if this run should be reported; Either it has
     // run for a sufficient amount of time or because an error was reported.
     const bool should_report = repetition_num > 0
         || has_explicit_iteration_count  // An exact iteration count was requested
         || results.has_error_
-        || iters >= kMaxIterations
+        || iters >= kMaxIterations  // No chance to try again, we hit the limit.
         || seconds >= min_time  // the elapsed time is large enough
         // CPU time is specified but the elapsed real time greatly exceeds the
         // minimum time. Note that user provided timers are exempt from this
         // sanity check.
         || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time);

     if (should_report) {
-      BenchmarkReporter::Run report =
-          CreateRunReport(b, results, iters, seconds);
+      BenchmarkReporter::Run report = CreateRunReport(b, results, seconds);
       if (!report.error_occurred && b.complexity != oNone)
         complexity_reports->push_back(report);
       reports.push_back(report);
@@ -396,25 +290,44 @@ std::vector<BenchmarkReporter::Run> RunBenchmark(
 }  // namespace
 }  // namespace internal
-State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
+State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
              int n_threads, internal::ThreadTimer* timer,
              internal::ThreadManager* manager)
-    : started_(false),
+    : total_iterations_(0),
+      batch_leftover_(0),
+      max_iterations(max_iters),
+      started_(false),
       finished_(false),
-      total_iterations_(0),
+      error_occurred_(false),
       range_(ranges),
       bytes_processed_(0),
       items_processed_(0),
       complexity_n_(0),
-      error_occurred_(false),
       counters(),
       thread_index(thread_i),
      threads(n_threads),
-      max_iterations(max_iters),
       timer_(timer),
       manager_(manager) {
   CHECK(max_iterations != 0) << "At least one iteration must be run";
   CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
+
+  // Note: The use of offsetof below is technically undefined until C++17
+  // because State is not a standard layout type. However, all compilers
+  // currently provide well-defined behavior as an extension (which is
+  // demonstrated since constexpr evaluation must diagnose all undefined
+  // behavior). However, GCC and Clang also warn about this use of offsetof,
+  // which must be suppressed.
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+#endif
+  // Offset tests to ensure commonly accessed data is on the first cache line.
+  const int cache_line_size = 64;
+  static_assert(offsetof(State, error_occurred_) <=
+                    (cache_line_size - sizeof(error_occurred_)), "");
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
 }

 void State::PauseTiming() {
@@ -438,7 +351,7 @@ void State::SkipWithError(const char* msg) {
       manager_->results.has_error_ = true;
     }
   }
-  total_iterations_ = max_iterations;
+  total_iterations_ = 0;
   if (timer_->running()) timer_->StopTimer();
 }

@@ -454,6 +367,7 @@ void State::SetLabel(const char* label) {
 void State::StartKeepRunning() {
   CHECK(!started_ && !finished_);
   started_ = true;
+  total_iterations_ = error_occurred_ ? 0 : max_iterations;
   manager_->StartStopBarrier();
   if (!error_occurred_) ResumeTiming();
 }
@@ -463,8 +377,8 @@ void State::FinishKeepRunning() {
   if (!error_occurred_) {
     PauseTiming();
   }
-  // Total iterations now is one greater than max iterations. Fix this.
-  total_iterations_ = max_iterations;
+  // Total iterations has now wrapped around past 0. Fix this.
+  total_iterations_ = 0;
   finished_ = true;
   manager_->StartStopBarrier();
 }
@@ -481,22 +395,22 @@ void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
   // Determine the width of the name field using a minimum width of 10.
   bool has_repetitions = FLAGS_benchmark_repetitions > 1;
   size_t name_field_width = 10;
+  size_t stat_field_width = 0;
   for (const Benchmark::Instance& benchmark : benchmarks) {
     name_field_width =
         std::max<size_t>(name_field_width, benchmark.name.size());
     has_repetitions |= benchmark.repetitions > 1;
+
+    for (const auto& Stat : *benchmark.statistics)
+      stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
   }
-  if (has_repetitions) name_field_width += std::strlen("_stddev");
+  if (has_repetitions) name_field_width += 1 + stat_field_width;

   // Print header here
   BenchmarkReporter::Context context;
-  context.num_cpus = NumCPUs();
-  context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f;
-
-  context.cpu_scaling_enabled = CpuScalingEnabled();
   context.name_field_width = name_field_width;

-  // Keep track of runing times of all instances of current benchmark
+  // Keep track of running times of all instances of current benchmark
   std::vector<BenchmarkReporter::Run> complexity_reports;

   // We flush streams after invoking reporter methods that write to them. This
@@ -654,6 +568,7 @@ void PrintUsageAndExit() {

 void ParseCommandLineFlags(int* argc, char** argv) {
   using namespace benchmark;
+  BenchmarkReporter::Context::executable_name = argv[0];
   for (int i = 1; i < *argc; ++i) {
     if (ParseBoolFlag(argv[i], "benchmark_list_tests",
                       &FLAGS_benchmark_list_tests) ||
3 vendor/github.com/google/benchmark/src/benchmark_api_internal.h generated vendored
@@ -17,7 +17,7 @@ struct Benchmark::Instance {
   std::string name;
   Benchmark* benchmark;
   ReportMode report_mode;
-  std::vector<int> arg;
+  std::vector<int64_t> arg;
   TimeUnit time_unit;
   int range_multiplier;
   bool use_real_time;
@@ -25,6 +25,7 @@ struct Benchmark::Instance {
   BigO complexity;
   BigOFunc* complexity_lambda;
   UserCounters counters;
+  const std::vector<Statistics>* statistics;
   bool last_benchmark_instance;
   int repetitions;
   double min_time;
@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2018 Google Inc. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,17 +11,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#ifndef BENCHMARK_REPORTER_H_
-#define BENCHMARK_REPORTER_H_
-
-#ifdef __DEPRECATED
-# ifndef BENCHMARK_WARNING_MSG
-#  warning the reporter.h header has been deprecated and will be removed, please include benchmark.h instead
-# else
-BENCHMARK_WARNING_MSG("the reporter.h header has been deprecated and will be removed, please include benchmark.h instead")
-# endif
-#endif
-
-#include "benchmark.h"  // For forward declaration of BenchmarkReporter
-
-#endif  // BENCHMARK_REPORTER_H_
+#include "benchmark/benchmark.h"
+
+BENCHMARK_MAIN();
100
vendor/github.com/google/benchmark/src/benchmark_register.cc
generated
vendored
100
vendor/github.com/google/benchmark/src/benchmark_register.cc
generated
vendored
@@ -12,12 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "benchmark/benchmark.h"
-#include "benchmark_api_internal.h"
-#include "internal_macros.h"
+#include "benchmark_register.h"
 
 #ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
 #include <sys/resource.h>
+#endif
 #include <sys/time.h>
 #include <unistd.h>
 #endif
@@ -34,15 +34,17 @@
 #include <sstream>
 #include <thread>
 
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
 #include "check.h"
 #include "commandlineflags.h"
 #include "complexity.h"
+#include "internal_macros.h"
 #include "log.h"
 #include "mutex.h"
 #include "re.h"
-#include "stat.h"
+#include "statistics.h"
 #include "string_util.h"
-#include "sysinfo.h"
 #include "timers.h"
 
 namespace benchmark {
@@ -75,7 +77,7 @@ class BenchmarkFamilies {
 
   // Extract the list of benchmark instances that match the specified
   // regular expression.
-  bool FindBenchmarks(const std::string& re,
+  bool FindBenchmarks(std::string re,
                       std::vector<Benchmark::Instance>* benchmarks,
                       std::ostream* Err);
 
@@ -105,13 +107,18 @@ void BenchmarkFamilies::ClearBenchmarks() {
 }
 
 bool BenchmarkFamilies::FindBenchmarks(
-    const std::string& spec, std::vector<Benchmark::Instance>* benchmarks,
+    std::string spec, std::vector<Benchmark::Instance>* benchmarks,
     std::ostream* ErrStream) {
   CHECK(ErrStream);
   auto& Err = *ErrStream;
   // Make regular expression out of command-line flag
   std::string error_msg;
   Regex re;
+  bool isNegativeFilter = false;
+  if(spec[0] == '-') {
+    spec.replace(0, 1, "");
+    isNegativeFilter = true;
+  }
   if (!re.Init(spec, &error_msg)) {
     Err << "Could not compile benchmark re: " << error_msg << std::endl;
     return false;
@@ -159,6 +166,7 @@ bool BenchmarkFamilies::FindBenchmarks(
         instance.use_manual_time = family->use_manual_time_;
         instance.complexity = family->complexity_;
         instance.complexity_lambda = family->complexity_lambda_;
+        instance.statistics = &family->statistics_;
         instance.threads = num_threads;
 
         // Add arguments to instance name
@@ -170,20 +178,20 @@ bool BenchmarkFamilies::FindBenchmarks(
           const auto& arg_name = family->arg_names_[arg_i];
           if (!arg_name.empty()) {
             instance.name +=
-                StringPrintF("%s:", family->arg_names_[arg_i].c_str());
+                StrFormat("%s:", family->arg_names_[arg_i].c_str());
           }
         }
 
-        instance.name += StringPrintF("%d", arg);
+        instance.name += StrFormat("%d", arg);
         ++arg_i;
       }
 
       if (!IsZero(family->min_time_))
-        instance.name += StringPrintF("/min_time:%0.3f", family->min_time_);
+        instance.name += StrFormat("/min_time:%0.3f", family->min_time_);
       if (family->iterations_ != 0)
-        instance.name += StringPrintF("/iterations:%d", family->iterations_);
+        instance.name += StrFormat("/iterations:%d", family->iterations_);
       if (family->repetitions_ != 0)
-        instance.name += StringPrintF("/repeats:%d", family->repetitions_);
+        instance.name += StrFormat("/repeats:%d", family->repetitions_);
 
       if (family->use_manual_time_) {
         instance.name += "/manual_time";
@@ -193,10 +201,11 @@ bool BenchmarkFamilies::FindBenchmarks(
 
       // Add the number of threads used to the name
      if (!family->thread_counts_.empty()) {
-        instance.name += StringPrintF("/threads:%d", instance.threads);
+        instance.name += StrFormat("/threads:%d", instance.threads);
      }
 
-      if (re.Match(instance.name)) {
+      if ((re.Match(instance.name) && !isNegativeFilter) ||
+          (!re.Match(instance.name) && isNegativeFilter)) {
        instance.last_benchmark_instance = (&args == &family->args_.back());
        benchmarks->push_back(std::move(instance));
      }
@@ -236,34 +245,15 @@ Benchmark::Benchmark(const char* name)
       use_real_time_(false),
       use_manual_time_(false),
       complexity_(oNone),
-      complexity_lambda_(nullptr) {}
+      complexity_lambda_(nullptr) {
+  ComputeStatistics("mean", StatisticsMean);
+  ComputeStatistics("median", StatisticsMedian);
+  ComputeStatistics("stddev", StatisticsStdDev);
+}
 
 Benchmark::~Benchmark() {}
 
-void Benchmark::AddRange(std::vector<int>* dst, int lo, int hi, int mult) {
-  CHECK_GE(lo, 0);
-  CHECK_GE(hi, lo);
-  CHECK_GE(mult, 2);
-
-  // Add "lo"
-  dst->push_back(lo);
-
-  static const int kint32max = std::numeric_limits<int32_t>::max();
-
-  // Now space out the benchmarks in multiples of "mult"
-  for (int32_t i = 1; i < kint32max / mult; i *= mult) {
-    if (i >= hi) break;
-    if (i > lo) {
-      dst->push_back(i);
-    }
-  }
-  // Add "hi" (if different from "lo")
-  if (hi != lo) {
-    dst->push_back(hi);
-  }
-}
-
-Benchmark* Benchmark::Arg(int x) {
+Benchmark* Benchmark::Arg(int64_t x) {
   CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
   args_.push_back({x});
   return this;
@@ -274,20 +264,21 @@ Benchmark* Benchmark::Unit(TimeUnit unit) {
   return this;
 }
 
-Benchmark* Benchmark::Range(int start, int limit) {
+Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
   CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
-  std::vector<int> arglist;
+  std::vector<int64_t> arglist;
   AddRange(&arglist, start, limit, range_multiplier_);
 
-  for (int i : arglist) {
+  for (int64_t i : arglist) {
     args_.push_back({i});
   }
   return this;
 }
 
-Benchmark* Benchmark::Ranges(const std::vector<std::pair<int, int>>& ranges) {
+Benchmark* Benchmark::Ranges(
+    const std::vector<std::pair<int64_t, int64_t>>& ranges) {
   CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
-  std::vector<std::vector<int>> arglists(ranges.size());
+  std::vector<std::vector<int64_t>> arglists(ranges.size());
   std::size_t total = 1;
   for (std::size_t i = 0; i < ranges.size(); i++) {
     AddRange(&arglists[i], ranges[i].first, ranges[i].second,
@@ -298,7 +289,7 @@ Benchmark* Benchmark::Ranges(const std::vector<std::pair<int, int>>& ranges) {
   std::vector<std::size_t> ctr(arglists.size(), 0);
 
   for (std::size_t i = 0; i < total; i++) {
-    std::vector<int> tmp;
+    std::vector<int64_t> tmp;
     tmp.reserve(arglists.size());
 
     for (std::size_t j = 0; j < arglists.size(); j++) {
@@ -330,17 +321,17 @@ Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
   return this;
 }
 
-Benchmark* Benchmark::DenseRange(int start, int limit, int step) {
+Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
   CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
   CHECK_GE(start, 0);
   CHECK_LE(start, limit);
-  for (int arg = start; arg <= limit; arg += step) {
+  for (int64_t arg = start; arg <= limit; arg += step) {
     args_.push_back({arg});
   }
   return this;
 }
 
-Benchmark* Benchmark::Args(const std::vector<int>& args) {
+Benchmark* Benchmark::Args(const std::vector<int64_t>& args) {
   CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
   args_.push_back(args);
   return this;
@@ -357,7 +348,6 @@ Benchmark* Benchmark::RangeMultiplier(int multiplier) {
   return this;
 }
 
-
 Benchmark* Benchmark::MinTime(double t) {
   CHECK(t > 0.0);
   CHECK(iterations_ == 0);
@@ -365,7 +355,6 @@ Benchmark* Benchmark::MinTime(double t) {
   return this;
 }
 
-
 Benchmark* Benchmark::Iterations(size_t n) {
   CHECK(n > 0);
   CHECK(IsZero(min_time_));
@@ -409,6 +398,12 @@ Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
   return this;
 }
 
+Benchmark* Benchmark::ComputeStatistics(std::string name,
+                                        StatisticsFunc* statistics) {
+  statistics_.emplace_back(name, statistics);
+  return this;
+}
+
 Benchmark* Benchmark::Threads(int t) {
   CHECK_GT(t, 0);
   thread_counts_.push_back(t);
@@ -437,8 +432,7 @@ Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,
 }
 
 Benchmark* Benchmark::ThreadPerCpu() {
-  static int num_cpus = NumCPUs();
-  thread_counts_.push_back(num_cpus);
+  thread_counts_.push_back(CPUInfo::Get().num_cpus);
   return this;
 }
 
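Two of the changes above are user-visible: the argument setters now take int64_t, and the new ComputeStatistics hook (invoked from the Benchmark constructor for mean/median/stddev) lets callers register extra aggregates. A minimal sketch of how this surfaces in client code, assuming the v1.4-era API vendored here; BM_PushBack is a made-up example benchmark, not part of this commit:

#include <algorithm>
#include <cstdint>
#include <vector>

#include "benchmark/benchmark.h"

static void BM_PushBack(benchmark::State& state) {
  for (auto _ : state) {
    std::vector<int64_t> v;
    v.push_back(state.range(0));  // ranges are int64_t after this update
    benchmark::DoNotOptimize(v.data());
  }
}
BENCHMARK(BM_PushBack)
    ->Range(8, int64_t(1) << 34)  // limits beyond INT32_MAX now fit
    ->Repetitions(4)
    // Registers a "max" aggregate next to the default mean/median/stddev;
    // a captureless lambda converts to the StatisticsFunc* pointer type.
    ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
      return *std::max_element(v.begin(), v.end());
    });
BENCHMARK_MAIN();

The rewritten filter check also means a leading '-' inverts the filter, e.g. --benchmark_filter=-BM_PushBack.* runs everything except the matching benchmarks.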
33
vendor/github.com/google/benchmark/src/benchmark_register.h
generated
vendored
Normal file
@@ -0,0 +1,33 @@
+#ifndef BENCHMARK_REGISTER_H
+#define BENCHMARK_REGISTER_H
+
+#include <vector>
+
+#include "check.h"
+
+template <typename T>
+void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
+  CHECK_GE(lo, 0);
+  CHECK_GE(hi, lo);
+  CHECK_GE(mult, 2);
+
+  // Add "lo"
+  dst->push_back(lo);
+
+  static const T kmax = std::numeric_limits<T>::max();
+
+  // Now space out the benchmarks in multiples of "mult"
+  for (T i = 1; i < kmax / mult; i *= mult) {
+    if (i >= hi) break;
+    if (i > lo) {
+      dst->push_back(i);
+    }
+  }
+
+  // Add "hi" (if different from "lo")
+  if (hi != lo) {
+    dst->push_back(hi);
+  }
+}
+
+#endif  // BENCHMARK_REGISTER_H
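To see what the now-templated AddRange produces, here is a small standalone sketch; the CHECK_* macros are dropped and the helper is renamed AddRangeDemo, since this is an illustration rather than the vendored header itself:

#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

// Same expansion rule as AddRange above: lo, then powers of mult strictly
// between lo and hi, then hi.
template <typename T>
void AddRangeDemo(std::vector<T>* dst, T lo, T hi, int mult) {
  dst->push_back(lo);
  static const T kmax = std::numeric_limits<T>::max();
  for (T i = 1; i < kmax / mult; i *= mult) {
    if (i >= hi) break;
    if (i > lo) dst->push_back(i);
  }
  if (hi != lo) dst->push_back(hi);
}

int main() {
  std::vector<int64_t> v;
  AddRangeDemo<int64_t>(&v, 8, 1024, 8);
  for (int64_t x : v) std::cout << x << ' ';  // prints: 8 64 512 1024
}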
126
vendor/github.com/google/benchmark/src/complexity.cc
generated
vendored
@@ -21,7 +21,6 @@
 #include <cmath>
 #include "check.h"
 #include "complexity.h"
-#include "stat.h"
 
 namespace benchmark {
 
@@ -29,18 +28,18 @@ namespace benchmark {
 BigOFunc* FittingCurve(BigO complexity) {
   switch (complexity) {
     case oN:
-      return [](int n) -> double { return n; };
+      return [](int64_t n) -> double { return static_cast<double>(n); };
     case oNSquared:
-      return [](int n) -> double { return std::pow(n, 2); };
+      return [](int64_t n) -> double { return std::pow(n, 2); };
     case oNCubed:
-      return [](int n) -> double { return std::pow(n, 3); };
+      return [](int64_t n) -> double { return std::pow(n, 3); };
     case oLogN:
-      return [](int n) { return log2(n); };
+      return [](int64_t n) { return log2(n); };
     case oNLogN:
-      return [](int n) { return n * log2(n); };
+      return [](int64_t n) { return n * log2(n); };
     case o1:
     default:
-      return [](int) { return 1.0; };
+      return [](int64_t) { return 1.0; };
   }
 }
 
@@ -66,15 +65,15 @@ std::string GetBigOString(BigO complexity) {
 
 // Find the coefficient for the high-order term in the running time, by
 // minimizing the sum of squares of relative error, for the fitting curve
-// given by the lambda expresion.
+// given by the lambda expression.
 //   - n             : Vector containing the size of the benchmark tests.
 //   - time          : Vector containing the times for the benchmark tests.
-//   - fitting_curve : lambda expresion (e.g. [](int n) {return n; };).
+//   - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).
 
 // For a deeper explanation on the algorithm logic, look the README file at
 // http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
 
-LeastSq MinimalLeastSq(const std::vector<int>& n,
+LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
                        const std::vector<double>& time,
                        BigOFunc* fitting_curve) {
   double sigma_gn = 0.0;
@@ -118,7 +117,7 @@ LeastSq MinimalLeastSq(const std::vector<int>& n,
 //   - complexity : If different than oAuto, the fitting curve will stick to
 //                  this one. If it is oAuto, it will be calculated the best
 //                  fitting curve.
-LeastSq MinimalLeastSq(const std::vector<int>& n,
+LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
                        const std::vector<double>& time, const BigO complexity) {
   CHECK_EQ(n.size(), time.size());
   CHECK_GE(n.size(), 2);  // Do not compute fitting curve is less than two
@@ -150,109 +149,6 @@ LeastSq MinimalLeastSq(const std::vector<int>& n,
   return best_fit;
 }
 
-std::vector<BenchmarkReporter::Run> ComputeStats(
-    const std::vector<BenchmarkReporter::Run>& reports) {
-  typedef BenchmarkReporter::Run Run;
-  std::vector<Run> results;
-
-  auto error_count =
-      std::count_if(reports.begin(), reports.end(),
-                    [](Run const& run) { return run.error_occurred; });
-
-  if (reports.size() - error_count < 2) {
-    // We don't report aggregated data if there was a single run.
-    return results;
-  }
-  // Accumulators.
-  Stat1_d real_accumulated_time_stat;
-  Stat1_d cpu_accumulated_time_stat;
-  Stat1_d bytes_per_second_stat;
-  Stat1_d items_per_second_stat;
-  // All repetitions should be run with the same number of iterations so we
-  // can take this information from the first benchmark.
-  int64_t const run_iterations = reports.front().iterations;
-  // create stats for user counters
-  struct CounterStat {
-    Counter c;
-    Stat1_d s;
-  };
-  std::map< std::string, CounterStat > counter_stats;
-  for(Run const& r : reports) {
-    for(auto const& cnt : r.counters) {
-      auto it = counter_stats.find(cnt.first);
-      if(it == counter_stats.end()) {
-        counter_stats.insert({cnt.first, {cnt.second, Stat1_d{}}});
-      } else {
-        CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
-      }
-    }
-  }
-
-  // Populate the accumulators.
-  for (Run const& run : reports) {
-    CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
-    CHECK_EQ(run_iterations, run.iterations);
-    if (run.error_occurred) continue;
-    real_accumulated_time_stat +=
-        Stat1_d(run.real_accumulated_time / run.iterations);
-    cpu_accumulated_time_stat +=
-        Stat1_d(run.cpu_accumulated_time / run.iterations);
-    items_per_second_stat += Stat1_d(run.items_per_second);
-    bytes_per_second_stat += Stat1_d(run.bytes_per_second);
-    // user counters
-    for(auto const& cnt : run.counters) {
-      auto it = counter_stats.find(cnt.first);
-      CHECK_NE(it, counter_stats.end());
-      it->second.s += Stat1_d(cnt.second);
-    }
-  }
-
-  // Get the data from the accumulator to BenchmarkReporter::Run's.
-  Run mean_data;
-  mean_data.benchmark_name = reports[0].benchmark_name + "_mean";
-  mean_data.iterations = run_iterations;
-  mean_data.real_accumulated_time =
-      real_accumulated_time_stat.Mean() * run_iterations;
-  mean_data.cpu_accumulated_time =
-      cpu_accumulated_time_stat.Mean() * run_iterations;
-  mean_data.bytes_per_second = bytes_per_second_stat.Mean();
-  mean_data.items_per_second = items_per_second_stat.Mean();
-  mean_data.time_unit = reports[0].time_unit;
-  // user counters
-  for(auto const& kv : counter_stats) {
-    auto c = Counter(kv.second.s.Mean(), counter_stats[kv.first].c.flags);
-    mean_data.counters[kv.first] = c;
-  }
-
-  // Only add label to mean/stddev if it is same for all runs
-  mean_data.report_label = reports[0].report_label;
-  for (std::size_t i = 1; i < reports.size(); i++) {
-    if (reports[i].report_label != reports[0].report_label) {
-      mean_data.report_label = "";
-      break;
-    }
-  }
-
-  Run stddev_data;
-  stddev_data.benchmark_name = reports[0].benchmark_name + "_stddev";
-  stddev_data.report_label = mean_data.report_label;
-  stddev_data.iterations = 0;
-  stddev_data.real_accumulated_time = real_accumulated_time_stat.StdDev();
-  stddev_data.cpu_accumulated_time = cpu_accumulated_time_stat.StdDev();
-  stddev_data.bytes_per_second = bytes_per_second_stat.StdDev();
-  stddev_data.items_per_second = items_per_second_stat.StdDev();
-  stddev_data.time_unit = reports[0].time_unit;
-  // user counters
-  for(auto const& kv : counter_stats) {
-    auto c = Counter(kv.second.s.StdDev(), counter_stats[kv.first].c.flags);
-    stddev_data.counters[kv.first] = c;
-  }
-
-  results.push_back(mean_data);
-  results.push_back(stddev_data);
-  return results;
-}
-
 std::vector<BenchmarkReporter::Run> ComputeBigO(
     const std::vector<BenchmarkReporter::Run>& reports) {
   typedef BenchmarkReporter::Run Run;
@@ -261,7 +157,7 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
   if (reports.size() < 2) return results;
 
   // Accumulators.
-  std::vector<int> n;
+  std::vector<int64_t> n;
   std::vector<double> real_time;
   std::vector<double> cpu_time;
 
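Since BigOFunc now takes int64_t, a user-supplied fitting curve has to match the new signature. A minimal sketch under that assumption; BM_LinearScan is an illustrative benchmark, not part of this commit:

#include <cstdint>
#include <vector>

#include "benchmark/benchmark.h"

static void BM_LinearScan(benchmark::State& state) {
  std::vector<int> data(static_cast<size_t>(state.range(0)), 1);
  for (auto _ : state) {
    int64_t sum = 0;
    for (int v : data) sum += v;
    benchmark::DoNotOptimize(sum);
  }
  state.SetComplexityN(state.range(0));
}
// The captureless lambda must accept int64_t to convert to BigOFunc*.
BENCHMARK(BM_LinearScan)
    ->Range(1 << 10, 1 << 18)
    ->Complexity([](int64_t n) -> double { return static_cast<double>(n); });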
7
vendor/github.com/google/benchmark/src/complexity.h
generated
vendored
@@ -25,12 +25,6 @@
 
 namespace benchmark {
 
-// Return a vector containing the mean and standard devation information for
-// the specified list of reports. If 'reports' contains less than two
-// non-errored runs an empty vector is returned
-std::vector<BenchmarkReporter::Run> ComputeStats(
-    const std::vector<BenchmarkReporter::Run>& reports);
-
 // Return a vector containing the bigO and RMS information for the specified
 // list of reports. If 'reports.size() < 2' an empty vector is returned.
 std::vector<BenchmarkReporter::Run> ComputeBigO(
@@ -57,4 +51,5 @@ struct LeastSq {
 std::string GetBigOString(BigO complexity);
 
 }  // end namespace benchmark
+
 #endif  // COMPLEXITY_H_
8
vendor/github.com/google/benchmark/src/console_reporter.cc
generated
vendored
@@ -148,12 +148,14 @@ void ConsoleReporter::PrintRunData(const Run& result) {
   }
 
   for (auto& c : result.counters) {
-    auto const& s = HumanReadableNumber(c.second.value);
+    const std::size_t cNameLen = std::max(std::string::size_type(10),
+                                          c.first.length());
+    auto const& s = HumanReadableNumber(c.second.value, 1000);
     if (output_options_ & OO_Tabular) {
       if (c.second.flags & Counter::kIsRate) {
-        printer(Out, COLOR_DEFAULT, " %8s/s", s.c_str());
+        printer(Out, COLOR_DEFAULT, " %*s/s", cNameLen - 2, s.c_str());
       } else {
-        printer(Out, COLOR_DEFAULT, " %10s", s.c_str());
+        printer(Out, COLOR_DEFAULT, " %*s", cNameLen, s.c_str());
       }
     } else {
       const char* unit = (c.second.flags & Counter::kIsRate) ? "/s" : "";
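The %*s conversion used above takes its field width from a separate argument, so each counter column becomes as wide as max(10, length of the counter name). A self-contained sketch of the same alignment rule, independent of the reporter code:

#include <algorithm>
#include <cstdio>
#include <string>

int main() {
  std::string name = "bytes_per_second";
  std::string value = "1.2G";
  // '*' pulls the width from the int argument that precedes the string.
  int width = static_cast<int>(std::max<std::size_t>(10, name.length()));
  std::printf(" %*s\n", width, value.c_str());  // right-aligned in 16 cols
}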
5
vendor/github.com/google/benchmark/src/cycleclock.h
generated
vendored
@@ -159,6 +159,11 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
   struct timeval tv;
   gettimeofday(&tv, nullptr);
   return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#elif defined(__s390__)  // Covers both s390 and s390x.
+  // Return the CPU clock.
+  uint64_t tsc;
+  asm("stck %0" : "=Q" (tsc) : : "cc");
+  return tsc;
 #else
 // The soft failover to a generic implementation is automatic only for ARM.
 // For other platforms the developer is expected to make an attempt to create
66
vendor/github.com/google/benchmark/src/internal_macros.h
generated
vendored
@@ -6,31 +6,41 @@
 #ifndef __has_feature
 #define __has_feature(x) 0
 #endif
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
 
 #if defined(__clang__)
-#define COMPILER_CLANG
+#if !defined(COMPILER_CLANG)
+#define COMPILER_CLANG
+#endif
 #elif defined(_MSC_VER)
-#define COMPILER_MSVC
+#if !defined(COMPILER_MSVC)
+#define COMPILER_MSVC
+#endif
 #elif defined(__GNUC__)
-#define COMPILER_GCC
+#if !defined(COMPILER_GCC)
+#define COMPILER_GCC
+#endif
 #endif
 
 #if __has_feature(cxx_attributes)
 #define BENCHMARK_NORETURN [[noreturn]]
 #elif defined(__GNUC__)
 #define BENCHMARK_NORETURN __attribute__((noreturn))
 #elif defined(COMPILER_MSVC)
 #define BENCHMARK_NORETURN __declspec(noreturn)
 #else
 #define BENCHMARK_NORETURN
 #endif
 
 #if defined(__CYGWIN__)
 #define BENCHMARK_OS_CYGWIN 1
 #elif defined(_WIN32)
 #define BENCHMARK_OS_WINDOWS 1
 #elif defined(__APPLE__)
-#include "TargetConditionals.h"
+#define BENCHMARK_OS_APPLE 1
+#include "TargetConditionals.h"
 #if defined(TARGET_OS_MAC)
 #define BENCHMARK_OS_MACOSX 1
 #if defined(TARGET_OS_IPHONE)
@@ -38,20 +48,42 @@
 #endif
 #endif
 #elif defined(__FreeBSD__)
 #define BENCHMARK_OS_FREEBSD 1
+#elif defined(__NetBSD__)
+#define BENCHMARK_OS_NETBSD 1
+#elif defined(__OpenBSD__)
+#define BENCHMARK_OS_OPENBSD 1
 #elif defined(__linux__)
 #define BENCHMARK_OS_LINUX 1
 #elif defined(__native_client__)
 #define BENCHMARK_OS_NACL 1
-#elif defined(EMSCRIPTEN)
+#elif defined(__EMSCRIPTEN__)
 #define BENCHMARK_OS_EMSCRIPTEN 1
 #elif defined(__rtems__)
 #define BENCHMARK_OS_RTEMS 1
+#elif defined(__Fuchsia__)
+#define BENCHMARK_OS_FUCHSIA 1
+#elif defined (__SVR4) && defined (__sun)
+#define BENCHMARK_OS_SOLARIS 1
 #endif
 
 #if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
     && !defined(__EXCEPTIONS)
 #define BENCHMARK_HAS_NO_EXCEPTIONS
+#endif
+
+#if defined(COMPILER_CLANG) || defined(COMPILER_GCC)
+#define BENCHMARK_MAYBE_UNUSED __attribute__((unused))
+#else
+#define BENCHMARK_MAYBE_UNUSED
+#endif
+
+#if defined(COMPILER_GCC) || __has_builtin(__builtin_unreachable)
+#define BENCHMARK_UNREACHABLE() __builtin_unreachable()
+#elif defined(COMPILER_MSVC)
+#define BENCHMARK_UNREACHABLE() __assume(false)
+#else
+#define BENCHMARK_UNREACHABLE() ((void)0)
 #endif
 
 #endif  // BENCHMARK_INTERNAL_MACROS_H_
65
vendor/github.com/google/benchmark/src/json_reporter.cc
generated
vendored
@@ -21,6 +21,8 @@
 #include <string>
 #include <tuple>
 #include <vector>
+#include <iomanip> // for setprecision
+#include <limits>
 
 #include "string_util.h"
 #include "timers.h"
@@ -30,15 +32,15 @@ namespace benchmark {
 namespace {
 
 std::string FormatKV(std::string const& key, std::string const& value) {
-  return StringPrintF("\"%s\": \"%s\"", key.c_str(), value.c_str());
+  return StrFormat("\"%s\": \"%s\"", key.c_str(), value.c_str());
 }
 
 std::string FormatKV(std::string const& key, const char* value) {
-  return StringPrintF("\"%s\": \"%s\"", key.c_str(), value);
+  return StrFormat("\"%s\": \"%s\"", key.c_str(), value);
 }
 
 std::string FormatKV(std::string const& key, bool value) {
-  return StringPrintF("\"%s\": %s", key.c_str(), value ? "true" : "false");
+  return StrFormat("\"%s\": %s", key.c_str(), value ? "true" : "false");
 }
 
 std::string FormatKV(std::string const& key, int64_t value) {
@@ -48,7 +50,14 @@ std::string FormatKV(std::string const& key, int64_t value) {
 }
 
 std::string FormatKV(std::string const& key, double value) {
-  return StringPrintF("\"%s\": %.2f", key.c_str(), value);
+  std::stringstream ss;
+  ss << '"' << key << "\": ";
+
+  const auto max_digits10 = std::numeric_limits<decltype (value)>::max_digits10;
+  const auto max_fractional_digits10 = max_digits10 - 1;
+
+  ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
+  return ss.str();
 }
 
 int64_t RoundDouble(double v) { return static_cast<int64_t>(v + 0.5); }
@@ -68,13 +77,41 @@ bool JSONReporter::ReportContext(const Context& context) {
   std::string walltime_value = LocalDateTimeString();
   out << indent << FormatKV("date", walltime_value) << ",\n";
 
-  out << indent << FormatKV("num_cpus", static_cast<int64_t>(context.num_cpus))
+  if (Context::executable_name) {
+    out << indent << FormatKV("executable", Context::executable_name) << ",\n";
+  }
+
+  CPUInfo const& info = context.cpu_info;
+  out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus))
       << ",\n";
-  out << indent << FormatKV("mhz_per_cpu", RoundDouble(context.mhz_per_cpu))
+  out << indent
+      << FormatKV("mhz_per_cpu",
+                  RoundDouble(info.cycles_per_second / 1000000.0))
      << ",\n";
-  out << indent << FormatKV("cpu_scaling_enabled", context.cpu_scaling_enabled)
+  out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled)
      << ",\n";
+
+  out << indent << "\"caches\": [\n";
+  indent = std::string(6, ' ');
+  std::string cache_indent(8, ' ');
+  for (size_t i = 0; i < info.caches.size(); ++i) {
+    auto& CI = info.caches[i];
+    out << indent << "{\n";
+    out << cache_indent << FormatKV("type", CI.type) << ",\n";
+    out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
+        << ",\n";
+    out << cache_indent
+        << FormatKV("size", static_cast<int64_t>(CI.size) * 1000u) << ",\n";
+    out << cache_indent
+        << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
+        << "\n";
+    out << indent << "}";
+    if (i != info.caches.size() - 1) out << ",";
+    out << "\n";
+  }
+  indent = std::string(4, ' ');
+  out << indent << "],\n";
+
 #if defined(NDEBUG)
   const char build_type[] = "release";
 #else
@@ -125,18 +162,18 @@ void JSONReporter::PrintRunData(Run const& run) {
   if (!run.report_big_o && !run.report_rms) {
     out << indent << FormatKV("iterations", run.iterations) << ",\n";
     out << indent
-        << FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime()))
+        << FormatKV("real_time", run.GetAdjustedRealTime())
        << ",\n";
     out << indent
-        << FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
+        << FormatKV("cpu_time", run.GetAdjustedCPUTime());
     out << ",\n"
        << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
   } else if (run.report_big_o) {
     out << indent
-        << FormatKV("cpu_coefficient", RoundDouble(run.GetAdjustedCPUTime()))
+        << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
        << ",\n";
     out << indent
-        << FormatKV("real_coefficient", RoundDouble(run.GetAdjustedRealTime()))
+        << FormatKV("real_coefficient", run.GetAdjustedRealTime())
        << ",\n";
     out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
     out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
@@ -147,17 +184,17 @@ void JSONReporter::PrintRunData(Run const& run) {
   if (run.bytes_per_second > 0.0) {
     out << ",\n"
        << indent
-       << FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second));
+       << FormatKV("bytes_per_second", run.bytes_per_second);
   }
   if (run.items_per_second > 0.0) {
     out << ",\n"
        << indent
-       << FormatKV("items_per_second", RoundDouble(run.items_per_second));
+       << FormatKV("items_per_second", run.items_per_second);
   }
   for(auto &c : run.counters) {
     out << ",\n"
        << indent
-       << FormatKV(c.first, RoundDouble(c.second));
+       << FormatKV(c.first, c.second);
   }
   if (!run.report_label.empty()) {
     out << ",\n" << indent << FormatKV("label", run.report_label);
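The new FormatKV(key, double) prints with max_digits10 - 1 fractional digits in scientific notation, which is enough for a double to round-trip exactly instead of being truncated to two decimals as RoundDouble did. An isolated sketch of the same formatting rule:

#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>

int main() {
  double value = 1.0 / 3.0;
  std::stringstream ss;
  // 16 fractional digits in scientific form = 17 significant digits,
  // the number needed to round-trip an IEEE-754 double.
  const auto max_fractional = std::numeric_limits<double>::max_digits10 - 1;
  ss << std::scientific << std::setprecision(max_fractional) << value;
  std::cout << ss.str() << "\n";  // prints 3.3333333333333331e-01
}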
24
vendor/github.com/google/benchmark/src/re.h
generated
vendored
@@ -17,19 +17,31 @@
 
 #include "internal_macros.h"
 
+#if !defined(HAVE_STD_REGEX) && \
+    !defined(HAVE_GNU_POSIX_REGEX) && \
+    !defined(HAVE_POSIX_REGEX)
+// No explicit regex selection; detect based on builtin hints.
+#if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE)
+#define HAVE_POSIX_REGEX 1
+#elif __cplusplus >= 199711L
+#define HAVE_STD_REGEX 1
+#endif
+#endif
+
 // Prefer C regex libraries when compiling w/o exceptions so that we can
 // correctly report errors.
-#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && defined(HAVE_STD_REGEX) && \
+#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \
+    defined(BENCHMARK_HAVE_STD_REGEX) && \
     (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX))
 #undef HAVE_STD_REGEX
 #endif
 
 #if defined(HAVE_STD_REGEX)
 #include <regex>
 #elif defined(HAVE_GNU_POSIX_REGEX)
 #include <gnuregex.h>
 #elif defined(HAVE_POSIX_REGEX)
 #include <regex.h>
 #else
 #error No regular expression backend was found!
 #endif
@@ -64,7 +76,7 @@ class Regex {
 #elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
   regex_t re_;
 #else
 #error No regular expression backend implementation available
 #endif
 };
 
29
vendor/github.com/google/benchmark/src/reporter.cc
generated
vendored
@@ -22,7 +22,6 @@
 #include <vector>
 
 #include "check.h"
-#include "stat.h"
 
 namespace benchmark {
 
@@ -36,12 +35,27 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
   CHECK(out) << "cannot be null";
   auto &Out = *out;
 
-  Out << "Run on (" << context.num_cpus << " X " << context.mhz_per_cpu
-      << " MHz CPU " << ((context.num_cpus > 1) ? "s" : "") << ")\n";
-
   Out << LocalDateTimeString() << "\n";
 
-  if (context.cpu_scaling_enabled) {
+  if (context.executable_name)
+    Out << "Running " << context.executable_name << "\n";
+
+  const CPUInfo &info = context.cpu_info;
+  Out << "Run on (" << info.num_cpus << " X "
+      << (info.cycles_per_second / 1000000.0) << " MHz CPU "
+      << ((info.num_cpus > 1) ? "s" : "") << ")\n";
+  if (info.caches.size() != 0) {
+    Out << "CPU Caches:\n";
+    for (auto &CInfo : info.caches) {
+      Out << "  L" << CInfo.level << " " << CInfo.type << " "
+          << (CInfo.size / 1000) << "K";
+      if (CInfo.num_sharing != 0)
+        Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
+      Out << "\n";
+    }
+  }
+
+  if (info.scaling_enabled) {
     Out << "***WARNING*** CPU scaling is enabled, the benchmark "
            "real time measurements may be noisy and will incur extra "
            "overhead.\n";
@@ -53,6 +67,11 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
 #endif
 }
 
+// No initializer because it's already initialized to NULL.
+const char* BenchmarkReporter::Context::executable_name;
+
+BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {}
+
 double BenchmarkReporter::Run::GetAdjustedRealTime() const {
   double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
   if (iterations != 0) new_time /= static_cast<double>(iterations);
310
vendor/github.com/google/benchmark/src/stat.h
generated
vendored
@@ -1,310 +0,0 @@
-#ifndef BENCHMARK_STAT_H_
-#define BENCHMARK_STAT_H_
-
-#include <cmath>
-#include <limits>
-#include <ostream>
-#include <type_traits>
-
-namespace benchmark {
-
-template <typename VType, typename NumType>
-class Stat1;
-
-template <typename VType, typename NumType>
-class Stat1MinMax;
-
-typedef Stat1<float, int64_t> Stat1_f;
-typedef Stat1<double, int64_t> Stat1_d;
-typedef Stat1MinMax<float, int64_t> Stat1MinMax_f;
-typedef Stat1MinMax<double, int64_t> Stat1MinMax_d;
-
-template <typename VType>
-class Vector2;
-template <typename VType>
-class Vector3;
-template <typename VType>
-class Vector4;
-
-template <typename VType, typename NumType>
-class Stat1 {
- public:
-  typedef Stat1<VType, NumType> Self;
-
-  Stat1() { Clear(); }
-  // Create a sample of value dat and weight 1
-  explicit Stat1(const VType &dat) {
-    sum_ = dat;
-    sum_squares_ = Sqr(dat);
-    numsamples_ = 1;
-  }
-  // Create statistics for all the samples between begin (included)
-  // and end(excluded)
-  explicit Stat1(const VType *begin, const VType *end) {
-    Clear();
-    for (const VType *item = begin; item < end; ++item) {
-      (*this) += Stat1(*item);
-    }
-  }
-  // Create a sample of value dat and weight w
-  Stat1(const VType &dat, const NumType &w) {
-    sum_ = w * dat;
-    sum_squares_ = w * Sqr(dat);
-    numsamples_ = w;
-  }
-  // Copy operator
-  Stat1(const Self &stat) {
-    sum_ = stat.sum_;
-    sum_squares_ = stat.sum_squares_;
-    numsamples_ = stat.numsamples_;
-  }
-
-  void Clear() {
-    numsamples_ = NumType();
-    sum_squares_ = sum_ = VType();
-  }
-
-  Self &operator=(const Self &stat) {
-    sum_ = stat.sum_;
-    sum_squares_ = stat.sum_squares_;
-    numsamples_ = stat.numsamples_;
-    return (*this);
-  }
-  // Merge statistics from two sample sets.
-  Self &operator+=(const Self &stat) {
-    sum_ += stat.sum_;
-    sum_squares_ += stat.sum_squares_;
-    numsamples_ += stat.numsamples_;
-    return (*this);
-  }
-  // The operation opposite to +=
-  Self &operator-=(const Self &stat) {
-    sum_ -= stat.sum_;
-    sum_squares_ -= stat.sum_squares_;
-    numsamples_ -= stat.numsamples_;
-    return (*this);
-  }
-  // Multiply the weight of the set of samples by a factor k
-  Self &operator*=(const VType &k) {
-    sum_ *= k;
-    sum_squares_ *= k;
-    numsamples_ *= k;
-    return (*this);
-  }
-
-  // Merge statistics from two sample sets.
-  Self operator+(const Self &stat) const { return Self(*this) += stat; }
-
-  // The operation opposite to +
-  Self operator-(const Self &stat) const { return Self(*this) -= stat; }
-
-  // Multiply the weight of the set of samples by a factor k
-  Self operator*(const VType &k) const { return Self(*this) *= k; }
-
-  // Return the total weight of this sample set
-  NumType numSamples() const { return numsamples_; }
-
-  // Return the sum of this sample set
-  VType Sum() const { return sum_; }
-
-  // Return the mean of this sample set
-  VType Mean() const {
-    if (numsamples_ == 0) return VType();
-    return sum_ * (1.0 / numsamples_);
-  }
-
-  // Return the mean of this sample set and compute the standard deviation at
-  // the same time.
-  VType Mean(VType *stddev) const {
-    if (numsamples_ == 0) return VType();
-    VType mean = sum_ * (1.0 / numsamples_);
-    if (stddev) {
-      // Sample standard deviation is undefined for n = 1
-      if (numsamples_ == 1) {
-        *stddev = VType();
-      } else {
-        VType avg_squares = sum_squares_ * (1.0 / numsamples_);
-        *stddev = Sqrt(numsamples_ / (numsamples_ - 1.0) * (avg_squares - Sqr(mean)));
-      }
-    }
-    return mean;
-  }
-
-  // Return the standard deviation of the sample set
-  VType StdDev() const {
-    VType stddev = VType();
-    Mean(&stddev);
-    return stddev;
-  }
-
- private:
-  static_assert(std::is_integral<NumType>::value &&
-                !std::is_same<NumType, bool>::value,
-                "NumType must be an integral type that is not bool.");
-  // Let i be the index of the samples provided (using +=)
-  // and weight[i],value[i] be the data of sample #i
-  // then the variables have the following meaning:
-  NumType numsamples_;  // sum of weight[i];
-  VType sum_;           // sum of weight[i]*value[i];
-  VType sum_squares_;   // sum of weight[i]*value[i]^2;
-
-  // Template function used to square a number.
-  // For a vector we square all components
-  template <typename SType>
-  static inline SType Sqr(const SType &dat) {
-    return dat * dat;
-  }
-
-  template <typename SType>
-  static inline Vector2<SType> Sqr(const Vector2<SType> &dat) {
-    return dat.MulComponents(dat);
-  }
-
-  template <typename SType>
-  static inline Vector3<SType> Sqr(const Vector3<SType> &dat) {
-    return dat.MulComponents(dat);
-  }
-
-  template <typename SType>
-  static inline Vector4<SType> Sqr(const Vector4<SType> &dat) {
-    return dat.MulComponents(dat);
-  }
-
-  // Template function used to take the square root of a number.
-  // For a vector we square all components
-  template <typename SType>
-  static inline SType Sqrt(const SType &dat) {
-    // Avoid NaN due to imprecision in the calculations
-    if (dat < 0) return 0;
-    return sqrt(dat);
-  }
-
-  template <typename SType>
-  static inline Vector2<SType> Sqrt(const Vector2<SType> &dat) {
-    // Avoid NaN due to imprecision in the calculations
-    return Max(dat, Vector2<SType>()).Sqrt();
-  }
-
-  template <typename SType>
-  static inline Vector3<SType> Sqrt(const Vector3<SType> &dat) {
-    // Avoid NaN due to imprecision in the calculations
-    return Max(dat, Vector3<SType>()).Sqrt();
-  }
-
-  template <typename SType>
-  static inline Vector4<SType> Sqrt(const Vector4<SType> &dat) {
-    // Avoid NaN due to imprecision in the calculations
-    return Max(dat, Vector4<SType>()).Sqrt();
-  }
-};
-
-// Useful printing function
-template <typename VType, typename NumType>
-std::ostream &operator<<(std::ostream &out, const Stat1<VType, NumType> &s) {
-  out << "{ avg = " << s.Mean() << " std = " << s.StdDev()
-      << " nsamples = " << s.NumSamples() << "}";
-  return out;
-}
-
-// Stat1MinMax: same as Stat1, but it also
-// keeps the Min and Max values; the "-"
-// operator is disabled because it cannot be implemented
-// efficiently
-template <typename VType, typename NumType>
-class Stat1MinMax : public Stat1<VType, NumType> {
- public:
-  typedef Stat1MinMax<VType, NumType> Self;
-
-  Stat1MinMax() { Clear(); }
-  // Create a sample of value dat and weight 1
-  explicit Stat1MinMax(const VType &dat) : Stat1<VType, NumType>(dat) {
-    max_ = dat;
-    min_ = dat;
-  }
-  // Create statistics for all the samples between begin (included)
-  // and end(excluded)
-  explicit Stat1MinMax(const VType *begin, const VType *end) {
-    Clear();
-    for (const VType *item = begin; item < end; ++item) {
-      (*this) += Stat1MinMax(*item);
-    }
-  }
-  // Create a sample of value dat and weight w
-  Stat1MinMax(const VType &dat, const NumType &w)
-      : Stat1<VType, NumType>(dat, w) {
-    max_ = dat;
-    min_ = dat;
-  }
-  // Copy operator
-  Stat1MinMax(const Self &stat) : Stat1<VType, NumType>(stat) {
-    max_ = stat.max_;
-    min_ = stat.min_;
-  }
-
-  void Clear() {
-    Stat1<VType, NumType>::Clear();
-    if (std::numeric_limits<VType>::has_infinity) {
-      min_ = std::numeric_limits<VType>::infinity();
-      max_ = -std::numeric_limits<VType>::infinity();
-    } else {
-      min_ = std::numeric_limits<VType>::max();
-      max_ = std::numeric_limits<VType>::min();
-    }
-  }
-
-  Self &operator=(const Self &stat) {
-    this->Stat1<VType, NumType>::operator=(stat);
-    max_ = stat.max_;
-    min_ = stat.min_;
-    return (*this);
-  }
-  // Merge statistics from two sample sets.
-  Self &operator+=(const Self &stat) {
-    this->Stat1<VType, NumType>::operator+=(stat);
-    if (stat.max_ > max_) max_ = stat.max_;
-    if (stat.min_ < min_) min_ = stat.min_;
-    return (*this);
-  }
-  // Multiply the weight of the set of samples by a factor k
-  Self &operator*=(const VType &stat) {
-    this->Stat1<VType, NumType>::operator*=(stat);
-    return (*this);
-  }
-  // Merge statistics from two sample sets.
-  Self operator+(const Self &stat) const { return Self(*this) += stat; }
-  // Multiply the weight of the set of samples by a factor k
-  Self operator*(const VType &k) const { return Self(*this) *= k; }
-
-  // Return the maximal value in this sample set
-  VType Max() const { return max_; }
-  // Return the minimal value in this sample set
-  VType Min() const { return min_; }
-
- private:
-  // The - operation makes no sense with Min/Max
-  // unless we keep the full list of values (but we don't)
-  // make it private, and let it undefined so nobody can call it
-  Self &operator-=(const Self &stat);  // senseless. let it undefined.
-
-  // The operation opposite to -
-  Self operator-(const Self &stat) const;  // senseless. let it undefined.
-
-  // Let i be the index of the samples provided (using +=)
-  // and weight[i],value[i] be the data of sample #i
-  // then the variables have the following meaning:
-  VType max_;  // max of value[i]
-  VType min_;  // min of value[i]
-};
-
-// Useful printing function
-template <typename VType, typename NumType>
-std::ostream &operator<<(std::ostream &out,
-                         const Stat1MinMax<VType, NumType> &s) {
-  out << "{ avg = " << s.Mean() << " std = " << s.StdDev()
-      << " nsamples = " << s.NumSamples() << " min = " << s.Min()
-      << " max = " << s.Max() << "}";
-  return out;
-}
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_STAT_H_
178
vendor/github.com/google/benchmark/src/statistics.cc
generated
vendored
Normal file
@ -0,0 +1,178 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
// Copyright 2017 Roman Lebedev. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark/benchmark.h"

#include <algorithm>
#include <cmath>
#include <string>
#include <vector>
#include <numeric>
#include "check.h"
#include "statistics.h"

namespace benchmark {

auto StatisticsSum = [](const std::vector<double>& v) {
  return std::accumulate(v.begin(), v.end(), 0.0);
};

double StatisticsMean(const std::vector<double>& v) {
  if (v.empty()) return 0.0;
  return StatisticsSum(v) * (1.0 / v.size());
}

double StatisticsMedian(const std::vector<double>& v) {
  if (v.size() < 3) return StatisticsMean(v);
  std::vector<double> copy(v);

  auto center = copy.begin() + v.size() / 2;
  std::nth_element(copy.begin(), center, copy.end());

  // Did we have an odd number of samples?
  // If yes, then center is the median.
  // If no, then we are looking for the average of center and the value before.
  if(v.size() % 2 == 1)
    return *center;
  auto center2 = copy.begin() + v.size() / 2 - 1;
  std::nth_element(copy.begin(), center2, copy.end());
  return (*center + *center2) / 2.0;
}

// Return the sum of the squares of this sample set
auto SumSquares = [](const std::vector<double>& v) {
  return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
};

auto Sqr = [](const double dat) { return dat * dat; };
auto Sqrt = [](const double dat) {
  // Avoid NaN due to imprecision in the calculations
  if (dat < 0.0) return 0.0;
  return std::sqrt(dat);
};

double StatisticsStdDev(const std::vector<double>& v) {
  const auto mean = StatisticsMean(v);
  if (v.empty()) return mean;

  // Sample standard deviation is undefined for n = 1
  if (v.size() == 1)
    return 0.0;

  const double avg_squares = SumSquares(v) * (1.0 / v.size());
  return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
}

std::vector<BenchmarkReporter::Run> ComputeStats(
    const std::vector<BenchmarkReporter::Run>& reports) {
  typedef BenchmarkReporter::Run Run;
  std::vector<Run> results;

  auto error_count =
      std::count_if(reports.begin(), reports.end(),
                    [](Run const& run) { return run.error_occurred; });

  if (reports.size() - error_count < 2) {
    // We don't report aggregated data if there was a single run.
    return results;
  }

  // Accumulators.
  std::vector<double> real_accumulated_time_stat;
  std::vector<double> cpu_accumulated_time_stat;
  std::vector<double> bytes_per_second_stat;
  std::vector<double> items_per_second_stat;

  real_accumulated_time_stat.reserve(reports.size());
  cpu_accumulated_time_stat.reserve(reports.size());
  bytes_per_second_stat.reserve(reports.size());
  items_per_second_stat.reserve(reports.size());

  // All repetitions should be run with the same number of iterations so we
  // can take this information from the first benchmark.
  int64_t const run_iterations = reports.front().iterations;
  // create stats for user counters
  struct CounterStat {
    Counter c;
    std::vector<double> s;
  };
  std::map< std::string, CounterStat > counter_stats;
  for(Run const& r : reports) {
    for(auto const& cnt : r.counters) {
      auto it = counter_stats.find(cnt.first);
      if(it == counter_stats.end()) {
        counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
        it = counter_stats.find(cnt.first);
        it->second.s.reserve(reports.size());
      } else {
        CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
      }
    }
  }

  // Populate the accumulators.
  for (Run const& run : reports) {
    CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
    CHECK_EQ(run_iterations, run.iterations);
    if (run.error_occurred) continue;
    real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
    cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
    items_per_second_stat.emplace_back(run.items_per_second);
    bytes_per_second_stat.emplace_back(run.bytes_per_second);
    // user counters
    for(auto const& cnt : run.counters) {
      auto it = counter_stats.find(cnt.first);
      CHECK_NE(it, counter_stats.end());
      it->second.s.emplace_back(cnt.second);
    }
  }

  // Only add label if it is same for all runs
  std::string report_label = reports[0].report_label;
  for (std::size_t i = 1; i < reports.size(); i++) {
    if (reports[i].report_label != report_label) {
      report_label = "";
      break;
    }
  }

  for(const auto& Stat : *reports[0].statistics) {
    // Get the data from the accumulator to BenchmarkReporter::Run's.
    Run data;
    data.benchmark_name = reports[0].benchmark_name + "_" + Stat.name_;
    data.report_label = report_label;
    data.iterations = run_iterations;

    data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
    data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
    data.bytes_per_second = Stat.compute_(bytes_per_second_stat);
    data.items_per_second = Stat.compute_(items_per_second_stat);

    data.time_unit = reports[0].time_unit;

    // user counters
    for(auto const& kv : counter_stats) {
      const auto uc_stat = Stat.compute_(kv.second.s);
      auto c = Counter(uc_stat, counter_stats[kv.first].c.flags);
      data.counters[kv.first] = c;
    }

    results.push_back(data);
  }

  return results;
}

}  // end namespace benchmark
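
For orientation, the three exported helpers compose as follows: ComputeStats collects one vector of doubles per metric across repetitions and runs each registered statistic over it. A minimal standalone sketch of their behavior, with made-up sample values that are not part of the library:

// Hedged sketch: exercises the helpers declared in statistics.h with
// hypothetical inputs; the expected values are worked out by hand.
#include <iostream>
#include <vector>
#include "statistics.h"

int main() {
  const std::vector<double> v = {1.0, 2.0, 4.0, 8.0};
  // Mean: (1 + 2 + 4 + 8) / 4 = 3.75
  std::cout << benchmark::StatisticsMean(v) << "\n";
  // Even sample count, so the median averages the two middle values:
  // (2 + 4) / 2 = 3
  std::cout << benchmark::StatisticsMedian(v) << "\n";
  // Sample (n - 1) standard deviation, computed as in StatisticsStdDev above.
  std::cout << benchmark::StatisticsStdDev(v) << "\n";
  return 0;
}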
37
vendor/github.com/google/benchmark/src/statistics.h
generated
vendored
Normal file
@ -0,0 +1,37 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
// Copyright 2017 Roman Lebedev. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef STATISTICS_H_
#define STATISTICS_H_

#include <vector>

#include "benchmark/benchmark.h"

namespace benchmark {

// Return a vector containing the mean, median and standard deviation
// information (and any user-specified info) for the specified list of reports.
// If 'reports' contains fewer than two non-errored runs, an empty vector is
// returned.
std::vector<BenchmarkReporter::Run> ComputeStats(
    const std::vector<BenchmarkReporter::Run>& reports);

double StatisticsMean(const std::vector<double>& v);
double StatisticsMedian(const std::vector<double>& v);
double StatisticsStdDev(const std::vector<double>& v);

}  // end namespace benchmark

#endif  // STATISTICS_H_
18
vendor/github.com/google/benchmark/src/string_util.cc
generated
vendored
@ -27,8 +27,6 @@ static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
 
 static const int64_t kUnitsSize = arraysize(kBigSIUnits);
 
-}  // end anonymous namespace
-
 void ToExponentAndMantissa(double val, double thresh, int precision,
                            double one_k, std::string* mantissa,
                            int64_t* exponent) {
@ -100,14 +98,16 @@ std::string ExponentToPrefix(int64_t exponent, bool iec) {
 }
 
 std::string ToBinaryStringFullySpecified(double value, double threshold,
-                                         int precision) {
+                                         int precision, double one_k = 1024.0) {
   std::string mantissa;
   int64_t exponent;
-  ToExponentAndMantissa(value, threshold, precision, 1024.0, &mantissa,
+  ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa,
                         &exponent);
   return mantissa + ExponentToPrefix(exponent, false);
 }
 
+}  // end namespace
+
 void AppendHumanReadable(int n, std::string* str) {
   std::stringstream ss;
   // Round down to the nearest SI prefix.
@ -115,14 +115,14 @@ void AppendHumanReadable(int n, std::string* str) {
   *str += ss.str();
 }
 
-std::string HumanReadableNumber(double n) {
+std::string HumanReadableNumber(double n, double one_k) {
   // 1.1 means that figures up to 1.1k should be shown with the next unit down;
   // this softens edge effects.
   // 1 means that we should show one decimal place of precision.
-  return ToBinaryStringFullySpecified(n, 1.1, 1);
+  return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
 }
 
-std::string StringPrintFImp(const char* msg, va_list args) {
+std::string StrFormatImp(const char* msg, va_list args) {
   // we might need a second shot at this, so pre-emptively make a copy
   va_list args_cp;
   va_copy(args_cp, args);
@ -152,10 +152,10 @@ std::string StringPrintFImp(const char* msg, va_list args) {
   return std::string(buff_ptr.get());
 }
 
-std::string StringPrintF(const char* format, ...) {
+std::string StrFormat(const char* format, ...) {
   va_list args;
   va_start(args, format);
-  std::string tmp = StringPrintFImp(format, args);
+  std::string tmp = StrFormatImp(format, args);
   va_end(args);
   return tmp;
 }
12
vendor/github.com/google/benchmark/src/string_util.h
generated
vendored
@ -10,25 +10,25 @@ namespace benchmark {
 
 void AppendHumanReadable(int n, std::string* str);
 
-std::string HumanReadableNumber(double n);
+std::string HumanReadableNumber(double n, double one_k = 1024.0);
 
-std::string StringPrintF(const char* format, ...);
+std::string StrFormat(const char* format, ...);
 
-inline std::ostream& StringCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
+inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
   return out;
 }
 
 template <class First, class... Rest>
-inline std::ostream& StringCatImp(std::ostream& out, First&& f,
+inline std::ostream& StrCatImp(std::ostream& out, First&& f,
                                   Rest&&... rest) {
   out << std::forward<First>(f);
-  return StringCatImp(out, std::forward<Rest>(rest)...);
+  return StrCatImp(out, std::forward<Rest>(rest)...);
 }
 
 template <class... Args>
 inline std::string StrCat(Args&&... args) {
   std::ostringstream ss;
-  StringCatImp(ss, std::forward<Args>(args)...);
+  StrCatImp(ss, std::forward<Args>(args)...);
   return ss.str();
 }
 
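
The renames above are mechanical (StringPrintF becomes StrFormat, StringCatImp becomes StrCatImp behind the unchanged StrCat), and HumanReadableNumber gains a one_k parameter so callers can choose base-1024 or base-1000 scaling. A hedged usage sketch against these declarations; the printed suffixes are indicative only:

// Hedged sketch against the string_util.h declarations shown above.
#include <iostream>
#include "string_util.h"

int main() {
  // StrCat streams every argument through operator<< and concatenates.
  std::string path =
      benchmark::StrCat("/sys/devices/system/cpu/cpu", 0, "/cpufreq");
  // StrFormat is printf-style formatting into a std::string.
  std::string msg = benchmark::StrFormat("%d CPUs", 4);
  // one_k picks the scaling base: 1024 by default, 1000 for SI units.
  std::cout << benchmark::HumanReadableNumber(2048.0) << "\n";          // base 1024
  std::cout << benchmark::HumanReadableNumber(2000.0, 1000.0) << "\n";  // base 1000
  std::cout << path << " / " << msg << "\n";
  return 0;
}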
726
vendor/github.com/google/benchmark/src/sysinfo.cc
generated
vendored
@ -12,34 +12,47 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "sysinfo.h"
 #include "internal_macros.h"
 
 #ifdef BENCHMARK_OS_WINDOWS
 #include <Shlwapi.h>
+#undef StrCat  // Don't let StrCat in string_util.h be renamed to lstrcatA
 #include <VersionHelpers.h>
 #include <Windows.h>
 #else
 #include <fcntl.h>
+#ifndef BENCHMARK_OS_FUCHSIA
 #include <sys/resource.h>
+#endif
 #include <sys/time.h>
 #include <sys/types.h>  // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
 #include <unistd.h>
-#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
+#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
+    defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD
+#define BENCHMARK_HAS_SYSCTL
 #include <sys/sysctl.h>
 #endif
 #endif
+#if defined(BENCHMARK_OS_SOLARIS)
+#include <kstat.h>
+#endif
 
+#include <algorithm>
+#include <array>
+#include <bitset>
 #include <cerrno>
+#include <climits>
 #include <cstdint>
 #include <cstdio>
 #include <cstdlib>
 #include <cstring>
+#include <fstream>
 #include <iostream>
+#include <iterator>
 #include <limits>
-#include <mutex>
+#include <memory>
+#include <sstream>
 
-#include "arraysize.h"
 #include "check.h"
 #include "cycleclock.h"
 #include "internal_macros.h"
@ -49,214 +62,466 @@
 
 namespace benchmark {
 namespace {
-std::once_flag cpuinfo_init;
-double cpuinfo_cycles_per_second = 1.0;
-int cpuinfo_num_cpus = 1;  // Conservative guess
 
-#if !defined BENCHMARK_OS_MACOSX
-const int64_t estimate_time_ms = 1000;
-
-// Helper function estimates cycles/sec by observing cycles elapsed during
-// sleep(). Using small sleep time decreases accuracy significantly.
-int64_t EstimateCyclesPerSecond() {
-  const int64_t start_ticks = cycleclock::Now();
-  SleepForMilliseconds(estimate_time_ms);
-  return cycleclock::Now() - start_ticks;
+void PrintImp(std::ostream& out) { out << std::endl; }
+
+template <class First, class... Rest>
+void PrintImp(std::ostream& out, First&& f, Rest&&... rest) {
+  out << std::forward<First>(f);
+  PrintImp(out, std::forward<Rest>(rest)...);
 }
-#endif
 
-#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
-// Helper function for reading an int from a file. Returns true if successful
-// and the memory location pointed to by value is set to the value read.
-bool ReadIntFromFile(const char* file, long* value) {
-  bool ret = false;
-  int fd = open(file, O_RDONLY);
-  if (fd != -1) {
-    char line[1024];
-    char* err;
-    memset(line, '\0', sizeof(line));
-    ssize_t read_err = read(fd, line, sizeof(line) - 1);
-    ((void)read_err);  // prevent unused warning
-    CHECK(read_err >= 0);
-    const long temp_value = strtol(line, &err, 10);
-    if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
-      *value = temp_value;
-      ret = true;
+template <class... Args>
+BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) {
+  PrintImp(std::cerr, std::forward<Args>(args)...);
+  std::exit(EXIT_FAILURE);
+}
+
+#ifdef BENCHMARK_HAS_SYSCTL
+
+/// ValueUnion - A type used to correctly alias the byte-for-byte output of
+/// `sysctl` with the result type it's to be interpreted as.
+struct ValueUnion {
+  union DataT {
+    uint32_t uint32_value;
+    uint64_t uint64_value;
+    // For correct aliasing of union members from bytes.
+    char bytes[8];
+  };
+  using DataPtr = std::unique_ptr<DataT, decltype(&std::free)>;
+
+  // The size of the data union member + its trailing array size.
+  size_t Size;
+  DataPtr Buff;
+
+ public:
+  ValueUnion() : Size(0), Buff(nullptr, &std::free) {}
+
+  explicit ValueUnion(size_t BuffSize)
+      : Size(sizeof(DataT) + BuffSize),
+        Buff(::new (std::malloc(Size)) DataT(), &std::free) {}
+
+  ValueUnion(ValueUnion&& other) = default;
+
+  explicit operator bool() const { return bool(Buff); }
+
+  char* data() const { return Buff->bytes; }
+
+  std::string GetAsString() const { return std::string(data()); }
+
+  int64_t GetAsInteger() const {
+    if (Size == sizeof(Buff->uint32_value))
+      return static_cast<int32_t>(Buff->uint32_value);
+    else if (Size == sizeof(Buff->uint64_value))
+      return static_cast<int64_t>(Buff->uint64_value);
+    BENCHMARK_UNREACHABLE();
+  }
+
+  uint64_t GetAsUnsigned() const {
+    if (Size == sizeof(Buff->uint32_value))
+      return Buff->uint32_value;
+    else if (Size == sizeof(Buff->uint64_value))
+      return Buff->uint64_value;
+    BENCHMARK_UNREACHABLE();
+  }
+
+  template <class T, int N>
+  std::array<T, N> GetAsArray() {
+    const int ArrSize = sizeof(T) * N;
+    CHECK_LE(ArrSize, Size);
+    std::array<T, N> Arr;
+    std::memcpy(Arr.data(), data(), ArrSize);
+    return Arr;
+  }
+};
+
+ValueUnion GetSysctlImp(std::string const& Name) {
+#if defined BENCHMARK_OS_OPENBSD
+  int mib[2];
+
+  mib[0] = CTL_HW;
+  if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){
+    ValueUnion buff(sizeof(int));
+
+    if (Name == "hw.ncpu") {
+      mib[1] = HW_NCPU;
+    } else {
+      mib[1] = HW_CPUSPEED;
     }
-    close(fd);
+
+    if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) {
+      return ValueUnion();
+    }
+    return buff;
   }
-  return ret;
+  return ValueUnion();
+#else
+  size_t CurBuffSize = 0;
+  if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1)
+    return ValueUnion();
+
+  ValueUnion buff(CurBuffSize);
+  if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0)
+    return buff;
+  return ValueUnion();
+#endif
+}
+
+BENCHMARK_MAYBE_UNUSED
+bool GetSysctl(std::string const& Name, std::string* Out) {
+  Out->clear();
+  auto Buff = GetSysctlImp(Name);
+  if (!Buff) return false;
+  Out->assign(Buff.data());
+  return true;
+}
+
+template <class Tp,
+          class = typename std::enable_if<std::is_integral<Tp>::value>::type>
+bool GetSysctl(std::string const& Name, Tp* Out) {
+  *Out = 0;
+  auto Buff = GetSysctlImp(Name);
+  if (!Buff) return false;
+  *Out = static_cast<Tp>(Buff.GetAsUnsigned());
+  return true;
+}
+
+template <class Tp, size_t N>
+bool GetSysctl(std::string const& Name, std::array<Tp, N>* Out) {
+  auto Buff = GetSysctlImp(Name);
+  if (!Buff) return false;
+  *Out = Buff.GetAsArray<Tp, N>();
+  return true;
 }
 #endif
 
-#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
-static std::string convertToLowerCase(std::string s) {
-  for (auto& ch : s)
-    ch = std::tolower(ch);
-  return s;
+template <class ArgT>
+bool ReadFromFile(std::string const& fname, ArgT* arg) {
+  *arg = ArgT();
+  std::ifstream f(fname.c_str());
+  if (!f.is_open()) return false;
+  f >> *arg;
+  return f.good();
 }
-static bool startsWithKey(std::string Value, std::string Key,
-                          bool IgnoreCase = true) {
-  if (IgnoreCase) {
-    Key = convertToLowerCase(std::move(Key));
-    Value = convertToLowerCase(std::move(Value));
+
+bool CpuScalingEnabled(int num_cpus) {
+  // We don't have a valid CPU count, so don't even bother.
+  if (num_cpus <= 0) return false;
+#ifndef BENCHMARK_OS_WINDOWS
+  // On Linux, the CPUfreq subsystem exposes CPU information as files on the
+  // local file system. If reading the exported files fails, then we may not be
+  // running on Linux, so we silently ignore all the read errors.
+  std::string res;
+  for (int cpu = 0; cpu < num_cpus; ++cpu) {
+    std::string governor_file =
+        StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
+    if (ReadFromFile(governor_file, &res) && res != "performance") return true;
   }
-  return Value.compare(0, Key.size(), Key) == 0;
+#endif
+  return false;
+}
+
+int CountSetBitsInCPUMap(std::string Val) {
+  auto CountBits = [](std::string Part) {
+    using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
+    Part = "0x" + Part;
+    CPUMask Mask(std::stoul(Part, nullptr, 16));
+    return static_cast<int>(Mask.count());
+  };
+  size_t Pos;
+  int total = 0;
+  while ((Pos = Val.find(',')) != std::string::npos) {
+    total += CountBits(Val.substr(0, Pos));
+    Val = Val.substr(Pos + 1);
+  }
+  if (!Val.empty()) {
+    total += CountBits(Val);
+  }
+  return total;
+}
+
+BENCHMARK_MAYBE_UNUSED
+std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
+  std::vector<CPUInfo::CacheInfo> res;
+  std::string dir = "/sys/devices/system/cpu/cpu0/cache/";
+  int Idx = 0;
+  while (true) {
+    CPUInfo::CacheInfo info;
+    std::string FPath = StrCat(dir, "index", Idx++, "/");
+    std::ifstream f(StrCat(FPath, "size").c_str());
+    if (!f.is_open()) break;
+    std::string suffix;
+    f >> info.size;
+    if (f.fail())
+      PrintErrorAndDie("Failed while reading file '", FPath, "size'");
+    if (f.good()) {
+      f >> suffix;
+      if (f.bad())
+        PrintErrorAndDie(
+            "Invalid cache size format: failed to read size suffix");
+      else if (f && suffix != "K")
+        PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix);
+      else if (suffix == "K")
+        info.size *= 1000;
+    }
+    if (!ReadFromFile(StrCat(FPath, "type"), &info.type))
+      PrintErrorAndDie("Failed to read from file ", FPath, "type");
+    if (!ReadFromFile(StrCat(FPath, "level"), &info.level))
+      PrintErrorAndDie("Failed to read from file ", FPath, "level");
+    std::string map_str;
+    if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str))
+      PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map");
+    info.num_sharing = CountSetBitsInCPUMap(map_str);
+    res.push_back(info);
+  }
+
+  return res;
+}
+
+#ifdef BENCHMARK_OS_MACOSX
+std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
+  std::vector<CPUInfo::CacheInfo> res;
+  std::array<uint64_t, 4> CacheCounts{{0, 0, 0, 0}};
+  GetSysctl("hw.cacheconfig", &CacheCounts);
+
+  struct {
+    std::string name;
+    std::string type;
+    int level;
+    size_t num_sharing;
+  } Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]},
+               {"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
+               {"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
+               {"hw.l3cachesize", "Unified", 3, CacheCounts[3]}};
+  for (auto& C : Cases) {
+    int val;
+    if (!GetSysctl(C.name, &val)) continue;
+    CPUInfo::CacheInfo info;
+    info.type = C.type;
+    info.level = C.level;
+    info.size = val;
+    info.num_sharing = static_cast<int>(C.num_sharing);
+    res.push_back(std::move(info));
+  }
+  return res;
+}
+#elif defined(BENCHMARK_OS_WINDOWS)
+std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
+  std::vector<CPUInfo::CacheInfo> res;
+  DWORD buffer_size = 0;
+  using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
+  using CInfo = CACHE_DESCRIPTOR;
+
+  using UPtr = std::unique_ptr<PInfo, decltype(&std::free)>;
+  GetLogicalProcessorInformation(nullptr, &buffer_size);
+  UPtr buff((PInfo*)malloc(buffer_size), &std::free);
+  if (!GetLogicalProcessorInformation(buff.get(), &buffer_size))
+    PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ",
+                     GetLastError());
+
+  PInfo* it = buff.get();
+  PInfo* end = buff.get() + (buffer_size / sizeof(PInfo));
+
+  for (; it != end; ++it) {
+    if (it->Relationship != RelationCache) continue;
+    using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>;
+    BitSet B(it->ProcessorMask);
+    // To prevent duplicates, only consider caches where CPU 0 is specified
+    if (!B.test(0)) continue;
+    CInfo* Cache = &it->Cache;
+    CPUInfo::CacheInfo C;
+    C.num_sharing = static_cast<int>(B.count());
+    C.level = Cache->Level;
+    C.size = Cache->Size;
+    switch (Cache->Type) {
+      case CacheUnified:
+        C.type = "Unified";
+        break;
+      case CacheInstruction:
+        C.type = "Instruction";
+        break;
+      case CacheData:
+        C.type = "Data";
+        break;
+      case CacheTrace:
+        C.type = "Trace";
+        break;
+      default:
+        C.type = "Unknown";
+        break;
+    }
+    res.push_back(C);
+  }
+  return res;
 }
 #endif
 
-void InitializeSystemInfo() {
+std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
+#ifdef BENCHMARK_OS_MACOSX
+  return GetCacheSizesMacOSX();
+#elif defined(BENCHMARK_OS_WINDOWS)
+  return GetCacheSizesWindows();
+#else
+  return GetCacheSizesFromKVFS();
+#endif
+}
+
+int GetNumCPUs() {
+#ifdef BENCHMARK_HAS_SYSCTL
+  int NumCPU = -1;
+  if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU;
+  fprintf(stderr, "Err: %s\n", strerror(errno));
+  std::exit(EXIT_FAILURE);
+#elif defined(BENCHMARK_OS_WINDOWS)
+  SYSTEM_INFO sysinfo;
+  // Use memset as opposed to = {} to avoid GCC missing initializer false
+  // positives.
+  std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
+  GetSystemInfo(&sysinfo);
+  return sysinfo.dwNumberOfProcessors;  // number of logical
+                                        // processors in the current
+                                        // group
+#elif defined(BENCHMARK_OS_SOLARIS)
+  // Returns -1 in case of a failure.
+  int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
+  if (NumCPU < 0) {
+    fprintf(stderr,
+            "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
+            strerror(errno));
+  }
+  return NumCPU;
+#else
+  int NumCPUs = 0;
+  int MaxID = -1;
+  std::ifstream f("/proc/cpuinfo");
+  if (!f.is_open()) {
+    std::cerr << "failed to open /proc/cpuinfo\n";
+    return -1;
+  }
+  const std::string Key = "processor";
+  std::string ln;
+  while (std::getline(f, ln)) {
+    if (ln.empty()) continue;
+    size_t SplitIdx = ln.find(':');
+    std::string value;
+    if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
+    if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) {
+      NumCPUs++;
+      if (!value.empty()) {
+        int CurID = std::stoi(value);
+        MaxID = std::max(CurID, MaxID);
+      }
+    }
+  }
+  if (f.bad()) {
+    std::cerr << "Failure reading /proc/cpuinfo\n";
+    return -1;
+  }
+  if (!f.eof()) {
+    std::cerr << "Failed to read to end of /proc/cpuinfo\n";
+    return -1;
+  }
+  f.close();
+
+  if ((MaxID + 1) != NumCPUs) {
+    fprintf(stderr,
+            "CPU ID assignments in /proc/cpuinfo seem messed up."
+            " This is usually caused by a bad BIOS.\n");
+  }
+  return NumCPUs;
+#endif
+  BENCHMARK_UNREACHABLE();
+}
+
+double GetCPUCyclesPerSecond() {
 #if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
-  char line[1024];
-  char* err;
   long freq;
 
-  bool saw_mhz = false;
-
   // If the kernel is exporting the tsc frequency use that. There are issues
   // where cpuinfo_max_freq cannot be relied on because the BIOS may be
   // exporintg an invalid p-state (on x86) or p-states may be used to put the
   // processor in a new mode (turbo mode). Essentially, those frequencies
   // cannot always be relied upon. The same reasons apply to /proc/cpuinfo as
   // well.
-  if (!saw_mhz &&
-      ReadIntFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
+  if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)
+      // If CPU scaling is in effect, we want to use the *maximum* frequency,
+      // not whatever CPU speed some random processor happens to be using now.
+      || ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+                      &freq)) {
     // The value is in kHz (as the file name suggests). For example, on a
     // 2GHz warpstation, the file contains the value "2000000".
-    cpuinfo_cycles_per_second = freq * 1000.0;
-    saw_mhz = true;
+    return freq * 1000.0;
   }
 
-  // If CPU scaling is in effect, we want to use the *maximum* frequency,
-  // not whatever CPU speed some random processor happens to be using now.
-  if (!saw_mhz &&
-      ReadIntFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
-                      &freq)) {
-    // The value is in kHz. For example, on a 2GHz warpstation, the file
-    // contains the value "2000000".
-    cpuinfo_cycles_per_second = freq * 1000.0;
-    saw_mhz = true;
-  }
+  const double error_value = -1;
+  double bogo_clock = error_value;
 
-  // Read /proc/cpuinfo for other values, and if there is no cpuinfo_max_freq.
-  const char* pname = "/proc/cpuinfo";
-  int fd = open(pname, O_RDONLY);
-  if (fd == -1) {
-    perror(pname);
-    if (!saw_mhz) {
-      cpuinfo_cycles_per_second =
-          static_cast<double>(EstimateCyclesPerSecond());
-    }
-    return;
+  std::ifstream f("/proc/cpuinfo");
+  if (!f.is_open()) {
+    std::cerr << "failed to open /proc/cpuinfo\n";
+    return error_value;
   }
 
-  double bogo_clock = 1.0;
-  bool saw_bogo = false;
-  long max_cpu_id = 0;
-  int num_cpus = 0;
-  line[0] = line[1] = '\0';
-  size_t chars_read = 0;
-  do {  // we'll exit when the last read didn't read anything
-    // Move the next line to the beginning of the buffer
-    const size_t oldlinelen = strlen(line);
-    if (sizeof(line) == oldlinelen + 1)  // oldlinelen took up entire line
-      line[0] = '\0';
-    else  // still other lines left to save
-      memmove(line, line + oldlinelen + 1, sizeof(line) - (oldlinelen + 1));
-    // Terminate the new line, reading more if we can't find the newline
-    char* newline = strchr(line, '\n');
-    if (newline == nullptr) {
-      const size_t linelen = strlen(line);
-      const size_t bytes_to_read = sizeof(line) - 1 - linelen;
-      CHECK(bytes_to_read > 0);  // because the memmove recovered >=1 bytes
-      chars_read = read(fd, line + linelen, bytes_to_read);
-      line[linelen + chars_read] = '\0';
-      newline = strchr(line, '\n');
-    }
-    if (newline != nullptr) *newline = '\0';
+  auto startsWithKey = [](std::string const& Value, std::string const& Key) {
+    if (Key.size() > Value.size()) return false;
+    auto Cmp = [&](char X, char Y) {
+      return std::tolower(X) == std::tolower(Y);
+    };
+    return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp);
+  };
 
+  std::string ln;
+  while (std::getline(f, ln)) {
+    if (ln.empty()) continue;
+    size_t SplitIdx = ln.find(':');
+    std::string value;
+    if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
     // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
-    // accept postive values. Some environments (virtual machines) report zero,
+-    // accept postive values. Some environments (virtual machines) report zero,
++    // accept positive values. Some environments (virtual machines) report zero,
     // which would cause infinite looping in WallTime_Init.
-    if (!saw_mhz && startsWithKey(line, "cpu MHz")) {
-      const char* freqstr = strchr(line, ':');
-      if (freqstr) {
-        cpuinfo_cycles_per_second = strtod(freqstr + 1, &err) * 1000000.0;
-        if (freqstr[1] != '\0' && *err == '\0' && cpuinfo_cycles_per_second > 0)
-          saw_mhz = true;
+    if (startsWithKey(ln, "cpu MHz")) {
+      if (!value.empty()) {
+        double cycles_per_second = std::stod(value) * 1000000.0;
+        if (cycles_per_second > 0) return cycles_per_second;
       }
-    } else if (startsWithKey(line, "bogomips")) {
-      const char* freqstr = strchr(line, ':');
-      if (freqstr) {
-        bogo_clock = strtod(freqstr + 1, &err) * 1000000.0;
-        if (freqstr[1] != '\0' && *err == '\0' && bogo_clock > 0)
-          saw_bogo = true;
+    } else if (startsWithKey(ln, "bogomips")) {
+      if (!value.empty()) {
+        bogo_clock = std::stod(value) * 1000000.0;
+        if (bogo_clock < 0.0) bogo_clock = error_value;
       }
-    } else if (startsWithKey(line, "processor", /*IgnoreCase*/false)) {
-      // The above comparison is case-sensitive because ARM kernels often
-      // include a "Processor" line that tells you about the CPU, distinct
-      // from the usual "processor" lines that give you CPU ids. No current
-      // Linux architecture is using "Processor" for CPU ids.
-      num_cpus++;  // count up every time we see an "processor :" entry
-      const char* id_str = strchr(line, ':');
-      if (id_str) {
-        const long cpu_id = strtol(id_str + 1, &err, 10);
-        if (id_str[1] != '\0' && *err == '\0' && max_cpu_id < cpu_id)
-          max_cpu_id = cpu_id;
-      }
     }
-  } while (chars_read > 0);
-  close(fd);
-
-  if (!saw_mhz) {
-    if (saw_bogo) {
-      // If we didn't find anything better, we'll use bogomips, but
-      // we're not happy about it.
-      cpuinfo_cycles_per_second = bogo_clock;
-    } else {
-      // If we don't even have bogomips, we'll use the slow estimation.
-      cpuinfo_cycles_per_second =
-          static_cast<double>(EstimateCyclesPerSecond());
-    }
   }
-  if (num_cpus == 0) {
-    fprintf(stderr, "Failed to read num. CPUs correctly from /proc/cpuinfo\n");
-  } else {
-    if ((max_cpu_id + 1) != num_cpus) {
-      fprintf(stderr,
-              "CPU ID assignments in /proc/cpuinfo seem messed up."
-              " This is usually caused by a bad BIOS.\n");
-    }
-    cpuinfo_num_cpus = num_cpus;
+
+  if (f.bad()) {
+    std::cerr << "Failure reading /proc/cpuinfo\n";
+    return error_value;
   }
+  if (!f.eof()) {
+    std::cerr << "Failed to read to end of /proc/cpuinfo\n";
+    return error_value;
+  }
+  f.close();
+  // If we found the bogomips clock, but nothing better, we'll use it (but
+  // we're not happy about it); otherwise, fallback to the rough estimation
+  // below.
+  if (bogo_clock >= 0.0) return bogo_clock;
 
-#elif defined BENCHMARK_OS_FREEBSD
-  // For this sysctl to work, the machine must be configured without
-  // SMP, APIC, or APM support.  hz should be 64-bit in freebsd 7.0
-  // and later.  Before that, it's a 32-bit quantity (and gives the
-  // wrong answer on machines faster than 2^32 Hz).  See
-  //  http://lists.freebsd.org/pipermail/freebsd-i386/2004-November/001846.html
-  // But also compare FreeBSD 7.0:
-  //  http://fxr.watson.org/fxr/source/i386/i386/tsc.c?v=RELENG70#L223
-  //  231         error = sysctl_handle_quad(oidp, &freq, 0, req);
-  // To FreeBSD 6.3 (it's the same in 6-STABLE):
-  //  http://fxr.watson.org/fxr/source/i386/i386/tsc.c?v=RELENG6#L131
-  //  139         error = sysctl_handle_int(oidp, &freq, sizeof(freq), req);
-#if __FreeBSD__ >= 7
-  uint64_t hz = 0;
+#elif defined BENCHMARK_HAS_SYSCTL
+  constexpr auto* FreqStr =
+#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD)
+      "machdep.tsc_freq";
+#elif defined BENCHMARK_OS_OPENBSD
+      "hw.cpuspeed";
 #else
-  unsigned int hz = 0;
+      "hw.cpufrequency";
 #endif
-  size_t sz = sizeof(hz);
-  const char* sysctl_path = "machdep.tsc_freq";
-  if (sysctlbyname(sysctl_path, &hz, &sz, nullptr, 0) != 0) {
-    fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
-            sysctl_path, strerror(errno));
-    cpuinfo_cycles_per_second = static_cast<double>(EstimateCyclesPerSecond());
-  } else {
-    cpuinfo_cycles_per_second = hz;
-  }
-  // TODO: also figure out cpuinfo_num_cpus
+  unsigned long long hz = 0;
+#if defined BENCHMARK_OS_OPENBSD
+  if (GetSysctl(FreqStr, &hz)) return hz * 1000000;
+#else
+  if (GetSysctl(FreqStr, &hz)) return hz;
+#endif
+  fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
+          FreqStr, strerror(errno));
 
 #elif defined BENCHMARK_OS_WINDOWS
   // In NT, read MHz from the registry. If we fail to do so or we're in win9x
@ -267,89 +532,56 @@ void InitializeSystemInfo() {
   SHGetValueA(HKEY_LOCAL_MACHINE,
               "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
               "~MHz", nullptr, &data, &data_size)))
-    cpuinfo_cycles_per_second =
-        static_cast<double>((int64_t)data * (int64_t)(1000 * 1000));  // was mhz
-  else
-    cpuinfo_cycles_per_second = static_cast<double>(EstimateCyclesPerSecond());
-
-  SYSTEM_INFO sysinfo;
-  // Use memset as opposed to = {} to avoid GCC missing initializer false
-  // positives.
-  std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
-  GetSystemInfo(&sysinfo);
-  cpuinfo_num_cpus = sysinfo.dwNumberOfProcessors;  // number of logical
-                                                    // processors in the current
-                                                    // group
-
-#elif defined BENCHMARK_OS_MACOSX
-  int32_t num_cpus = 0;
-  size_t size = sizeof(num_cpus);
-  if (::sysctlbyname("hw.ncpu", &num_cpus, &size, nullptr, 0) == 0 &&
-      (size == sizeof(num_cpus))) {
-    cpuinfo_num_cpus = num_cpus;
-  } else {
-    fprintf(stderr, "%s\n", strerror(errno));
-    std::exit(EXIT_FAILURE);
+    return static_cast<double>((int64_t)data *
+                               (int64_t)(1000 * 1000));  // was mhz
+#elif defined (BENCHMARK_OS_SOLARIS)
+  kstat_ctl_t *kc = kstat_open();
+  if (!kc) {
+    std::cerr << "failed to open /dev/kstat\n";
+    return -1;
   }
-  int64_t cpu_freq = 0;
-  size = sizeof(cpu_freq);
-  if (::sysctlbyname("hw.cpufrequency", &cpu_freq, &size, nullptr, 0) == 0 &&
-      (size == sizeof(cpu_freq))) {
-    cpuinfo_cycles_per_second = cpu_freq;
-  } else {
-#if defined BENCHMARK_OS_IOS
-    fprintf(stderr, "CPU frequency cannot be detected. \n");
-    cpuinfo_cycles_per_second = 0;
-#else
-    fprintf(stderr, "%s\n", strerror(errno));
-    std::exit(EXIT_FAILURE);
-#endif
+  kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0");
+  if (!ksp) {
+    std::cerr << "failed to lookup in /dev/kstat\n";
+    return -1;
   }
-#else
-  // Generic cycles per second counter
-  cpuinfo_cycles_per_second = static_cast<double>(EstimateCyclesPerSecond());
+  if (kstat_read(kc, ksp, NULL) < 0) {
+    std::cerr << "failed to read from /dev/kstat\n";
+    return -1;
+  }
+  kstat_named_t *knp =
+      (kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz");
+  if (!knp) {
+    std::cerr << "failed to lookup data in /dev/kstat\n";
+    return -1;
+  }
+  if (knp->data_type != KSTAT_DATA_UINT64) {
+    std::cerr << "current_clock_Hz is of unexpected data type: "
+              << knp->data_type << "\n";
+    return -1;
+  }
+  double clock_hz = knp->value.ui64;
+  kstat_close(kc);
+  return clock_hz;
 #endif
+  // If we've fallen through, attempt to roughly estimate the CPU clock rate.
+  const int estimate_time_ms = 1000;
+  const auto start_ticks = cycleclock::Now();
+  SleepForMilliseconds(estimate_time_ms);
+  return static_cast<double>(cycleclock::Now() - start_ticks);
 }
 
 }  // end namespace
 
-double CyclesPerSecond(void) {
-  std::call_once(cpuinfo_init, InitializeSystemInfo);
-  return cpuinfo_cycles_per_second;
+const CPUInfo& CPUInfo::Get() {
+  static const CPUInfo* info = new CPUInfo();
+  return *info;
 }
 
-int NumCPUs(void) {
-  std::call_once(cpuinfo_init, InitializeSystemInfo);
-  return cpuinfo_num_cpus;
-}
-
-// The ""'s catch people who don't pass in a literal for "str"
-#define strliterallen(str) (sizeof("" str "") - 1)
-
-// Must use a string literal for prefix.
-#define memprefix(str, len, prefix)                       \
-  ((((len) >= strliterallen(prefix)) &&                   \
-    std::memcmp(str, prefix, strliterallen(prefix)) == 0) \
-       ? str + strliterallen(prefix)                      \
-       : nullptr)
-
-bool CpuScalingEnabled() {
-#ifndef BENCHMARK_OS_WINDOWS
-  // On Linux, the CPUfreq subsystem exposes CPU information as files on the
-  // local file system. If reading the exported files fails, then we may not be
-  // running on Linux, so we silently ignore all the read errors.
-  for (int cpu = 0, num_cpus = NumCPUs(); cpu < num_cpus; ++cpu) {
-    std::string governor_file =
-        StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
-    FILE* file = fopen(governor_file.c_str(), "r");
-    if (!file) break;
-    char buff[16];
-    size_t bytes_read = fread(buff, 1, sizeof(buff), file);
-    fclose(file);
-    if (memprefix(buff, bytes_read, "performance") == nullptr) return true;
-  }
-#endif
-  return false;
-}
+CPUInfo::CPUInfo()
+    : num_cpus(GetNumCPUs()),
+      cycles_per_second(GetCPUCyclesPerSecond()),
+      caches(GetCacheSizes()),
+      scaling_enabled(CpuScalingEnabled(num_cpus)) {}
 
 }  // end namespace benchmark
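
The net effect of this rewrite: the std::call_once globals behind CyclesPerSecond(), NumCPUs() and CpuScalingEnabled() are replaced by a CPUInfo singleton built once on first use. A hedged consumer-side sketch; the field names follow the constructor initializer list above, while the exact public declaration lives in benchmark/benchmark.h:

// Hedged sketch: reads the CPUInfo singleton populated by the code above.
#include <cstdio>
#include "benchmark/benchmark.h"

void PrintCpuSummary() {
  const benchmark::CPUInfo& info = benchmark::CPUInfo::Get();  // built once
  std::printf("cpus=%d cycles/s=%.0f scaling=%s\n", info.num_cpus,
              info.cycles_per_second, info.scaling_enabled ? "on" : "off");
  for (const auto& cache : info.caches)  // one entry per cache level/type
    std::printf("L%d %s cache: %d bytes, shared by %d CPUs\n", cache.level,
                cache.type.c_str(), cache.size, cache.num_sharing);
}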
10
vendor/github.com/google/benchmark/src/sysinfo.h
generated
vendored
@ -1,10 +0,0 @@
-#ifndef BENCHMARK_SYSINFO_H_
-#define BENCHMARK_SYSINFO_H_
-
-namespace benchmark {
-int NumCPUs();
-double CyclesPerSecond();
-bool CpuScalingEnabled();
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_SYSINFO_H_
66
vendor/github.com/google/benchmark/src/thread_manager.h
generated
vendored
Normal file
@ -0,0 +1,66 @@
#ifndef BENCHMARK_THREAD_MANAGER_H
#define BENCHMARK_THREAD_MANAGER_H

#include <atomic>

#include "benchmark/benchmark.h"
#include "mutex.h"

namespace benchmark {
namespace internal {

class ThreadManager {
 public:
  ThreadManager(int num_threads)
      : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}

  Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
    return benchmark_mutex_;
  }

  bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
    return start_stop_barrier_.wait();
  }

  void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
    start_stop_barrier_.removeThread();
    if (--alive_threads_ == 0) {
      MutexLock lock(end_cond_mutex_);
      end_condition_.notify_all();
    }
  }

  void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
    MutexLock lock(end_cond_mutex_);
    end_condition_.wait(lock.native_handle(),
                        [this]() { return alive_threads_ == 0; });
  }

 public:
  struct Result {
    int64_t iterations = 0;
    double real_time_used = 0;
    double cpu_time_used = 0;
    double manual_time_used = 0;
    int64_t bytes_processed = 0;
    int64_t items_processed = 0;
    int64_t complexity_n = 0;
    std::string report_label_;
    std::string error_message_;
    bool has_error_ = false;
    UserCounters counters;
  };
  GUARDED_BY(GetBenchmarkMutex()) Result results;

 private:
  mutable Mutex benchmark_mutex_;
  std::atomic<int> alive_threads_;
  Barrier start_stop_barrier_;
  Mutex end_cond_mutex_;
  Condition end_condition_;
};

}  // namespace internal
}  // namespace benchmark

#endif  // BENCHMARK_THREAD_MANAGER_H
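
ThreadManager is the rendezvous point for multi-threaded runs: workers synchronize on the barrier, then check out via NotifyThreadComplete(), while the coordinating thread blocks in WaitForAllThreads(). A hedged sketch of that pattern; the loop body and function name are invented for illustration:

// Hedged sketch of the coordination pattern ThreadManager encodes.
#include <thread>
#include <vector>
#include "thread_manager.h"

void RunWorkers(int num_threads) {
  benchmark::internal::ThreadManager manager(num_threads);
  std::vector<std::thread> pool;
  for (int i = 0; i < num_threads; ++i) {
    pool.emplace_back([&manager] {
      manager.StartStopBarrier();      // wait until every worker is ready
      // ... the timed benchmark body would run here ...
      manager.NotifyThreadComplete();  // leave the barrier, decrement count
    });
  }
  manager.WaitForAllThreads();  // returns once the alive count hits zero
  for (auto& t : pool) t.join();
}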
69
vendor/github.com/google/benchmark/src/thread_timer.h
generated
vendored
Normal file
@ -0,0 +1,69 @@
#ifndef BENCHMARK_THREAD_TIMER_H
#define BENCHMARK_THREAD_TIMER_H

#include "check.h"
#include "timers.h"

namespace benchmark {
namespace internal {

class ThreadTimer {
 public:
  ThreadTimer() = default;

  // Called by each thread
  void StartTimer() {
    running_ = true;
    start_real_time_ = ChronoClockNow();
    start_cpu_time_ = ThreadCPUUsage();
  }

  // Called by each thread
  void StopTimer() {
    CHECK(running_);
    running_ = false;
    real_time_used_ += ChronoClockNow() - start_real_time_;
    // Floating point error can result in the subtraction producing a negative
    // time. Guard against that.
    cpu_time_used_ += std::max<double>(ThreadCPUUsage() - start_cpu_time_, 0);
  }

  // Called by each thread
  void SetIterationTime(double seconds) { manual_time_used_ += seconds; }

  bool running() const { return running_; }

  // REQUIRES: timer is not running
  double real_time_used() {
    CHECK(!running_);
    return real_time_used_;
  }

  // REQUIRES: timer is not running
  double cpu_time_used() {
    CHECK(!running_);
    return cpu_time_used_;
  }

  // REQUIRES: timer is not running
  double manual_time_used() {
    CHECK(!running_);
    return manual_time_used_;
  }

 private:
  bool running_ = false;        // Is the timer running
  double start_real_time_ = 0;  // If running_
  double start_cpu_time_ = 0;   // If running_

  // Accumulated time so far (does not contain current slice if running_)
  double real_time_used_ = 0;
  double cpu_time_used_ = 0;
  // Manually set iteration time. User sets this with SetIterationTime(seconds).
  double manual_time_used_ = 0;
};

}  // namespace internal
}  // namespace benchmark

#endif  // BENCHMARK_THREAD_TIMER_H
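
ThreadTimer accumulates across start/stop slices rather than measuring a single interval, which is why the accessors CHECK that the timer is stopped before reading. A hedged single-thread sketch:

// Hedged sketch: two timed slices accumulate into one total.
#include "thread_timer.h"

double MeasureTwoSlices() {
  benchmark::internal::ThreadTimer timer;
  timer.StartTimer();
  // ... first slice of work ...
  timer.StopTimer();  // adds the slice to the accumulated real/cpu time
  timer.StartTimer();
  // ... second slice of work ...
  timer.StopTimer();
  return timer.real_time_used();  // CHECKs that the timer is not running
}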
11
vendor/github.com/google/benchmark/src/timers.cc
generated
vendored
@ -17,11 +17,14 @@
|
|
||||||
#ifdef BENCHMARK_OS_WINDOWS
|
#ifdef BENCHMARK_OS_WINDOWS
|
||||||
#include <Shlwapi.h>
|
#include <Shlwapi.h>
|
||||||
|
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
|
||||||
#include <VersionHelpers.h>
|
#include <VersionHelpers.h>
|
||||||
#include <Windows.h>
|
#include <Windows.h>
|
||||||
#else
|
#else
|
||||||
#include <fcntl.h>
|
#include <fcntl.h>
|
||||||
|
#ifndef BENCHMARK_OS_FUCHSIA
|
||||||
#include <sys/resource.h>
|
#include <sys/resource.h>
|
||||||
|
#endif
|
||||||
#include <sys/time.h>
|
#include <sys/time.h>
|
||||||
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
|
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
@ -74,7 +77,7 @@ double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
|
|||||||
static_cast<double>(user.QuadPart)) *
|
static_cast<double>(user.QuadPart)) *
|
||||||
1e-7;
|
1e-7;
|
||||||
}
|
}
|
||||||
#else
|
#elif !defined(BENCHMARK_OS_FUCHSIA)
|
||||||
double MakeTime(struct rusage const& ru) {
|
double MakeTime(struct rusage const& ru) {
|
||||||
return (static_cast<double>(ru.ru_utime.tv_sec) +
|
return (static_cast<double>(ru.ru_utime.tv_sec) +
|
||||||
static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
|
static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
|
||||||
@ -162,6 +165,10 @@ double ThreadCPUUsage() {
|
|||||||
// RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
|
// RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
|
||||||
// https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
|
// https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
|
||||||
return ProcessCPUUsage();
|
return ProcessCPUUsage();
|
||||||
|
#elif defined(BENCHMARK_OS_SOLARIS)
|
||||||
|
struct rusage ru;
|
||||||
|
if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru);
|
||||||
|
DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
|
||||||
#elif defined(CLOCK_THREAD_CPUTIME_ID)
|
#elif defined(CLOCK_THREAD_CPUTIME_ID)
|
||||||
struct timespec ts;
|
struct timespec ts;
|
||||||
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
|
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
|
||||||
@ -186,7 +193,6 @@ std::string DateTimeString(bool local) {
|
|||||||
std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
|
std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
|
||||||
#else
|
#else
|
||||||
std::tm timeinfo;
|
std::tm timeinfo;
|
||||||
std::memset(&timeinfo, 0, sizeof(std::tm));
|
|
||||||
::localtime_r(&now, &timeinfo);
|
::localtime_r(&now, &timeinfo);
|
||||||
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
|
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
|
||||||
#endif
|
#endif
|
||||||
@ -195,7 +201,6 @@ std::string DateTimeString(bool local) {
|
|||||||
written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
|
written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
|
||||||
#else
|
#else
|
||||||
std::tm timeinfo;
|
std::tm timeinfo;
|
||||||
std::memset(&timeinfo, 0, sizeof(std::tm));
|
|
||||||
::gmtime_r(&now, &timeinfo);
|
::gmtime_r(&now, &timeinfo);
|
||||||
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
|
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
|
||||||
#endif
|
#endif
|
||||||
|
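The new Solaris branch above reads per-thread CPU time via getrusage(RUSAGE_LWP, ...). A minimal standalone sketch of that dispatch, assuming only POSIX getrusage: MakeTime mirrors the library's rusage-to-seconds conversion, and a plain fprintf/abort stands in for the library's internal DiagnoseAndExit helper.

#include <sys/resource.h>

#include <cstdio>
#include <cstdlib>

// Sum user and system time from an rusage sample, in seconds
// (mirrors the library's MakeTime(struct rusage const&)).
static double MakeTime(struct rusage const& ru) {
  return (static_cast<double>(ru.ru_utime.tv_sec) +
          static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
          static_cast<double>(ru.ru_stime.tv_sec) +
          static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
}

double ThreadCPUUsage() {
#if defined(RUSAGE_LWP)
  // Solaris: RUSAGE_LWP reports usage for the calling light-weight process
  // (thread), which is exactly what a per-thread timer needs.
  struct rusage ru;
  if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru);
  std::fprintf(stderr, "getrusage(RUSAGE_LWP, ...) failed\n");
  std::abort();
#else
  // Fallback for this sketch only: whole-process usage.
  struct rusage ru;
  getrusage(RUSAGE_SELF, &ru);
  return MakeTime(ru);
#endif
}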
46 vendor/github.com/google/benchmark/test/AssemblyTests.cmake generated vendored Normal file
@@ -0,0 +1,46 @@
+
+include(split_list)
+
+set(ASM_TEST_FLAGS "")
+check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
+if (BENCHMARK_HAS_O3_FLAG)
+  list(APPEND ASM_TEST_FLAGS -O3)
+endif()
+
+check_cxx_compiler_flag(-g0 BENCHMARK_HAS_G0_FLAG)
+if (BENCHMARK_HAS_G0_FLAG)
+  list(APPEND ASM_TEST_FLAGS -g0)
+endif()
+
+check_cxx_compiler_flag(-fno-stack-protector BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
+if (BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
+  list(APPEND ASM_TEST_FLAGS -fno-stack-protector)
+endif()
+
+split_list(ASM_TEST_FLAGS)
+string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER)
+
+macro(add_filecheck_test name)
+  cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV})
+  add_library(${name} OBJECT ${name}.cc)
+  set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}")
+  set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s")
+  add_custom_target(copy_${name} ALL
+      COMMAND ${PROJECT_SOURCE_DIR}/tools/strip_asm.py
+        $<TARGET_OBJECTS:${name}>
+        ${ASM_OUTPUT_FILE}
+      BYPRODUCTS ${ASM_OUTPUT_FILE})
+  add_dependencies(copy_${name} ${name})
+  if (NOT ARG_CHECK_PREFIXES)
+    set(ARG_CHECK_PREFIXES "CHECK")
+  endif()
+  foreach(prefix ${ARG_CHECK_PREFIXES})
+    add_test(NAME run_${name}_${prefix}
+        COMMAND
+          ${LLVM_FILECHECK_EXE} ${name}.cc
+          --input-file=${ASM_OUTPUT_FILE}
+          --check-prefixes=CHECK,CHECK-${ASM_TEST_COMPILER}
+        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+  endforeach()
+endmacro()
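add_filecheck_test above compiles <name>.cc to assembly (-S), normalizes it with tools/strip_asm.py, and runs LLVM FileCheck over the result using the CHECK and CHECK-<COMPILER> prefixes. The test sources it consumes are ordinary C++ files carrying their FileCheck assertions as comments; a minimal sketch in that style (the function name and the exact instructions matched are illustrative and compiler-dependent):

#include <benchmark/benchmark.h>

extern "C" {
// Defined in another translation unit so the store below cannot be folded.
extern int ExternInt;
}

// CHECK-LABEL: test_store_survives:
extern "C" void test_store_survives() {
  ExternInt = 7;
  benchmark::ClobberMemory();  // the compiler must assume the store is observed
  // CHECK: movl $7, ExternInt(%rip)
  // CHECK: ret
}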
65 vendor/github.com/google/benchmark/test/BUILD generated vendored Normal file
@@ -0,0 +1,65 @@
+TEST_COPTS = [
+    "-pedantic",
+    "-pedantic-errors",
+    "-std=c++11",
+    "-Wall",
+    "-Wextra",
+    "-Wshadow",
+    #    "-Wshorten-64-to-32",
+    "-Wfloat-equal",
+    "-fstrict-aliasing",
+]
+
+PER_SRC_COPTS = ({
+    "cxx03_test.cc": ["-std=c++03"],
+    # Some of the issues with DoNotOptimize only occur when optimization is enabled
+    "donotoptimize_test.cc": ["-O3"],
+})
+
+TEST_ARGS = ["--benchmark_min_time=0.01"]
+
+PER_SRC_TEST_ARGS = ({
+    "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"],
+})
+
+cc_library(
+    name = "output_test_helper",
+    testonly = 1,
+    srcs = ["output_test_helper.cc"],
+    hdrs = ["output_test.h"],
+    copts = TEST_COPTS,
+    deps = [
+        "//:benchmark",
+        "//:benchmark_internal_headers",
+    ],
+)
+
+[
+    cc_test(
+        name = test_src[:-len(".cc")],
+        size = "small",
+        srcs = [test_src],
+        args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []),
+        copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []),
+        deps = [
+            ":output_test_helper",
+            "//:benchmark",
+            "//:benchmark_internal_headers",
+            "@com_google_googletest//:gtest",
+        ] + (
+            ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else []
+        ),
+        # FIXME: Add support for assembly tests to bazel.
+        # See Issue #556
+        # https://github.com/google/benchmark/issues/556
+    ) for test_src in glob(["*test.cc"], exclude = ["*_assembly_test.cc", "link_main_test.cc"])
+]
+
+cc_test(
+    name = "link_main_test",
+    size = "small",
+    srcs = ["link_main_test.cc"],
+    copts = TEST_COPTS,
+    deps = ["//:benchmark_main"],
+)
89 vendor/github.com/google/benchmark/test/CMakeLists.txt generated vendored
@@ -22,6 +22,12 @@ if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
   endforeach()
 endif()
 
+check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
+set(BENCHMARK_O3_FLAG "")
+if (BENCHMARK_HAS_O3_FLAG)
+  set(BENCHMARK_O3_FLAG "-O3")
+endif()
+
 # NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise
 # they will break the configuration check.
 if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
@@ -35,6 +41,10 @@ macro(compile_benchmark_test name)
   target_link_libraries(${name} benchmark ${CMAKE_THREAD_LIBS_INIT})
 endmacro(compile_benchmark_test)
 
+macro(compile_benchmark_test_with_main name)
+  add_executable(${name} "${name}.cc")
+  target_link_libraries(${name} benchmark_main)
+endmacro(compile_benchmark_test_with_main)
+
 macro(compile_output_test name)
   add_executable(${name} "${name}.cc" output_test.h)
@@ -42,7 +52,6 @@ macro(compile_output_test name)
                         ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
 endmacro(compile_output_test)
 
-
 # Demonstration executable
 compile_benchmark_test(benchmark_test)
 add_test(benchmark benchmark_test --benchmark_min_time=0.01)
@@ -54,14 +63,23 @@ macro(add_filter_test name filter expect)
 endmacro(add_filter_test)
 
 add_filter_test(filter_simple "Foo" 3)
+add_filter_test(filter_simple_negative "-Foo" 2)
 add_filter_test(filter_suffix "BM_.*" 4)
+add_filter_test(filter_suffix_negative "-BM_.*" 1)
 add_filter_test(filter_regex_all ".*" 5)
+add_filter_test(filter_regex_all_negative "-.*" 0)
 add_filter_test(filter_regex_blank "" 5)
+add_filter_test(filter_regex_blank_negative "-" 0)
 add_filter_test(filter_regex_none "monkey" 0)
+add_filter_test(filter_regex_none_negative "-monkey" 5)
 add_filter_test(filter_regex_wildcard ".*Foo.*" 3)
+add_filter_test(filter_regex_wildcard_negative "-.*Foo.*" 2)
 add_filter_test(filter_regex_begin "^BM_.*" 4)
+add_filter_test(filter_regex_begin_negative "-^BM_.*" 1)
 add_filter_test(filter_regex_begin2 "^N" 1)
+add_filter_test(filter_regex_begin2_negative "-^N" 4)
 add_filter_test(filter_regex_end ".*Ba$" 1)
+add_filter_test(filter_regex_end_negative "-.*Ba$" 4)
 
 compile_benchmark_test(options_test)
 add_test(options_benchmarks options_test --benchmark_min_time=0.01)
@@ -95,9 +113,15 @@ add_test(map_test map_test --benchmark_min_time=0.01)
 compile_benchmark_test(multiple_ranges_test)
 add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01)
 
+compile_benchmark_test_with_main(link_main_test)
+add_test(link_main_test link_main_test --benchmark_min_time=0.01)
+
 compile_output_test(reporter_output_test)
 add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01)
 
+compile_output_test(templated_fixture_test)
+add_test(templated_fixture_test templated_fixture_test --benchmark_min_time=0.01)
+
 compile_output_test(user_counters_test)
 add_test(user_counters_test user_counters_test --benchmark_min_time=0.01)
 
@@ -106,13 +130,20 @@ add_test(user_counters_tabular_test user_counters_tabular_test --benchmark_count
 
 check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
 if (BENCHMARK_HAS_CXX03_FLAG)
-  set(CXX03_FLAGS "${CMAKE_CXX_FLAGS}")
-  string(REPLACE "-std=c++11" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
-  string(REPLACE "-std=c++0x" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
-
   compile_benchmark_test(cxx03_test)
   set_target_properties(cxx03_test
-      PROPERTIES COMPILE_FLAGS "${CXX03_FLAGS}")
+      PROPERTIES
+      COMPILE_FLAGS "-std=c++03")
+  # libstdc++ provides different definitions within <map> between dialects. When
+  # LTO is enabled and -Werror is specified GCC diagnoses this ODR violation
+  # causing the test to fail to compile. To prevent this we explicitly disable
+  # the warning.
+  check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR)
+  if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR)
+    set_target_properties(cxx03_test
+        PROPERTIES
+        LINK_FLAGS "-Wno-odr")
+  endif()
   add_test(cxx03 cxx03_test --benchmark_min_time=0.01)
 endif()
 
@@ -125,6 +156,52 @@ endif()
 compile_output_test(complexity_test)
 add_test(complexity_benchmark complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
 
+###############################################################################
+# GoogleTest Unit Tests
+###############################################################################
+
+if (BENCHMARK_ENABLE_GTEST_TESTS)
+  macro(compile_gtest name)
+    add_executable(${name} "${name}.cc")
+    if (TARGET googletest)
+      add_dependencies(${name} googletest)
+    endif()
+    if (GTEST_INCLUDE_DIRS)
+      target_include_directories(${name} PRIVATE ${GTEST_INCLUDE_DIRS})
+    endif()
+    target_link_libraries(${name} benchmark
+        ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+  endmacro(compile_gtest)
+
+  macro(add_gtest name)
+    compile_gtest(${name})
+    add_test(${name} ${name})
+  endmacro()
+
+  add_gtest(benchmark_gtest)
+  add_gtest(statistics_gtest)
+endif(BENCHMARK_ENABLE_GTEST_TESTS)
+
+###############################################################################
+# Assembly Unit Tests
+###############################################################################
+
+if (BENCHMARK_ENABLE_ASSEMBLY_TESTS)
+  if (NOT LLVM_FILECHECK_EXE)
+    message(FATAL_ERROR "LLVM FileCheck is required when including this file")
+  endif()
+  include(AssemblyTests.cmake)
+  add_filecheck_test(donotoptimize_assembly_test)
+  add_filecheck_test(state_assembly_test)
+  add_filecheck_test(clobber_memory_assembly_test)
+endif()
+
+
+
+###############################################################################
+# Code Coverage Configuration
+###############################################################################
+
 # Add the coverage command(s)
 if(CMAKE_BUILD_TYPE)
   string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
55 vendor/github.com/google/benchmark/test/basic_test.cc generated vendored
@@ -4,7 +4,7 @@
 #define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
 
 void BM_empty(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     benchmark::DoNotOptimize(state.iterations());
   }
 }
@@ -12,7 +12,7 @@ BENCHMARK(BM_empty);
 BENCHMARK(BM_empty)->ThreadPerCpu();
 
 void BM_spin_empty(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int x = 0; x < state.range(0); ++x) {
       benchmark::DoNotOptimize(x);
     }
@@ -25,7 +25,7 @@ void BM_spin_pause_before(benchmark::State& state) {
   for (int i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
@@ -35,7 +35,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before);
 BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
 
 void BM_spin_pause_during(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     state.PauseTiming();
     for (int i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
@@ -50,7 +50,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_during);
 BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
 
 void BM_pause_during(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     state.PauseTiming();
     state.ResumeTiming();
   }
@@ -61,7 +61,7 @@ BENCHMARK(BM_pause_during)->UseRealTime();
 BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
 
 void BM_spin_pause_after(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
@@ -77,7 +77,7 @@ void BM_spin_pause_before_and_after(benchmark::State& state) {
   for (int i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
@@ -90,10 +90,47 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
 BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
 
 void BM_empty_stop_start(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_empty_stop_start);
 BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
 
-BENCHMARK_MAIN()
+
+void BM_KeepRunning(benchmark::State& state) {
+  size_t iter_count = 0;
+  assert(iter_count == state.iterations());
+  while (state.KeepRunning()) {
+    ++iter_count;
+  }
+  assert(iter_count == state.iterations());
+}
+BENCHMARK(BM_KeepRunning);
+
+void BM_KeepRunningBatch(benchmark::State& state) {
+  // Choose a prime batch size to avoid evenly dividing max_iterations.
+  const size_t batch_size = 101;
+  size_t iter_count = 0;
+  while (state.KeepRunningBatch(batch_size)) {
+    iter_count += batch_size;
+  }
+  assert(state.iterations() == iter_count);
+}
+BENCHMARK(BM_KeepRunningBatch);
+
+void BM_RangedFor(benchmark::State& state) {
+  size_t iter_count = 0;
+  for (auto _ : state) {
+    ++iter_count;
+  }
+  assert(iter_count == state.max_iterations);
+}
+BENCHMARK(BM_RangedFor);
+
+// Ensure that StateIterator provides all the necessary typedefs required to
+// instantiate std::iterator_traits.
+static_assert(std::is_same<
+  typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
+  typename benchmark::State::StateIterator::value_type>::value, "");
+
+BENCHMARK_MAIN();
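The three new tests above pin down the iteration-accounting contract: the ranged-for loop runs exactly state.max_iterations times, KeepRunning() counts one iteration per pass, and KeepRunningBatch(n) rounds the total up to a multiple of n. A minimal sketch of a benchmark that leans on the batch form (the chunked-workload framing is illustrative, not from the diff):

#include <benchmark/benchmark.h>

#include <cassert>
#include <vector>

static void BM_ChunkedWork(benchmark::State& state) {
  // A prime chunk size never divides max_iterations evenly, so this also
  // exercises the rounding-up behaviour the test above asserts.
  const size_t chunk = 101;
  std::vector<int> buf(chunk, 1);
  size_t done = 0;
  while (state.KeepRunningBatch(chunk)) {
    for (int& v : buf) benchmark::DoNotOptimize(v += 1);
    done += chunk;  // one batch == 'chunk' accounted iterations
  }
  assert(done == static_cast<size_t>(state.iterations()));
  (void)done;  // keep NDEBUG builds warning-free
}
BENCHMARK(BM_ChunkedWork);

BENCHMARK_MAIN();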
33 vendor/github.com/google/benchmark/test/benchmark_gtest.cc generated vendored Normal file
@@ -0,0 +1,33 @@
+#include <vector>
+
+#include "../src/benchmark_register.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+TEST(AddRangeTest, Simple) {
+  std::vector<int> dst;
+  AddRange(&dst, 1, 2, 2);
+  EXPECT_THAT(dst, testing::ElementsAre(1, 2));
+}
+
+TEST(AddRangeTest, Simple64) {
+  std::vector<int64_t> dst;
+  AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
+  EXPECT_THAT(dst, testing::ElementsAre(1, 2));
+}
+
+TEST(AddRangeTest, Advanced) {
+  std::vector<int> dst;
+  AddRange(&dst, 5, 15, 2);
+  EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
+}
+
+TEST(AddRangeTest, Advanced64) {
+  std::vector<int64_t> dst;
+  AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
+  EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
+}
+
+}  // end namespace
65 vendor/github.com/google/benchmark/test/benchmark_test.cc generated vendored
@@ -40,9 +40,9 @@ double CalculatePi(int depth) {
   return (pi - 1.0) * 4;
 }
 
-std::set<int> ConstructRandomSet(int size) {
-  std::set<int> s;
-  for (int i = 0; i < size; ++i) s.insert(i);
+std::set<int64_t> ConstructRandomSet(int64_t size) {
+  std::set<int64_t> s;
+  for (int i = 0; i < size; ++i) s.insert(s.end(), i);
   return s;
 }
 
@@ -53,7 +53,7 @@ std::vector<int>* test_vector = nullptr;
 
 static void BM_Factorial(benchmark::State& state) {
   int fac_42 = 0;
-  while (state.KeepRunning()) fac_42 = Factorial(8);
+  for (auto _ : state) fac_42 = Factorial(8);
   // Prevent compiler optimizations
   std::stringstream ss;
   ss << fac_42;
@@ -64,7 +64,7 @@ BENCHMARK(BM_Factorial)->UseRealTime();
 
 static void BM_CalculatePiRange(benchmark::State& state) {
   double pi = 0.0;
-  while (state.KeepRunning()) pi = CalculatePi(state.range(0));
+  for (auto _ : state) pi = CalculatePi(static_cast<int>(state.range(0)));
   std::stringstream ss;
   ss << pi;
   state.SetLabel(ss.str());
@@ -73,8 +73,8 @@ BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
 
 static void BM_CalculatePi(benchmark::State& state) {
   static const int depth = 1024;
-  while (state.KeepRunning()) {
-    benchmark::DoNotOptimize(CalculatePi(depth));
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth)));
   }
 }
 BENCHMARK(BM_CalculatePi)->Threads(8);
@@ -82,26 +82,30 @@ BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
 BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
 
 static void BM_SetInsert(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  std::set<int64_t> data;
+  for (auto _ : state) {
     state.PauseTiming();
-    std::set<int> data = ConstructRandomSet(state.range(0));
+    data = ConstructRandomSet(state.range(0));
     state.ResumeTiming();
     for (int j = 0; j < state.range(1); ++j) data.insert(rand());
   }
   state.SetItemsProcessed(state.iterations() * state.range(1));
   state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
 }
-BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {1, 10}});
+
+// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower,
+// non-timed part of each iteration will make the benchmark take forever.
+BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});
 
 template <typename Container,
           typename ValueType = typename Container::value_type>
 static void BM_Sequential(benchmark::State& state) {
   ValueType v = 42;
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     Container c;
-    for (int i = state.range(0); --i;) c.push_back(v);
+    for (int64_t i = state.range(0); --i;) c.push_back(v);
   }
-  const size_t items_processed = state.iterations() * state.range(0);
+  const int64_t items_processed = state.iterations() * state.range(0);
   state.SetItemsProcessed(items_processed);
   state.SetBytesProcessed(items_processed * sizeof(v));
 }
@@ -109,14 +113,15 @@ BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)
     ->Range(1 << 0, 1 << 10);
 BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
 // Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
-#if __cplusplus >= 201103L
+#ifdef BENCHMARK_HAS_CXX11
 BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
 #endif
 
 static void BM_StringCompare(benchmark::State& state) {
-  std::string s1(state.range(0), '-');
-  std::string s2(state.range(0), '-');
-  while (state.KeepRunning()) benchmark::DoNotOptimize(s1.compare(s2));
+  size_t len = static_cast<size_t>(state.range(0));
+  std::string s1(len, '-');
+  std::string s2(len, '-');
+  for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
 }
 BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
 
@@ -126,7 +131,7 @@ static void BM_SetupTeardown(benchmark::State& state) {
     test_vector = new std::vector<int>();
   }
   int i = 0;
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     std::lock_guard<std::mutex> l(test_vector_mu);
     if (i % 2 == 0)
       test_vector->push_back(i);
@@ -142,7 +147,7 @@ BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
 
 static void BM_LongTest(benchmark::State& state) {
   double tracker = 0.0;
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = 0; i < state.range(0); ++i)
       benchmark::DoNotOptimize(tracker += i);
   }
@@ -150,16 +155,16 @@ static void BM_LongTest(benchmark::State& state) {
 BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
 
 static void BM_ParallelMemset(benchmark::State& state) {
-  int size = state.range(0) / static_cast<int>(sizeof(int));
-  int thread_size = size / state.threads;
+  int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
+  int thread_size = static_cast<int>(size) / state.threads;
   int from = thread_size * state.thread_index;
   int to = from + thread_size;
 
   if (state.thread_index == 0) {
-    test_vector = new std::vector<int>(size);
+    test_vector = new std::vector<int>(static_cast<size_t>(size));
   }
 
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = from; i < to; i++) {
       // No need to lock test_vector_mu as ranges
       // do not overlap between threads.
@@ -174,12 +179,12 @@ static void BM_ParallelMemset(benchmark::State& state) {
 BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
 
 static void BM_ManualTiming(benchmark::State& state) {
-  size_t slept_for = 0;
-  int microseconds = state.range(0);
+  int64_t slept_for = 0;
+  int64_t microseconds = state.range(0);
   std::chrono::duration<double, std::micro> sleep_duration{
       static_cast<double>(microseconds)};
 
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     auto start = std::chrono::high_resolution_clock::now();
     // Simulate some useful workload with a sleep
     std::this_thread::sleep_for(
@@ -197,11 +202,11 @@ static void BM_ManualTiming(benchmark::State& state) {
 BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
 BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
 
-#if __cplusplus >= 201103L
+#ifdef BENCHMARK_HAS_CXX11
 
 template <class... Args>
 void BM_with_args(benchmark::State& state, Args&&...) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
@@ -213,7 +218,7 @@ void BM_non_template_args(benchmark::State& state, int, double) {
 }
 BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
 
-#endif  // __cplusplus >= 201103L
+#endif  // BENCHMARK_HAS_CXX11
 
 static void BM_DenseThreadRanges(benchmark::State& st) {
   switch (st.range(0)) {
@@ -237,4 +242,4 @@ BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3);
 BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2);
 BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3);
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
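Most edits in this file follow one migration: state.range(n) now returns int64_t instead of int, so call sites either keep the wide type or cast explicitly at narrowing boundaries. A minimal sketch of the resulting pattern, using only the public API shown in the diff (the benchmark itself is illustrative):

#include <benchmark/benchmark.h>

#include <string>

static void BM_RangeWidth(benchmark::State& state) {
  // Keep int64_t for arithmetic that can exceed int (ranges up to 1 << 28
  // appear above)...
  const int64_t n = state.range(0);
  // ...and cast explicitly where a narrower type is actually required.
  std::string s(static_cast<size_t>(n), '-');
  for (auto _ : state) benchmark::DoNotOptimize(s.compare(s));
  // The counter setters take 64-bit values, so the product stays wide.
  state.SetItemsProcessed(state.iterations() * n);
}
BENCHMARK(BM_RangeWidth)->Range(8, 1 << 12);

BENCHMARK_MAIN();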
64 vendor/github.com/google/benchmark/test/clobber_memory_assembly_test.cc generated vendored Normal file
@@ -0,0 +1,64 @@
+#include <benchmark/benchmark.h>
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wreturn-type"
+#endif
+
+extern "C" {
+
+extern int ExternInt;
+extern int ExternInt2;
+extern int ExternInt3;
+
+}
+
+// CHECK-LABEL: test_basic:
+extern "C" void test_basic() {
+  int x;
+  benchmark::DoNotOptimize(&x);
+  x = 101;
+  benchmark::ClobberMemory();
+  // CHECK: leaq [[DEST:[^,]+]], %rax
+  // CHECK: movl $101, [[DEST]]
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_redundant_store:
+extern "C" void test_redundant_store() {
+  ExternInt = 3;
+  benchmark::ClobberMemory();
+  ExternInt = 51;
+  // CHECK-DAG: ExternInt
+  // CHECK-DAG: movl $3
+  // CHECK: movl $51
+}
+
+// CHECK-LABEL: test_redundant_read:
+extern "C" void test_redundant_read() {
+  int x;
+  benchmark::DoNotOptimize(&x);
+  x = ExternInt;
+  benchmark::ClobberMemory();
+  x = ExternInt2;
+  // CHECK: leaq [[DEST:[^,]+]], %rax
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, [[DEST]]
+  // CHECK-NOT: ExternInt2
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_redundant_read2:
+extern "C" void test_redundant_read2() {
+  int x;
+  benchmark::DoNotOptimize(&x);
+  x = ExternInt;
+  benchmark::ClobberMemory();
+  x = ExternInt2;
+  benchmark::ClobberMemory();
+  // CHECK: leaq [[DEST:[^,]+]], %rax
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, [[DEST]]
+  // CHECK: ExternInt2(%rip)
+  // CHECK: movl %eax, [[DEST]]
+  // CHECK: ret
+}
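The tests above verify, at the assembly level, that ClobberMemory() keeps otherwise-redundant stores alive. In an actual benchmark the same pairing is what makes a fill loop measurable; a minimal sketch of the DoNotOptimize/ClobberMemory idiom (the fill workload is illustrative):

#include <benchmark/benchmark.h>

#include <vector>

static void BM_VectorFill(benchmark::State& state) {
  std::vector<int> v(static_cast<size_t>(state.range(0)));
  for (auto _ : state) {
    // Mark the storage as observed so writes to it cannot be proven dead...
    benchmark::DoNotOptimize(v.data());
    for (int& e : v) e = 42;
    // ...and force those writes to be materialized every iteration.
    benchmark::ClobberMemory();
  }
}
BENCHMARK(BM_VectorFill)->Range(8, 8 << 10);

BENCHMARK_MAIN();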
24 vendor/github.com/google/benchmark/test/complexity_test.cc generated vendored
@@ -25,8 +25,8 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
        {"^%bigo_name", MR_Not},  // Assert we didn't only match a name.
        {"^%rms_name %rms %rms[ ]*$", MR_Next}});
   AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
-                        {"\"cpu_coefficient\": [0-9]+,$", MR_Next},
-                        {"\"real_coefficient\": [0-9]{1,5},$", MR_Next},
+                        {"\"cpu_coefficient\": %float,$", MR_Next},
+                        {"\"real_coefficient\": %float,$", MR_Next},
                         {"\"big_o\": \"%bigo\",$", MR_Next},
                         {"\"time_unit\": \"ns\"$", MR_Next},
                         {"}", MR_Next},
@@ -46,7 +46,7 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
 // ========================================================================= //
 
 void BM_Complexity_O1(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = 0; i < 1024; ++i) {
       benchmark::DoNotOptimize(&i);
     }
@@ -55,7 +55,7 @@ void BM_Complexity_O1(benchmark::State& state) {
 }
 BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
 BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
-BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int) {
+BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int64_t) {
   return 1.0;
 });
 
@@ -81,9 +81,9 @@ ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
 // --------------------------- Testing BigO O(N) --------------------------- //
 // ========================================================================= //
 
-std::vector<int> ConstructRandomVector(int size) {
+std::vector<int> ConstructRandomVector(int64_t size) {
   std::vector<int> v;
-  v.reserve(size);
+  v.reserve(static_cast<int>(size));
   for (int i = 0; i < size; ++i) {
     v.push_back(std::rand() % size);
   }
@@ -92,9 +92,9 @@ std::vector<int> ConstructRandomVector(int size) {
 
 void BM_Complexity_O_N(benchmark::State& state) {
   auto v = ConstructRandomVector(state.range(0));
-  const int item_not_in_vector =
-      state.range(0) * 2;  // Test worst case scenario (item not in vector)
-  while (state.KeepRunning()) {
+  // Test worst case scenario (item not in vector)
+  const int64_t item_not_in_vector = state.range(0) * 2;
+  for (auto _ : state) {
     benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
   }
   state.SetComplexityN(state.range(0));
@@ -106,7 +106,7 @@ BENCHMARK(BM_Complexity_O_N)
 BENCHMARK(BM_Complexity_O_N)
     ->RangeMultiplier(2)
     ->Range(1 << 10, 1 << 16)
-    ->Complexity([](int n) -> double { return n; });
+    ->Complexity([](int64_t n) -> double { return n; });
 BENCHMARK(BM_Complexity_O_N)
     ->RangeMultiplier(2)
     ->Range(1 << 10, 1 << 16)
@@ -129,7 +129,7 @@ ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
 
 static void BM_Complexity_O_N_log_N(benchmark::State& state) {
   auto v = ConstructRandomVector(state.range(0));
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     std::sort(v.begin(), v.end());
   }
   state.SetComplexityN(state.range(0));
@@ -141,7 +141,7 @@ BENCHMARK(BM_Complexity_O_N_log_N)
 BENCHMARK(BM_Complexity_O_N_log_N)
     ->RangeMultiplier(2)
     ->Range(1 << 10, 1 << 16)
-    ->Complexity([](int n) { return n * log2(n); });
+    ->Complexity([](int64_t n) { return n * log2(n); });
 BENCHMARK(BM_Complexity_O_N_log_N)
     ->RangeMultiplier(2)
     ->Range(1 << 10, 1 << 16)
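As above, the Complexity() overload that takes a user-supplied curve now receives the value registered via SetComplexityN() as int64_t. A minimal sketch of fitting against a custom linear curve under that signature (the scan workload is illustrative):

#include <benchmark/benchmark.h>

#include <algorithm>
#include <vector>

static void BM_LinearScan(benchmark::State& state) {
  std::vector<int> v(static_cast<size_t>(state.range(0)), 1);
  for (auto _ : state) {
    // Worst case: the element is absent, so the scan touches every slot.
    benchmark::DoNotOptimize(std::find(v.begin(), v.end(), 0));
  }
  // N used for the fit; the lambda below receives it as int64_t.
  state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_LinearScan)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity([](int64_t n) -> double { return static_cast<double>(n); });

BENCHMARK_MAIN();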
17 vendor/github.com/google/benchmark/test/cxx03_test.cc generated vendored
@@ -8,6 +8,10 @@
 #error C++11 or greater detected. Should be C++03.
 #endif
 
+#ifdef BENCHMARK_HAS_CXX11
+#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined.
+#endif
+
 void BM_empty(benchmark::State& state) {
   while (state.KeepRunning()) {
     volatile std::size_t x = state.iterations();
@@ -39,10 +43,21 @@ void BM_template1(benchmark::State& state) {
 BENCHMARK_TEMPLATE(BM_template1, long);
 BENCHMARK_TEMPLATE1(BM_template1, int);
 
+template <class T>
+struct BM_Fixture : public ::benchmark::Fixture {
+};
+
+BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) {
+  BM_empty(state);
+}
+BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) {
+  BM_empty(state);
+}
+
 void BM_counters(benchmark::State& state) {
   BM_empty(state);
   state.counters["Foo"] = 2;
 }
 BENCHMARK(BM_counters);
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
18 vendor/github.com/google/benchmark/test/diagnostics_test.cc generated vendored
@@ -47,7 +47,7 @@ void BM_diagnostic_test(benchmark::State& state) {
 
   if (called_once == false) try_invalid_pause_resume(state);
 
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     benchmark::DoNotOptimize(state.iterations());
   }
 
@@ -57,6 +57,22 @@ void BM_diagnostic_test(benchmark::State& state) {
 }
 BENCHMARK(BM_diagnostic_test);
 
+
+void BM_diagnostic_test_keep_running(benchmark::State& state) {
+  static bool called_once = false;
+
+  if (called_once == false) try_invalid_pause_resume(state);
+
+  while(state.KeepRunning()) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+
+  if (called_once == false) try_invalid_pause_resume(state);
+
+  called_once = true;
+}
+BENCHMARK(BM_diagnostic_test_keep_running);
+
 int main(int argc, char* argv[]) {
   benchmark::internal::GetAbortHandler() = &TestHandler;
   benchmark::Initialize(&argc, argv);
163 vendor/github.com/google/benchmark/test/donotoptimize_assembly_test.cc generated vendored Normal file
@@ -0,0 +1,163 @@
+#include <benchmark/benchmark.h>
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wreturn-type"
+#endif
+
+extern "C" {
+
+extern int ExternInt;
+extern int ExternInt2;
+extern int ExternInt3;
+
+inline int Add42(int x) { return x + 42; }
+
+struct NotTriviallyCopyable {
+  NotTriviallyCopyable();
+  explicit NotTriviallyCopyable(int x) : value(x) {}
+  NotTriviallyCopyable(NotTriviallyCopyable const&);
+  int value;
+};
+
+struct Large {
+  int value;
+  int data[2];
+};
+
+}
+// CHECK-LABEL: test_with_rvalue:
+extern "C" void test_with_rvalue() {
+  benchmark::DoNotOptimize(Add42(0));
+  // CHECK: movl $42, %eax
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_large_rvalue:
+extern "C" void test_with_large_rvalue() {
+  benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}});
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_non_trivial_rvalue:
+extern "C" void test_with_non_trivial_rvalue() {
+  benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt));
+  // CHECK: mov{{l|q}} ExternInt(%rip)
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_lvalue:
+extern "C" void test_with_lvalue() {
+  int x = 101;
+  benchmark::DoNotOptimize(x);
+  // CHECK-GNU: movl $101, %eax
+  // CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_large_lvalue:
+extern "C" void test_with_large_lvalue() {
+  Large L{ExternInt, {ExternInt, ExternInt}};
+  benchmark::DoNotOptimize(L);
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_non_trivial_lvalue:
+extern "C" void test_with_non_trivial_lvalue() {
+  NotTriviallyCopyable NTC(ExternInt);
+  benchmark::DoNotOptimize(NTC);
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_const_lvalue:
+extern "C" void test_with_const_lvalue() {
+  const int x = 123;
+  benchmark::DoNotOptimize(x);
+  // CHECK: movl $123, %eax
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_large_const_lvalue:
+extern "C" void test_with_large_const_lvalue() {
+  const Large L{ExternInt, {ExternInt, ExternInt}};
+  benchmark::DoNotOptimize(L);
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_non_trivial_const_lvalue:
+extern "C" void test_with_non_trivial_const_lvalue() {
+  const NotTriviallyCopyable Obj(ExternInt);
+  benchmark::DoNotOptimize(Obj);
+  // CHECK: mov{{q|l}} ExternInt(%rip)
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_div_by_two:
+extern "C" int test_div_by_two(int input) {
+  int divisor = 2;
+  benchmark::DoNotOptimize(divisor);
+  return input / divisor;
+  // CHECK: movl $2, [[DEST:.*]]
+  // CHECK: idivl [[DEST]]
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_inc_integer:
+extern "C" int test_inc_integer() {
+  int x = 0;
+  for (int i=0; i < 5; ++i)
+    benchmark::DoNotOptimize(++x);
+  // CHECK: movl $1, [[DEST:.*]]
+  // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+  // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+  // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+  // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+  // CHECK-CLANG: movl [[DEST]], %eax
+  // CHECK: ret
+  return x;
+}
+
+// CHECK-LABEL: test_pointer_rvalue
+extern "C" void test_pointer_rvalue() {
+  // CHECK: movl $42, [[DEST:.*]]
+  // CHECK: leaq [[DEST]], %rax
+  // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: ret
+  int x = 42;
+  benchmark::DoNotOptimize(&x);
+}
+
+// CHECK-LABEL: test_pointer_const_lvalue:
+extern "C" void test_pointer_const_lvalue() {
+  // CHECK: movl $42, [[DEST:.*]]
+  // CHECK: leaq [[DEST]], %rax
+  // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: ret
+  int x = 42;
+  int * const xp = &x;
+  benchmark::DoNotOptimize(xp);
+}
+
+// CHECK-LABEL: test_pointer_lvalue:
+extern "C" void test_pointer_lvalue() {
+  // CHECK: movl $42, [[DEST:.*]]
+  // CHECK: leaq [[DEST]], %rax
+  // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]])
+  // CHECK: ret
  int x = 42;
  int *xp = &x;
  benchmark::DoNotOptimize(xp);
}
6 vendor/github.com/google/benchmark/test/donotoptimize_test.cc generated vendored
@@ -28,13 +28,13 @@ private:
 int main(int, char*[]) {
   // this test verifies compilation of DoNotOptimize() for some types
 
-  char buffer8[8];
+  char buffer8[8] = "";
   benchmark::DoNotOptimize(buffer8);
 
-  char buffer20[20];
+  char buffer20[20] = "";
   benchmark::DoNotOptimize(buffer20);
 
-  char buffer1024[1024];
+  char buffer1024[1024] = "";
   benchmark::DoNotOptimize(buffer1024);
   benchmark::DoNotOptimize(&buffer1024[0]);
 
10 vendor/github.com/google/benchmark/test/filter_test.cc generated vendored
@@ -36,31 +36,31 @@ class TestReporter : public benchmark::ConsoleReporter {
 }  // end namespace
 
 static void NoPrefix(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(NoPrefix);
 
 static void BM_Foo(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_Foo);
 
 static void BM_Bar(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_Bar);
 
 static void BM_FooBar(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_FooBar);
 
 static void BM_FooBa(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_FooBa);
 
6 vendor/github.com/google/benchmark/test/fixture_test.cc generated vendored
@@ -28,7 +28,7 @@ class MyFixture : public ::benchmark::Fixture {
 BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) {
   assert(data.get() != nullptr);
   assert(*data == 42);
-  while (st.KeepRunning()) {
+  for (auto _ : st) {
   }
 }
 
@@ -37,7 +37,7 @@ BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
     assert(data.get() != nullptr);
     assert(*data == 42);
   }
-  while (st.KeepRunning()) {
+  for (auto _ : st) {
     assert(data.get() != nullptr);
     assert(*data == 42);
   }
@@ -46,4 +46,4 @@ BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
 BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42);
 BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu();
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
8 vendor/github.com/google/benchmark/test/link_main_test.cc generated vendored Normal file
@@ -0,0 +1,8 @@
+#include "benchmark/benchmark.h"
+
+void BM_empty(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+}
+BENCHMARK(BM_empty);
21
vendor/github.com/google/benchmark/test/map_test.cc
generated
vendored
@@ -8,7 +8,7 @@ namespace {
 std::map<int, int> ConstructRandomMap(int size) {
   std::map<int, int> m;
   for (int i = 0; i < size; ++i) {
-    m.insert(std::make_pair(rand() % size, rand() % size));
+    m.insert(std::make_pair(std::rand() % size, std::rand() % size));
   }
   return m;
 }
@@ -17,13 +17,14 @@ std::map<int, int> ConstructRandomMap(int size) {
 
 // Basic version.
 static void BM_MapLookup(benchmark::State& state) {
-  const int size = state.range(0);
-  while (state.KeepRunning()) {
+  const int size = static_cast<int>(state.range(0));
+  std::map<int, int> m;
+  for (auto _ : state) {
     state.PauseTiming();
-    std::map<int, int> m = ConstructRandomMap(size);
+    m = ConstructRandomMap(size);
     state.ResumeTiming();
     for (int i = 0; i < size; ++i) {
-      benchmark::DoNotOptimize(m.find(rand() % size));
+      benchmark::DoNotOptimize(m.find(std::rand() % size));
     }
   }
   state.SetItemsProcessed(state.iterations() * size);
@@ -34,7 +35,7 @@ BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
 class MapFixture : public ::benchmark::Fixture {
  public:
   void SetUp(const ::benchmark::State& st) {
-    m = ConstructRandomMap(st.range(0));
+    m = ConstructRandomMap(static_cast<int>(st.range(0)));
   }
 
   void TearDown(const ::benchmark::State&) { m.clear(); }
@@ -43,14 +44,14 @@ class MapFixture : public ::benchmark::Fixture {
 };
 
 BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
-  const int size = state.range(0);
-  while (state.KeepRunning()) {
+  const int size = static_cast<int>(state.range(0));
+  for (auto _ : state) {
     for (int i = 0; i < size; ++i) {
-      benchmark::DoNotOptimize(m.find(rand() % size));
+      benchmark::DoNotOptimize(m.find(std::rand() % size));
     }
   }
   state.SetItemsProcessed(state.iterations() * size);
 }
 BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12);
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
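Note: the dominant change in map_test.cc and the other updated tests is the migration from the older KeepRunning() loop to the range-based State iterator. A minimal sketch of the two equivalent forms (illustrative only, not part of the diff):

    static void BM_old_style(benchmark::State& state) {
      while (state.KeepRunning()) {
        // timed body
      }
    }

    static void BM_new_style(benchmark::State& state) {
      for (auto _ : state) {  // same iteration semantics, preferred form
        // timed body
      }
    }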
41
vendor/github.com/google/benchmark/test/multiple_ranges_test.cc
generated
vendored
@@ -1,7 +1,9 @@
 #include "benchmark/benchmark.h"
 
 #include <cassert>
+#include <iostream>
 #include <set>
+#include <vector>
 
 class MultipleRangesFixture : public ::benchmark::Fixture {
  public:
@@ -27,25 +29,46 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
                          {7, 6, 3}}) {}
 
   void SetUp(const ::benchmark::State& state) {
-    std::vector<int> ranges = {state.range(0), state.range(1), state.range(2)};
+    std::vector<int64_t> ranges = {state.range(0), state.range(1),
+                                   state.range(2)};
 
     assert(expectedValues.find(ranges) != expectedValues.end());
 
     actualValues.insert(ranges);
   }
 
+  // NOTE: This is not TearDown as we want to check after _all_ runs are
+  // complete.
   virtual ~MultipleRangesFixture() {
     assert(actualValues.size() == expectedValues.size());
+    if (actualValues.size() != expectedValues.size()) {
+      std::cout << "EXPECTED\n";
+      for (auto v : expectedValues) {
+        std::cout << "{";
+        for (int64_t iv : v) {
+          std::cout << iv << ", ";
+        }
+        std::cout << "}\n";
+      }
+      std::cout << "ACTUAL\n";
+      for (auto v : actualValues) {
+        std::cout << "{";
+        for (int64_t iv : v) {
+          std::cout << iv << ", ";
+        }
+        std::cout << "}\n";
+      }
+    }
   }
 
-  std::set<std::vector<int>> expectedValues;
-  std::set<std::vector<int>> actualValues;
+  std::set<std::vector<int64_t>> expectedValues;
+  std::set<std::vector<int64_t>> actualValues;
 };
 
 BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
-  while (state.KeepRunning()) {
-    int product = state.range(0) * state.range(1) * state.range(2);
-    for (int x = 0; x < product; x++) {
+  for (auto _ : state) {
+    int64_t product = state.range(0) * state.range(1) * state.range(2);
+    for (int64_t x = 0; x < product; x++) {
       benchmark::DoNotOptimize(x);
     }
   }
@@ -60,15 +83,15 @@ void BM_CheckDefaultArgument(benchmark::State& state) {
   // Test that the 'range()' without an argument is the same as 'range(0)'.
   assert(state.range() == state.range(0));
   assert(state.range() != state.range(1));
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
 
 static void BM_MultipleRanges(benchmark::State& st) {
-  while (st.KeepRunning()) {
+  for (auto _ : st) {
   }
 }
 BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
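Note: multiple_ranges_test.cc now treats state.range(n) values as int64_t (std::vector<int> becomes std::vector<int64_t>, loop indices widen), while map_test.cc narrows explicitly with static_cast<int>. A minimal sketch of the pattern these diffs imply (BM_ranged is a hypothetical example):

    static void BM_ranged(benchmark::State& state) {
      const int size = static_cast<int>(state.range(0));  // explicit narrowing
      int64_t wide = state.range(1);                      // keep the full width
      for (auto _ : state) {
        benchmark::DoNotOptimize(size + wide);
      }
    }
    BENCHMARK(BM_ranged)->Ranges({{8, 64}, {1, 2}});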
16
vendor/github.com/google/benchmark/test/options_test.cc
generated
vendored
@@ -8,13 +8,13 @@
 #include <cassert>
 
 void BM_basic(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 
 void BM_basic_slow(benchmark::State& state) {
   std::chrono::milliseconds sleep_duration(state.range(0));
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     std::this_thread::sleep_for(
         std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
   }
@@ -44,7 +44,7 @@ void CustomArgs(benchmark::internal::Benchmark* b) {
 
 BENCHMARK(BM_basic)->Apply(CustomArgs);
 
-void BM_explicit_iteration_count(benchmark::State& st) {
+void BM_explicit_iteration_count(benchmark::State& state) {
   // Test that benchmarks specified with an explicit iteration count are
   // only run once.
   static bool invoked_before = false;
@@ -52,14 +52,14 @@ void BM_explicit_iteration_count(benchmark::State& st) {
   invoked_before = true;
 
   // Test that the requested iteration count is respected.
-  assert(st.max_iterations == 42);
+  assert(state.max_iterations == 42);
   size_t actual_iterations = 0;
-  while (st.KeepRunning())
+  for (auto _ : state)
     ++actual_iterations;
-  assert(st.iterations() == st.max_iterations);
-  assert(st.iterations() == 42);
+  assert(state.iterations() == state.max_iterations);
+  assert(state.iterations() == 42);
 
 }
 BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
4
vendor/github.com/google/benchmark/test/output_test_helper.cc
generated
vendored
@@ -40,8 +40,8 @@ SubMap& GetSubstitutions() {
       {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"},
       {"%int", "[ ]*[0-9]+"},
       {" %s ", "[ ]+"},
-      {"%time", "[ ]*[0-9]{1,5} ns"},
-      {"%console_report", "[ ]*[0-9]{1,5} ns [ ]*[0-9]{1,5} ns [ ]*[0-9]+"},
+      {"%time", "[ ]*[0-9]{1,6} ns"},
+      {"%console_report", "[ ]*[0-9]{1,6} ns [ ]*[0-9]{1,6} ns [ ]*[0-9]+"},
       {"%console_us_report", "[ ]*[0-9] us [ ]*[0-9] us [ ]*[0-9]+"},
       {"%csv_header",
        "name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
8
vendor/github.com/google/benchmark/test/register_benchmark_test.cc
generated
vendored
@@ -61,7 +61,7 @@ typedef benchmark::internal::Benchmark* ReturnVal;
 // Test RegisterBenchmark with no additional arguments
 //----------------------------------------------------------------------------//
 void BM_function(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_function);
@@ -77,7 +77,7 @@ ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
 #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
 
 void BM_extra_args(benchmark::State& st, const char* label) {
-  while (st.KeepRunning()) {
+  for (auto _ : st) {
   }
   st.SetLabel(label);
 }
@@ -99,7 +99,7 @@ ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
 
 struct CustomFixture {
   void operator()(benchmark::State& st) {
-    while (st.KeepRunning()) {
+    for (auto _ : st) {
     }
   }
 };
@@ -116,7 +116,7 @@ void TestRegistrationAtRuntime() {
   {
     const char* x = "42";
     auto capturing_lam = [=](benchmark::State& st) {
-      while (st.KeepRunning()) {
+      for (auto _ : st) {
      }
      st.SetLabel(x);
    };
176
vendor/github.com/google/benchmark/test/reporter_output_test.cc
generated
vendored
@@ -13,6 +13,43 @@ ADD_CASES(TC_ConsoleOut,
           {{"^[-]+$", MR_Next},
            {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
            {"^[-]+$", MR_Next}});
+static int AddContextCases() {
+  AddCases(TC_ConsoleErr,
+           {
+               {"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default},
+               {"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
+               {"Run on \\(%int X %float MHz CPU s\\)", MR_Next},
+           });
+  AddCases(TC_JSONOut,
+           {{"^\\{", MR_Default},
+            {"\"context\":", MR_Next},
+            {"\"date\": \"", MR_Next},
+            {"\"executable\": \".*/reporter_output_test(\\.exe)?\",", MR_Next},
+            {"\"num_cpus\": %int,$", MR_Next},
+            {"\"mhz_per_cpu\": %float,$", MR_Next},
+            {"\"cpu_scaling_enabled\": ", MR_Next},
+            {"\"caches\": \\[$", MR_Next}});
+  auto const& Caches = benchmark::CPUInfo::Get().caches;
+  if (!Caches.empty()) {
+    AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
+  }
+  for (size_t I = 0; I < Caches.size(); ++I) {
+    std::string num_caches_str =
+        Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
+    AddCases(
+        TC_ConsoleErr,
+        {{"L%int (Data|Instruction|Unified) %intK" + num_caches_str, MR_Next}});
+    AddCases(TC_JSONOut, {{"\\{$", MR_Next},
+                          {"\"type\": \"", MR_Next},
+                          {"\"level\": %int,$", MR_Next},
+                          {"\"size\": %int,$", MR_Next},
+                          {"\"num_sharing\": %int$", MR_Next},
+                          {"}[,]{0,1}$", MR_Next}});
+  }
+
+  AddCases(TC_JSONOut, {{"],$"}});
+  return 0;
+}
+int dummy_register = AddContextCases();
 ADD_CASES(TC_CSVOut, {{"%csv_header"}});
 
 // ========================================================================= //
@@ -20,7 +57,7 @@ ADD_CASES(TC_CSVOut, {{"%csv_header"}});
 // ========================================================================= //
 
 void BM_basic(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_basic);
@@ -28,8 +65,8 @@ BENCHMARK(BM_basic);
 ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"},
                        {"\"iterations\": %int,$", MR_Next},
-                       {"\"real_time\": %int,$", MR_Next},
-                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
                        {"\"time_unit\": \"ns\"$", MR_Next},
                        {"}", MR_Next}});
 ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
@@ -39,20 +76,20 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
 // ========================================================================= //
 
 void BM_bytes_per_second(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
   state.SetBytesProcessed(1);
 }
 BENCHMARK(BM_bytes_per_second);
 
 ADD_CASES(TC_ConsoleOut,
-          {{"^BM_bytes_per_second %console_report +%floatB/s$"}});
+          {{"^BM_bytes_per_second %console_report +%float[kM]{0,1}B/s$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"},
                        {"\"iterations\": %int,$", MR_Next},
-                       {"\"real_time\": %int,$", MR_Next},
-                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
                        {"\"time_unit\": \"ns\",$", MR_Next},
-                       {"\"bytes_per_second\": %int$", MR_Next},
+                       {"\"bytes_per_second\": %float$", MR_Next},
                        {"}", MR_Next}});
 ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
 
@@ -61,20 +98,20 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
 // ========================================================================= //
 
 void BM_items_per_second(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
   state.SetItemsProcessed(1);
 }
 BENCHMARK(BM_items_per_second);
 
 ADD_CASES(TC_ConsoleOut,
-          {{"^BM_items_per_second %console_report +%float items/s$"}});
+          {{"^BM_items_per_second %console_report +%float[kM]{0,1} items/s$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"},
                        {"\"iterations\": %int,$", MR_Next},
-                       {"\"real_time\": %int,$", MR_Next},
-                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
                        {"\"time_unit\": \"ns\",$", MR_Next},
-                       {"\"items_per_second\": %int$", MR_Next},
+                       {"\"items_per_second\": %float$", MR_Next},
                        {"}", MR_Next}});
 ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
 
@@ -83,7 +120,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
 // ========================================================================= //
 
 void BM_label(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
   state.SetLabel("some label");
 }
@@ -92,8 +129,8 @@ BENCHMARK(BM_label);
 ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"},
                        {"\"iterations\": %int,$", MR_Next},
-                       {"\"real_time\": %int,$", MR_Next},
-                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
                        {"\"time_unit\": \"ns\",$", MR_Next},
                        {"\"label\": \"some label\"$", MR_Next},
                        {"}", MR_Next}});
@@ -106,7 +143,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
 
 void BM_error(benchmark::State& state) {
   state.SkipWithError("message");
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_error);
@@ -123,7 +160,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});
 // ========================================================================= //
 
 void BM_no_arg_name(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_no_arg_name)->Arg(3);
@@ -136,7 +173,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
 // ========================================================================= //
 
 void BM_arg_name(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
@@ -149,7 +186,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
 // ========================================================================= //
 
 void BM_arg_names(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
@@ -163,7 +200,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
 // ========================================================================= //
 
 void BM_Complexity_O1(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
   state.SetComplexityN(state.range(0));
 }
@@ -179,30 +216,74 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"},
 
 // Test that non-aggregate data is printed by default
 void BM_Repeat(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
+// need two repetitions min to be able to output any aggregate output
+BENCHMARK(BM_Repeat)->Repetitions(2);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"},
+                          {"^BM_Repeat/repeats:2 %console_report$"},
+                          {"^BM_Repeat/repeats:2_mean %console_report$"},
+                          {"^BM_Repeat/repeats:2_median %console_report$"},
+                          {"^BM_Repeat/repeats:2_stddev %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"},
+                       {"\"name\": \"BM_Repeat/repeats:2\",$"},
+                       {"\"name\": \"BM_Repeat/repeats:2_mean\",$"},
+                       {"\"name\": \"BM_Repeat/repeats:2_median\",$"},
+                       {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
+                      {"^\"BM_Repeat/repeats:2\",%csv_report$"},
+                      {"^\"BM_Repeat/repeats:2_mean\",%csv_report$"},
+                      {"^\"BM_Repeat/repeats:2_median\",%csv_report$"},
+                      {"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
+// but for two repetitions, mean and median is the same, so let's repeat..
 BENCHMARK(BM_Repeat)->Repetitions(3);
 ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"},
                           {"^BM_Repeat/repeats:3 %console_report$"},
                           {"^BM_Repeat/repeats:3 %console_report$"},
                           {"^BM_Repeat/repeats:3_mean %console_report$"},
+                          {"^BM_Repeat/repeats:3_median %console_report$"},
                           {"^BM_Repeat/repeats:3_stddev %console_report$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
                        {"\"name\": \"BM_Repeat/repeats:3\",$"},
                        {"\"name\": \"BM_Repeat/repeats:3\",$"},
                        {"\"name\": \"BM_Repeat/repeats:3_mean\",$"},
+                       {"\"name\": \"BM_Repeat/repeats:3_median\",$"},
                        {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}});
 ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
                       {"^\"BM_Repeat/repeats:3\",%csv_report$"},
                       {"^\"BM_Repeat/repeats:3\",%csv_report$"},
                       {"^\"BM_Repeat/repeats:3_mean\",%csv_report$"},
+                      {"^\"BM_Repeat/repeats:3_median\",%csv_report$"},
                       {"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
+// median differs between even/odd number of repetitions, so just to be sure
+BENCHMARK(BM_Repeat)->Repetitions(4);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"},
+                          {"^BM_Repeat/repeats:4 %console_report$"},
+                          {"^BM_Repeat/repeats:4 %console_report$"},
+                          {"^BM_Repeat/repeats:4 %console_report$"},
+                          {"^BM_Repeat/repeats:4_mean %console_report$"},
+                          {"^BM_Repeat/repeats:4_median %console_report$"},
+                          {"^BM_Repeat/repeats:4_stddev %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"},
+                       {"\"name\": \"BM_Repeat/repeats:4\",$"},
+                       {"\"name\": \"BM_Repeat/repeats:4\",$"},
+                       {"\"name\": \"BM_Repeat/repeats:4\",$"},
+                       {"\"name\": \"BM_Repeat/repeats:4_mean\",$"},
+                       {"\"name\": \"BM_Repeat/repeats:4_median\",$"},
+                       {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
+                      {"^\"BM_Repeat/repeats:4\",%csv_report$"},
+                      {"^\"BM_Repeat/repeats:4\",%csv_report$"},
+                      {"^\"BM_Repeat/repeats:4\",%csv_report$"},
+                      {"^\"BM_Repeat/repeats:4_mean\",%csv_report$"},
+                      {"^\"BM_Repeat/repeats:4_median\",%csv_report$"},
+                      {"^\"BM_Repeat/repeats:4_stddev\",%csv_report$"}});
 
 // Test that a non-repeated test still prints non-aggregate results even when
 // only-aggregate reports have been requested
 void BM_RepeatOnce(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
@@ -212,23 +293,26 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
 
 // Test that non-aggregate data is not reported
 void BM_SummaryRepeat(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
 ADD_CASES(TC_ConsoleOut,
           {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
            {"^BM_SummaryRepeat/repeats:3_mean %console_report$"},
+           {"^BM_SummaryRepeat/repeats:3_median %console_report$"},
           {"^BM_SummaryRepeat/repeats:3_stddev %console_report$"}});
 ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
                        {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
+                       {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
                        {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}});
 ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
                       {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
+                      {"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"},
                       {"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}});
 
 void BM_RepeatTimeUnit(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_RepeatTimeUnit)
@@ -238,17 +322,59 @@ BENCHMARK(BM_RepeatTimeUnit)
 ADD_CASES(TC_ConsoleOut,
           {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
            {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_report$"},
+           {"^BM_RepeatTimeUnit/repeats:3_median %console_us_report$"},
           {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_report$"}});
 ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
                        {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
                        {"\"time_unit\": \"us\",?$"},
+                       {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
+                       {"\"time_unit\": \"us\",?$"},
                        {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
                        {"\"time_unit\": \"us\",?$"}});
 ADD_CASES(TC_CSVOut,
           {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
            {"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
+           {"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
           {"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
+
+// ========================================================================= //
+// -------------------- Testing user-provided statistics ------------------- //
+// ========================================================================= //
+
+const auto UserStatistics = [](const std::vector<double>& v) {
+  return v.back();
+};
+void BM_UserStats(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_UserStats)
+    ->Repetitions(3)
+    ->ComputeStatistics("", UserStatistics);
+// check that user-provided stats is calculated, and is after the default-ones
+// empty string as name is intentional, it would sort before anything else
+ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/repeats:3 %console_report$"},
+                          {"^BM_UserStats/repeats:3 %console_report$"},
+                          {"^BM_UserStats/repeats:3 %console_report$"},
+                          {"^BM_UserStats/repeats:3_mean %console_report$"},
+                          {"^BM_UserStats/repeats:3_median %console_report$"},
+                          {"^BM_UserStats/repeats:3_stddev %console_report$"},
+                          {"^BM_UserStats/repeats:3_ %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/repeats:3\",$"},
+                       {"\"name\": \"BM_UserStats/repeats:3\",$"},
+                       {"\"name\": \"BM_UserStats/repeats:3\",$"},
+                       {"\"name\": \"BM_UserStats/repeats:3_mean\",$"},
+                       {"\"name\": \"BM_UserStats/repeats:3_median\",$"},
+                       {"\"name\": \"BM_UserStats/repeats:3_stddev\",$"},
+                       {"\"name\": \"BM_UserStats/repeats:3_\",$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/repeats:3\",%csv_report$"},
+                      {"^\"BM_UserStats/repeats:3\",%csv_report$"},
+                      {"^\"BM_UserStats/repeats:3\",%csv_report$"},
+                      {"^\"BM_UserStats/repeats:3_mean\",%csv_report$"},
+                      {"^\"BM_UserStats/repeats:3_median\",%csv_report$"},
+                      {"^\"BM_UserStats/repeats:3_stddev\",%csv_report$"},
+                      {"^\"BM_UserStats/repeats:3_\",%csv_report$"}});
+
 // ========================================================================= //
 // --------------------------- TEST CASES END ------------------------------ //
 // ========================================================================= //
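Note: the new "user-provided statistics" block above exercises ComputeStatistics, which registers an extra aggregate computed over the per-repetition results. A minimal sketch of that API as used in the diff (the "max" statistic here is illustrative, not from the source):

    #include <algorithm>
    #include <vector>
    #include "benchmark/benchmark.h"

    static void BM_demo(benchmark::State& state) {
      for (auto _ : state) {
      }
    }
    BENCHMARK(BM_demo)
        ->Repetitions(3)
        ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
          // called once per metric with the values from all repetitions
          return *std::max_element(v.begin(), v.end());
        });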
44
vendor/github.com/google/benchmark/test/skip_with_error_test.cc
generated
vendored
@@ -70,6 +70,25 @@ void BM_error_before_running(benchmark::State& state) {
 BENCHMARK(BM_error_before_running);
 ADD_CASES("BM_error_before_running", {{"", true, "error message"}});
 
+void BM_error_before_running_batch(benchmark::State& state) {
+  state.SkipWithError("error message");
+  while (state.KeepRunningBatch(17)) {
+    assert(false);
+  }
+}
+BENCHMARK(BM_error_before_running_batch);
+ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}});
+
+void BM_error_before_running_range_for(benchmark::State& state) {
+  state.SkipWithError("error message");
+  for (auto _ : state) {
+    assert(false);
+  }
+}
+BENCHMARK(BM_error_before_running_range_for);
+ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});
+
 void BM_error_during_running(benchmark::State& state) {
   int first_iter = true;
   while (state.KeepRunning()) {
@@ -93,8 +112,31 @@ ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
                                       {"/2/threads:4", false, ""},
                                       {"/2/threads:8", false, ""}});
 
+void BM_error_during_running_ranged_for(benchmark::State& state) {
+  assert(state.max_iterations > 3 && "test requires at least a few iterations");
+  int first_iter = true;
+  // NOTE: Users should not write the for loop explicitly.
+  for (auto It = state.begin(), End = state.end(); It != End; ++It) {
+    if (state.range(0) == 1) {
+      assert(first_iter);
+      first_iter = false;
+      state.SkipWithError("error message");
+      // Test the unfortunate but documented behavior that the ranged-for loop
+      // doesn't automatically terminate when SkipWithError is set.
+      assert(++It != End);
+      break;  // Required behavior
+    }
+  }
+}
+BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5);
+ADD_CASES("BM_error_during_running_ranged_for",
+          {{"/1/iterations:5", true, "error message"},
+           {"/2/iterations:5", false, ""}});
+
+
 void BM_error_after_running(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     benchmark::DoNotOptimize(state.iterations());
   }
   if (state.thread_index <= (state.threads / 2))
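Note: as the new BM_error_during_running_ranged_for case documents, SkipWithError does not terminate the ranged-for loop by itself; the benchmark body must break out. A minimal sketch of the required pattern (DoWorkOrFail is a hypothetical helper, not from the diff):

    static void BM_may_fail(benchmark::State& state) {
      for (auto _ : state) {
        if (!DoWorkOrFail()) {
          state.SkipWithError("work failed");  // hypothetical message
          break;  // required: the loop does not stop on its own
        }
      }
    }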
66
vendor/github.com/google/benchmark/test/state_assembly_test.cc
generated
vendored
Normal file
@@ -0,0 +1,66 @@
+#include <benchmark/benchmark.h>
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wreturn-type"
+#endif
+
+extern "C" {
+extern int ExternInt;
+benchmark::State& GetState();
+void Fn();
+}
+
+using benchmark::State;
+
+// CHECK-LABEL: test_for_auto_loop:
+extern "C" int test_for_auto_loop() {
+  State& S = GetState();
+  int x = 42;
+  // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
+  // CHECK-NEXT: testq %rbx, %rbx
+  // CHECK-NEXT: je [[LOOP_END:.*]]
+
+  for (auto _ : S) {
+    // CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
+    // CHECK-GNU-NEXT: subq $1, %rbx
+    // CHECK-CLANG-NEXT: {{(addq \$1,|incq)}} %rax
+    // CHECK-NEXT: jne .L[[LOOP_HEAD]]
+    benchmark::DoNotOptimize(x);
+  }
+  // CHECK: [[LOOP_END]]:
+  // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
+
+  // CHECK: movl $101, %eax
+  // CHECK: ret
+  return 101;
+}
+
+// CHECK-LABEL: test_while_loop:
+extern "C" int test_while_loop() {
+  State& S = GetState();
+  int x = 42;
+
+  // CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]]
+  // CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]:
+  while (S.KeepRunning()) {
+    // CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]]
+    // CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]]
+    // CHECK: movq %[[IREG]], [[DEST:.*]]
+    benchmark::DoNotOptimize(x);
+  }
+  // CHECK-DAG: movq [[DEST]], %[[IREG]]
+  // CHECK-DAG: testq %[[IREG]], %[[IREG]]
+  // CHECK-DAG: jne .L[[LOOP_BODY]]
+  // CHECK-DAG: .L[[LOOP_HEADER]]:
+
+  // CHECK: cmpb $0
+  // CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]]
+  // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
+
+  // CHECK: .L[[LOOP_END]]:
+  // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
+
+  // CHECK: movl $101, %eax
+  // CHECK: ret
+  return 101;
+}
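Note: both assembly checks above anchor their loop bodies with benchmark::DoNotOptimize, which forces the compiler to materialize the value so an otherwise-empty timing loop is not deleted. A minimal usage sketch (illustrative only):

    static void BM_compute(benchmark::State& state) {
      for (auto _ : state) {
        int r = 2 + 2;                // work the optimizer could delete
        benchmark::DoNotOptimize(r);  // keeps r, and the work producing it, alive
      }
    }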
61
vendor/github.com/google/benchmark/test/statistics_gtest.cc
generated
vendored
Normal file
@@ -0,0 +1,61 @@
+//===---------------------------------------------------------------------===//
+// statistics_test - Unit tests for src/statistics.cc
+//===---------------------------------------------------------------------===//
+
+#include "../src/statistics.h"
+#include "gtest/gtest.h"
+
+namespace {
+TEST(StatisticsTest, Mean) {
+  std::vector<double> Inputs;
+  {
+    Inputs = {42, 42, 42, 42};
+    double Res = benchmark::StatisticsMean(Inputs);
+    EXPECT_DOUBLE_EQ(Res, 42.0);
+  }
+  {
+    Inputs = {1, 2, 3, 4};
+    double Res = benchmark::StatisticsMean(Inputs);
+    EXPECT_DOUBLE_EQ(Res, 2.5);
+  }
+  {
+    Inputs = {1, 2, 5, 10, 10, 14};
+    double Res = benchmark::StatisticsMean(Inputs);
+    EXPECT_DOUBLE_EQ(Res, 7.0);
+  }
+}
+
+TEST(StatisticsTest, Median) {
+  std::vector<double> Inputs;
+  {
+    Inputs = {42, 42, 42, 42};
+    double Res = benchmark::StatisticsMedian(Inputs);
+    EXPECT_DOUBLE_EQ(Res, 42.0);
+  }
+  {
+    Inputs = {1, 2, 3, 4};
+    double Res = benchmark::StatisticsMedian(Inputs);
+    EXPECT_DOUBLE_EQ(Res, 2.5);
+  }
+  {
+    Inputs = {1, 2, 5, 10, 10};
+    double Res = benchmark::StatisticsMedian(Inputs);
+    EXPECT_DOUBLE_EQ(Res, 5.0);
+  }
+}
+
+TEST(StatisticsTest, StdDev) {
+  std::vector<double> Inputs;
+  {
+    Inputs = {101, 101, 101, 101};
+    double Res = benchmark::StatisticsStdDev(Inputs);
+    EXPECT_DOUBLE_EQ(Res, 0.0);
+  }
+  {
+    Inputs = {1, 2, 3};
+    double Res = benchmark::StatisticsStdDev(Inputs);
+    EXPECT_DOUBLE_EQ(Res, 1.0);
+  }
+}
+
+}  // end namespace
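Note: the StdDev expectations imply the sample (n-1) form of the standard deviation. Checking against the {1, 2, 3} case from the test: the mean is 2, the squared deviations sum to (1-2)^2 + (2-2)^2 + (3-2)^2 = 2, and sqrt(2 / (3-1)) = 1.0, which matches EXPECT_DOUBLE_EQ(Res, 1.0); the population form sqrt(2/3) ≈ 0.816 would not.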
28
vendor/github.com/google/benchmark/test/templated_fixture_test.cc
generated
vendored
Normal file
@@ -0,0 +1,28 @@
+
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <memory>
+
+template<typename T>
+class MyFixture : public ::benchmark::Fixture {
+ public:
+  MyFixture() : data(0) {}
+
+  T data;
+};
+
+BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st) {
+  for (auto _ : st) {
+    data += 1;
+  }
+}
+
+BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) {
+  for (auto _ : st) {
+    data += 1.0;
+  }
+}
+BENCHMARK_REGISTER_F(MyFixture, Bar);
+
+BENCHMARK_MAIN();
30
vendor/github.com/google/benchmark/test/user_counters_tabular_test.cc
generated
vendored
@@ -54,7 +54,7 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,"
 // ========================================================================= //
 
 void BM_Counters_Tabular(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
   namespace bm = benchmark;
   state.counters.insert({
@@ -69,8 +69,8 @@ void BM_Counters_Tabular(benchmark::State& state) {
 BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
                        {"\"iterations\": %int,$", MR_Next},
-                       {"\"real_time\": %int,$", MR_Next},
-                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
                        {"\"time_unit\": \"ns\",$", MR_Next},
                        {"\"Bar\": %float,$", MR_Next},
                        {"\"Bat\": %float,$", MR_Next},
@@ -98,7 +98,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);
 // ========================================================================= //
 
 void BM_CounterRates_Tabular(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
   namespace bm = benchmark;
   state.counters.insert({
@@ -113,8 +113,8 @@ void BM_CounterRates_Tabular(benchmark::State& state) {
 BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
                        {"\"iterations\": %int,$", MR_Next},
-                       {"\"real_time\": %int,$", MR_Next},
-                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
                        {"\"time_unit\": \"ns\",$", MR_Next},
                        {"\"Bar\": %float,$", MR_Next},
                        {"\"Bat\": %float,$", MR_Next},
@@ -145,7 +145,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
 
 // set only some of the counters
 void BM_CounterSet0_Tabular(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
   namespace bm = benchmark;
   state.counters.insert({
@@ -157,8 +157,8 @@ void BM_CounterSet0_Tabular(benchmark::State& state) {
 BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
                        {"\"iterations\": %int,$", MR_Next},
-                       {"\"real_time\": %int,$", MR_Next},
-                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
                        {"\"time_unit\": \"ns\",$", MR_Next},
                        {"\"Bar\": %float,$", MR_Next},
                        {"\"Baz\": %float,$", MR_Next},
@@ -177,7 +177,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);
 
 // again.
 void BM_CounterSet1_Tabular(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
   namespace bm = benchmark;
   state.counters.insert({
@@ -189,8 +189,8 @@ void BM_CounterSet1_Tabular(benchmark::State& state) {
 BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
                        {"\"iterations\": %int,$", MR_Next},
-                       {"\"real_time\": %int,$", MR_Next},
-                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
                        {"\"time_unit\": \"ns\",$", MR_Next},
                        {"\"Bar\": %float,$", MR_Next},
                        {"\"Baz\": %float,$", MR_Next},
@@ -213,7 +213,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
 
 // set only some of the counters, different set now.
 void BM_CounterSet2_Tabular(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
   namespace bm = benchmark;
   state.counters.insert({
@@ -225,8 +225,8 @@ void BM_CounterSet2_Tabular(benchmark::State& state) {
 BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
                        {"\"iterations\": %int,$", MR_Next},
-                       {"\"real_time\": %int,$", MR_Next},
-                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
                        {"\"time_unit\": \"ns\",$", MR_Next},
                        {"\"Bat\": %float,$", MR_Next},
                        {"\"Baz\": %float,$", MR_Next},
40
vendor/github.com/google/benchmark/test/user_counters_test.cc
generated
vendored
40
vendor/github.com/google/benchmark/test/user_counters_test.cc
generated
vendored
@ -19,7 +19,7 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
|
|||||||
// ========================================================================= //
|
// ========================================================================= //
|
||||||
|
|
||||||
void BM_Counters_Simple(benchmark::State& state) {
|
void BM_Counters_Simple(benchmark::State& state) {
|
||||||
while (state.KeepRunning()) {
|
for (auto _ : state) {
|
||||||
}
|
}
|
||||||
state.counters["foo"] = 1;
|
state.counters["foo"] = 1;
|
||||||
state.counters["bar"] = 2 * (double)state.iterations();
|
state.counters["bar"] = 2 * (double)state.iterations();
|
||||||
@ -28,8 +28,8 @@ BENCHMARK(BM_Counters_Simple);
|
|||||||
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
|
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
|
||||||
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
|
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
|
||||||
{"\"iterations\": %int,$", MR_Next},
|
{"\"iterations\": %int,$", MR_Next},
|
||||||
{"\"real_time\": %int,$", MR_Next},
|
{"\"real_time\": %float,$", MR_Next},
|
||||||
{"\"cpu_time\": %int,$", MR_Next},
|
{"\"cpu_time\": %float,$", MR_Next},
|
||||||
{"\"time_unit\": \"ns\",$", MR_Next},
|
{"\"time_unit\": \"ns\",$", MR_Next},
|
||||||
{"\"bar\": %float,$", MR_Next},
|
{"\"bar\": %float,$", MR_Next},
|
||||||
{"\"foo\": %float$", MR_Next},
|
{"\"foo\": %float$", MR_Next},
|
||||||
@ -51,7 +51,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
|
|||||||
|
|
||||||
namespace { int num_calls1 = 0; }
|
namespace { int num_calls1 = 0; }
|
||||||
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
|
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
|
||||||
while (state.KeepRunning()) {
|
for (auto _ : state) {
|
||||||
}
|
}
|
||||||
state.counters["foo"] = 1;
|
state.counters["foo"] = 1;
|
||||||
state.counters["bar"] = ++num_calls1;
|
state.counters["bar"] = ++num_calls1;
|
||||||
@ -64,11 +64,11 @@ ADD_CASES(TC_ConsoleOut,
|
|||||||
"bar=%hrfloat foo=%hrfloat +%hrfloatB/s +%hrfloat items/s$"}});
|
"bar=%hrfloat foo=%hrfloat +%hrfloatB/s +%hrfloat items/s$"}});
|
||||||
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
|
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
|
||||||
{"\"iterations\": %int,$", MR_Next},
|
{"\"iterations\": %int,$", MR_Next},
|
||||||
{"\"real_time\": %int,$", MR_Next},
|
{"\"real_time\": %float,$", MR_Next},
|
||||||
{"\"cpu_time\": %int,$", MR_Next},
|
{"\"cpu_time\": %float,$", MR_Next},
|
||||||
{"\"time_unit\": \"ns\",$", MR_Next},
|
{"\"time_unit\": \"ns\",$", MR_Next},
|
||||||
{"\"bytes_per_second\": %int,$", MR_Next},
|
{"\"bytes_per_second\": %float,$", MR_Next},
|
||||||
{"\"items_per_second\": %int,$", MR_Next},
|
{"\"items_per_second\": %float,$", MR_Next},
|
||||||
{"\"bar\": %float,$", MR_Next},
|
{"\"bar\": %float,$", MR_Next},
|
||||||
{"\"foo\": %float$", MR_Next},
|
{"\"foo\": %float$", MR_Next},
|
||||||
{"}", MR_Next}});
|
{"}", MR_Next}});
|
||||||
@ -92,7 +92,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
|
|||||||
// ========================================================================= //
|
// ========================================================================= //
|
||||||
|
|
||||||
void BM_Counters_Rate(benchmark::State& state) {
|
void BM_Counters_Rate(benchmark::State& state) {
|
||||||
while (state.KeepRunning()) {
|
for (auto _ : state) {
|
||||||
}
|
}
|
||||||
namespace bm = benchmark;
|
namespace bm = benchmark;
|
||||||
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
|
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
|
||||||
@ -102,8 +102,8 @@ BENCHMARK(BM_Counters_Rate);
|
|||||||
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
|
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
                       {"\"iterations\": %int,$", MR_Next},
-                      {"\"real_time\": %int,$", MR_Next},
-                      {"\"cpu_time\": %int,$", MR_Next},
+                      {"\"real_time\": %float,$", MR_Next},
+                      {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
@@ -124,7 +124,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
// ========================================================================= //

void BM_Counters_Threads(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
  }
  state.counters["foo"] = 1;
  state.counters["bar"] = 2;
@@ -133,8 +133,8 @@ BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
                       {"\"iterations\": %int,$", MR_Next},
-                      {"\"real_time\": %int,$", MR_Next},
-                      {"\"cpu_time\": %int,$", MR_Next},
+                      {"\"real_time\": %float,$", MR_Next},
+                      {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
@@ -153,7 +153,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
// ========================================================================= //

void BM_Counters_AvgThreads(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
@@ -163,8 +163,8 @@ BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
                       {"\"iterations\": %int,$", MR_Next},
-                      {"\"real_time\": %int,$", MR_Next},
-                      {"\"cpu_time\": %int,$", MR_Next},
+                      {"\"real_time\": %float,$", MR_Next},
+                      {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
@@ -184,7 +184,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
// ========================================================================= //

void BM_Counters_AvgThreadsRate(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
@@ -194,8 +194,8 @@ BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
                       {"\"iterations\": %int,$", MR_Next},
-                      {"\"real_time\": %int,$", MR_Next},
-                      {"\"cpu_time\": %int,$", MR_Next},
+                      {"\"real_time\": %float,$", MR_Next},
+                      {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
316 vendor/github.com/google/benchmark/tools/compare.py generated vendored Executable file
@@ -0,0 +1,316 @@
#!/usr/bin/env python

"""
compare.py - versatile benchmark output compare tool
"""

import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *


def check_inputs(in1, in2, flags):
    """
    Perform checking on the user provided inputs and diagnose any abnormalities
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks causing it to be overwritten") % output_file)
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing optional flags has no effect since both "
              "inputs are JSON")
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
               " is not supported.") % output_type)
        sys.exit(1)


def create_parser():
    parser = ArgumentParser(
        description='versatile benchmark output compare tool')
    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')

    parser_a = subparsers.add_parser(
        'benchmarks',
        help='The most simple use-case, compare all the output of these two benchmarks')
    baseline = parser_a.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    contender = parser_a.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    parser_a.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_b = subparsers.add_parser(
        'filters', help='Compare filter one with the filter two of benchmark')
    baseline = parser_b.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test',
        metavar='test',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    contender = parser_b.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    parser_b.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_c = subparsers.add_parser(
        'benchmarksfiltered',
        help='Compare filter one of first benchmark with filter two of the second benchmark')
    baseline = parser_c.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    contender = parser_c.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='The second benchmark executable or JSON output file, that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    parser_c.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    return parser


def main():
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        exit(1)
    assert not unknown_args
    benchmark_options = args.benchmark_options

    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''

        # NOTE: if test_baseline == test_contender, you are analyzing the stdev

        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev

        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev

        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # should never happen
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    options_baseline = []
    options_contender = []

    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]

    # Run the benchmarks and report the results
    json1 = json1_orig = gbench.util.run_or_load_benchmark(
        test_baseline, benchmark_options + options_baseline)
    json2 = json2_orig = gbench.util.run_or_load_benchmark(
        test_contender, benchmark_options + options_contender)

    # Now, filter the benchmarks so that the difference report can work
    if filter_baseline and filter_contender:
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)

    # Diff and output
    output_lines = gbench.report.generate_difference_report(json1, json2)
    print(description)
    for ln in output_lines:
        print(ln)


import unittest


class TestParser(unittest.TestCase):
    def setUp(self):
        self.parser = create_parser()
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
        self.testInput1 = os.path.join(testInputs, 'test1_run2.json')

    def test_benchmarks_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1])
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, 'd'])
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['d'])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_basic(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'g')


if __name__ == '__main__':
    # unittest.main()
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
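A minimal sketch, not part of the commit, of how the three subcommands above parse, driven through create_parser(). It assumes compare.py and its gbench package are importable; the two JSON paths are throwaway temp files created only so argparse.FileType('r') has something to open.

import json
import tempfile

from compare import create_parser  # assumes compare.py is on the import path

# Throwaway inputs so argparse.FileType('r') can open something real.
paths = []
for _ in range(2):
    tmp = tempfile.NamedTemporaryFile('w', suffix='.json', delete=False)
    json.dump({'benchmarks': []}, tmp)
    tmp.close()
    paths.append(tmp.name)

parser = create_parser()

args = parser.parse_args(['benchmarks', paths[0], paths[1]])
assert args.mode == 'benchmarks'

args = parser.parse_args(['filters', paths[0], 'BM_base.*', 'BM_new.*'])
assert args.mode == 'filters' and args.filter_baseline == ['BM_base.*']

args = parser.parse_args(
    ['benchmarksfiltered', paths[0], 'BM_base.*', paths[1], 'BM_new.*'])
assert args.mode == 'benchmarksfiltered'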
11 vendor/github.com/google/benchmark/tools/compare_bench.py generated vendored
@@ -39,21 +39,20 @@ def main():
     parser.add_argument(
         'test2', metavar='test2', type=str, nargs=1,
         help='A benchmark executable or JSON output file')
-    # FIXME this is a dummy argument which will never actually match
-    # any --benchmark flags but it helps generate a better usage message
     parser.add_argument(
-        'benchmark_options', metavar='benchmark_option', nargs='*',
+        'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER,
         help='Arguments to pass when running benchmark executables'
     )
     args, unknown_args = parser.parse_known_args()
     # Parse the command line flags
     test1 = args.test1[0]
     test2 = args.test2[0]
-    if args.benchmark_options:
+    if unknown_args:
+        # should never happen
         print("Unrecognized positional argument arguments: '%s'"
-              % args.benchmark_options)
+              % unknown_args)
         exit(1)
-    benchmark_options = unknown_args
+    benchmark_options = args.benchmark_options
     check_inputs(test1, test2, benchmark_options)
     # Run the benchmarks and report the results
     json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options)
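The switch from nargs='*' to argparse.REMAINDER above is what lets flag-like tokens reach benchmark_options at all; a standalone sketch of the difference, using plain argparse and a hypothetical file name:

import argparse

p = argparse.ArgumentParser()
p.add_argument('test1', nargs=1)
p.add_argument('benchmark_options', nargs=argparse.REMAINDER)

# With REMAINDER, trailing flag-like tokens are collected as positionals
# instead of being rejected as unrecognized optional arguments.
args = p.parse_args(['a.json', '--benchmark_filter=BM_Foo', '--v=2'])
print(args.benchmark_options)  # ['--benchmark_filter=BM_Foo', '--v=2']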
44 vendor/github.com/google/benchmark/tools/gbench/Inputs/test1_run1.json generated vendored
@@ -28,6 +28,20 @@
     "cpu_time": 50,
     "time_unit": "ns"
   },
+  {
+    "name": "BM_1PercentFaster",
+    "iterations": 1000,
+    "real_time": 100,
+    "cpu_time": 100,
+    "time_unit": "ns"
+  },
+  {
+    "name": "BM_1PercentSlower",
+    "iterations": 1000,
+    "real_time": 100,
+    "cpu_time": 100,
+    "time_unit": "ns"
+  },
   {
     "name": "BM_10PercentFaster",
     "iterations": 1000,
@@ -55,6 +69,34 @@
     "real_time": 10000,
     "cpu_time": 10000,
     "time_unit": "ns"
+  },
+  {
+    "name": "BM_10PercentCPUToTime",
+    "iterations": 1000,
+    "real_time": 100,
+    "cpu_time": 100,
+    "time_unit": "ns"
+  },
+  {
+    "name": "BM_ThirdFaster",
+    "iterations": 1000,
+    "real_time": 100,
+    "cpu_time": 100,
+    "time_unit": "ns"
+  },
+  {
+    "name": "BM_BadTimeUnit",
+    "iterations": 1000,
+    "real_time": 0.4,
+    "cpu_time": 0.5,
+    "time_unit": "s"
+  },
+  {
+    "name": "BM_DifferentTimeUnit",
+    "iterations": 1,
+    "real_time": 1,
+    "cpu_time": 1,
+    "time_unit": "s"
   }
 ]
}
48 vendor/github.com/google/benchmark/tools/gbench/Inputs/test1_run2.json generated vendored
@@ -28,6 +28,20 @@
     "cpu_time": 100,
     "time_unit": "ns"
   },
+  {
+    "name": "BM_1PercentFaster",
+    "iterations": 1000,
+    "real_time": 98.9999999,
+    "cpu_time": 98.9999999,
+    "time_unit": "ns"
+  },
+  {
+    "name": "BM_1PercentSlower",
+    "iterations": 1000,
+    "real_time": 100.9999999,
+    "cpu_time": 100.9999999,
+    "time_unit": "ns"
+  },
   {
     "name": "BM_10PercentFaster",
     "iterations": 1000,
@@ -45,8 +59,8 @@
   {
     "name": "BM_100xSlower",
     "iterations": 1000,
-    "real_time": 10000,
-    "cpu_time": 10000,
+    "real_time": 1.0000e+04,
+    "cpu_time": 1.0000e+04,
     "time_unit": "ns"
   },
   {
@@ -55,6 +69,34 @@
     "real_time": 100,
     "cpu_time": 100,
     "time_unit": "ns"
+  },
+  {
+    "name": "BM_10PercentCPUToTime",
+    "iterations": 1000,
+    "real_time": 110,
+    "cpu_time": 90,
+    "time_unit": "ns"
+  },
+  {
+    "name": "BM_ThirdFaster",
+    "iterations": 1000,
+    "real_time": 66.665,
+    "cpu_time": 66.664,
+    "time_unit": "ns"
+  },
+  {
+    "name": "BM_BadTimeUnit",
+    "iterations": 1000,
+    "real_time": 0.04,
+    "cpu_time": 0.6,
+    "time_unit": "s"
+  },
+  {
+    "name": "BM_DifferentTimeUnit",
+    "iterations": 1,
+    "real_time": 1,
+    "cpu_time": 1,
+    "time_unit": "ns"
   }
 ]
}
81 vendor/github.com/google/benchmark/tools/gbench/Inputs/test2_run.json generated vendored Normal file
@@ -0,0 +1,81 @@
{
  "context": {
    "date": "2016-08-02 17:44:46",
    "num_cpus": 4,
    "mhz_per_cpu": 4228,
    "cpu_scaling_enabled": false,
    "library_build_type": "release"
  },
  "benchmarks": [
    {
      "name": "BM_Hi",
      "iterations": 1234,
      "real_time": 42,
      "cpu_time": 24,
      "time_unit": "ms"
    },
    {
      "name": "BM_Zero",
      "iterations": 1000,
      "real_time": 10,
      "cpu_time": 10,
      "time_unit": "ns"
    },
    {
      "name": "BM_Zero/4",
      "iterations": 4000,
      "real_time": 40,
      "cpu_time": 40,
      "time_unit": "ns"
    },
    {
      "name": "Prefix/BM_Zero",
      "iterations": 2000,
      "real_time": 20,
      "cpu_time": 20,
      "time_unit": "ns"
    },
    {
      "name": "Prefix/BM_Zero/3",
      "iterations": 3000,
      "real_time": 30,
      "cpu_time": 30,
      "time_unit": "ns"
    },
    {
      "name": "BM_One",
      "iterations": 5000,
      "real_time": 5,
      "cpu_time": 5,
      "time_unit": "ns"
    },
    {
      "name": "BM_One/4",
      "iterations": 2000,
      "real_time": 20,
      "cpu_time": 20,
      "time_unit": "ns"
    },
    {
      "name": "Prefix/BM_One",
      "iterations": 1000,
      "real_time": 10,
      "cpu_time": 10,
      "time_unit": "ns"
    },
    {
      "name": "Prefix/BM_One/3",
      "iterations": 1500,
      "real_time": 15,
      "cpu_time": 15,
      "time_unit": "ns"
    },
    {
      "name": "BM_Bye",
      "iterations": 5321,
      "real_time": 11,
      "cpu_time": 63,
      "time_unit": "ns"
    }
  ]
}
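This fixture is consumed by the new family-comparison test further down; a self-contained sketch of the renaming that filter_benchmark (added to report.py below) performs on these names:

import re

# The regex and replacement mirror TestReportDifferenceBetweenFamilies below.
regex = re.compile('BM_Z.ro')
names = ['BM_Hi', 'BM_Zero', 'BM_Zero/4', 'Prefix/BM_Zero', 'Prefix/BM_Zero/3']
kept = [regex.sub('.', n) for n in names if regex.search(n)]
print(kept)  # ['.', './4', 'Prefix/.', 'Prefix/./3']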
88 vendor/github.com/google/benchmark/tools/gbench/report.py generated vendored
@@ -1,6 +1,8 @@
 """report.py - Utilities for reporting statistics about benchmark results
 """
 import os
+import re
+import copy

 class BenchmarkColor(object):
     def __init__(self, name, code):
@@ -66,19 +68,36 @@ def calculate_change(old_val, new_val):
     return float(new_val - old_val) / abs(old_val)


+def filter_benchmark(json_orig, family, replacement=""):
+    """
+    Apply a filter to the json, and only leave the 'family' of benchmarks.
+    """
+    regex = re.compile(family)
+    filtered = {}
+    filtered['benchmarks'] = []
+    for be in json_orig['benchmarks']:
+        if not regex.search(be['name']):
+            continue
+        filteredbench = copy.deepcopy(be)  # Do NOT modify the old name!
+        filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
+        filtered['benchmarks'].append(filteredbench)
+    return filtered
+
+
 def generate_difference_report(json1, json2, use_color=True):
     """
     Calculate and report the difference between each test of two benchmarks
     runs specified as 'json1' and 'json2'.
     """
-    first_col_width = find_longest_name(json1['benchmarks']) + 5
+    first_col_width = find_longest_name(json1['benchmarks'])
     def find_test(name):
         for b in json2['benchmarks']:
             if b['name'] == name:
                 return b
         return None
-    first_line = "{:<{}s} Time CPU Old New".format(
-        'Benchmark', first_col_width)
+    first_col_width = max(first_col_width, len('Benchmark'))
+    first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
+        'Benchmark', 12 + first_col_width)
     output_strs = [first_line, '-' * len(first_line)]

     gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn)
@@ -87,6 +106,9 @@ def generate_difference_report(json1, json2, use_color=True):
         if not other_bench:
             continue

+        if bn['time_unit'] != other_bench['time_unit']:
+            continue
+
         def get_color(res):
             if res > 0.05:
                 return BC_FAIL
@@ -94,12 +116,13 @@ def generate_difference_report(json1, json2, use_color=True):
                 return BC_WHITE
             else:
                 return BC_CYAN
-        fmt_str = "{}{:<{}s}{endc}{}{:+9.2f}{endc}{}{:+14.2f}{endc}{:14d}{:14d}"
+        fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
         tres = calculate_change(bn['real_time'], other_bench['real_time'])
         cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
         output_strs += [color_format(use_color, fmt_str,
                                      BC_HEADER, bn['name'], first_col_width,
                                      get_color(tres), tres, get_color(cpures), cpures,
+                                     bn['real_time'], other_bench['real_time'],
                                      bn['cpu_time'], other_bench['cpu_time'],
                                      endc=BC_ENDC)]
     return output_strs
@@ -123,24 +146,63 @@ class TestReportDifference(unittest.TestCase):

     def test_basic(self):
         expect_lines = [
-            ['BM_SameTimes', '+0.00', '+0.00', '10', '10'],
-            ['BM_2xFaster', '-0.50', '-0.50', '50', '25'],
-            ['BM_2xSlower', '+1.00', '+1.00', '50', '100'],
-            ['BM_10PercentFaster', '-0.10', '-0.10', '100', '90'],
-            ['BM_10PercentSlower', '+0.10', '+0.10', '100', '110'],
-            ['BM_100xSlower', '+99.00', '+99.00', '100', '10000'],
-            ['BM_100xFaster', '-0.99', '-0.99', '10000', '100'],
+            ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
+            ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
+            ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
+            ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
+            ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
+            ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
+            ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
+            ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'],
+            ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'],
+            ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'],
+            ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
+            ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
         ]
         json1, json2 = self.load_results()
         output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
         output_lines = output_lines_with_header[2:]
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in xrange(0, len(output_lines)):
+        for i in range(0, len(output_lines)):
             parts = [x for x in output_lines[i].split(' ') if x]
-            self.assertEqual(len(parts), 5)
+            self.assertEqual(len(parts), 7)
+            self.assertEqual(parts, expect_lines[i])
+
+
+class TestReportDifferenceBetweenFamilies(unittest.TestCase):
+    def load_result(self):
+        import json
+        testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
+        testOutput = os.path.join(testInputs, 'test2_run.json')
+        with open(testOutput, 'r') as f:
+            json = json.load(f)
+        return json
+
+    def test_basic(self):
+        expect_lines = [
+            ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
+            ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
+            ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
+            ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
+        ]
+        json = self.load_result()
+        json1 = filter_benchmark(json, "BM_Z.ro", ".")
+        json2 = filter_benchmark(json, "BM_O.e", ".")
+        output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
+        output_lines = output_lines_with_header[2:]
+        print("\n")
+        print("\n".join(output_lines_with_header))
+        self.assertEqual(len(output_lines), len(expect_lines))
+        for i in range(0, len(output_lines)):
+            parts = [x for x in output_lines[i].split(' ') if x]
+            self.assertEqual(len(parts), 7)
             self.assertEqual(parts, expect_lines[i])


 if __name__ == '__main__':
     unittest.main()
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
+# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
+# kate: indent-mode python; remove-trailing-spaces modified;
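As a sanity check on the new seven-column expectations, calculate_change (unchanged above) applied to the BM_ThirdFaster fixture values reproduces the asymmetric Time/CPU pair in the test; a sketch:

def calculate_change(old_val, new_val):
    # Same definition as in report.py above.
    return float(new_val - old_val) / abs(old_val)

# run1: real 100 ns / cpu 100 ns; run2: real 66.665 ns / cpu 66.664 ns
print('%+.4f' % calculate_change(100, 66.665))  # -0.3333, the Time column
print('%+.4f' % calculate_change(100, 66.664))  # -0.3334, the CPU column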
151 vendor/github.com/google/benchmark/tools/strip_asm.py generated vendored Executable file
@@ -0,0 +1,151 @@
#!/usr/bin/env python

"""
strip_asm.py - Cleanup ASM output for the specified file
"""

from argparse import ArgumentParser
import sys
import os
import re

def find_used_labels(asm):
    found = set()
    label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
    for l in asm.splitlines():
        m = label_re.match(l)
        if m:
            found.add('.L%s' % m.group(1))
    return found


def normalize_labels(asm):
    decls = set()
    label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
    for l in asm.splitlines():
        m = label_decl.match(l)
        if m:
            decls.add(m.group(0))
    if len(decls) == 0:
        return asm
    needs_dot = next(iter(decls))[0] != '.'
    if not needs_dot:
        return asm
    for ld in decls:
        asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm)
    return asm


def transform_labels(asm):
    asm = normalize_labels(asm)
    used_decls = find_used_labels(asm)
    new_asm = ''
    label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
    for l in asm.splitlines():
        m = label_decl.match(l)
        if not m or m.group(0) in used_decls:
            new_asm += l
            new_asm += '\n'
    return new_asm


def is_identifier(tk):
    if len(tk) == 0:
        return False
    first = tk[0]
    if not first.isalpha() and first != '_':
        return False
    for i in range(1, len(tk)):
        c = tk[i]
        if not c.isalnum() and c != '_':
            return False
    return True

def process_identifiers(l):
    """
    process_identifiers - process all identifiers and modify them to have
    consistent names across all platforms; specifically across ELF and MachO.
    For example, MachO inserts an additional understore at the beginning of
    names. This function removes that.
    """
    parts = re.split(r'([a-zA-Z0-9_]+)', l)
    new_line = ''
    for tk in parts:
        if is_identifier(tk):
            if tk.startswith('__Z'):
                tk = tk[1:]
            elif tk.startswith('_') and len(tk) > 1 and \
                    tk[1].isalpha() and tk[1] != 'Z':
                tk = tk[1:]
        new_line += tk
    return new_line


def process_asm(asm):
    """
    Strip the ASM of unwanted directives and lines
    """
    new_contents = ''
    asm = transform_labels(asm)

    # TODO: Add more things we want to remove
    discard_regexes = [
        re.compile("\s+\..*$"), # directive
        re.compile("\s*#(NO_APP|APP)$"), #inline ASM
        re.compile("\s*#.*$"), # comment line
        re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive
        re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"),
    ]
    keep_regexes = [

    ]
    fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:")
    for l in asm.splitlines():
        # Remove Mach-O attribute
        l = l.replace('@GOTPCREL', '')
        add_line = True
        for reg in discard_regexes:
            if reg.match(l) is not None:
                add_line = False
                break
        for reg in keep_regexes:
            if reg.match(l) is not None:
                add_line = True
                break
        if add_line:
            if fn_label_def.match(l) and len(new_contents) != 0:
                new_contents += '\n'
            l = process_identifiers(l)
            new_contents += l
            new_contents += '\n'
    return new_contents

def main():
    parser = ArgumentParser(
        description='generate a stripped assembly file')
    parser.add_argument(
        'input', metavar='input', type=str, nargs=1,
        help='An input assembly file')
    parser.add_argument(
        'out', metavar='output', type=str, nargs=1,
        help='The output file')
    args, unknown_args = parser.parse_known_args()
    input = args.input[0]
    output = args.out[0]
    if not os.path.isfile(input):
        print(("ERROR: input file '%s' does not exist") % input)
        sys.exit(1)
    contents = None
    with open(input, 'r') as f:
        contents = f.read()
    new_contents = process_asm(contents)
    with open(output, 'w') as f:
        f.write(new_contents)


if __name__ == '__main__':
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
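A small self-contained check, not from the commit, of the jump-label regex that find_used_labels above relies on, run against a hypothetical two-label snippet:

import re

label_re = re.compile(r"\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
asm = "\tjmp\t.LBB0_3\n.LBB0_2:\n\tjne\t.LBB0_2\n"
found = set()
for line in asm.splitlines():
    m = label_re.match(line)
    if m:
        # Only labels that are jump targets are considered "used".
        found.add('.L%s' % m.group(1))
print(sorted(found))  # ['.LBB0_2', '.LBB0_3']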
4 vendor/github.com/google/googletest/.clang-format generated vendored Normal file
@@ -0,0 +1,4 @@
# Run manually to reformat a file:
# clang-format -i --style=file <file>
Language: Cpp
BasedOnStyle: Google
82 vendor/github.com/google/googletest/.gitignore generated vendored
@@ -1,2 +1,84 @@
 # Ignore CI build directory
 build/
+xcuserdata
+cmake-build-debug/
+.idea/
+bazel-bin
+bazel-genfiles
+bazel-googletest
+bazel-out
+bazel-testlogs
+# python
+*.pyc
+
+# Visual Studio files
+.vs
+*.sdf
+*.opensdf
+*.VC.opendb
+*.suo
+*.user
+_ReSharper.Caches/
+Win32-Debug/
+Win32-Release/
+x64-Debug/
+x64-Release/
+
+# Ignore autoconf / automake files
+Makefile.in
+aclocal.m4
+configure
+build-aux/
+autom4te.cache/
+googletest/m4/libtool.m4
+googletest/m4/ltoptions.m4
+googletest/m4/ltsugar.m4
+googletest/m4/ltversion.m4
+googletest/m4/lt~obsolete.m4
+googlemock/m4
+
+# Ignore generated directories.
+googlemock/fused-src/
+googletest/fused-src/
+
+# macOS files
+.DS_Store
+googletest/.DS_Store
+googletest/xcode/.DS_Store
+
+# Ignore cmake generated directories and files.
+CMakeFiles
+CTestTestfile.cmake
+Makefile
+cmake_install.cmake
+googlemock/CMakeFiles
+googlemock/CTestTestfile.cmake
+googlemock/Makefile
+googlemock/cmake_install.cmake
+googlemock/gtest
+/bin
+/googlemock/gmock.dir
+/googlemock/gmock_main.dir
+/googlemock/RUN_TESTS.vcxproj.filters
+/googlemock/RUN_TESTS.vcxproj
+/googlemock/INSTALL.vcxproj.filters
+/googlemock/INSTALL.vcxproj
+/googlemock/gmock_main.vcxproj.filters
+/googlemock/gmock_main.vcxproj
+/googlemock/gmock.vcxproj.filters
+/googlemock/gmock.vcxproj
+/googlemock/gmock.sln
+/googlemock/ALL_BUILD.vcxproj.filters
+/googlemock/ALL_BUILD.vcxproj
+/lib
+/Win32
+/ZERO_CHECK.vcxproj.filters
+/ZERO_CHECK.vcxproj
+/RUN_TESTS.vcxproj.filters
+/RUN_TESTS.vcxproj
+/INSTALL.vcxproj.filters
+/INSTALL.vcxproj
+/googletest-distribution.sln
+/CMakeCache.txt
+/ALL_BUILD.vcxproj.filters
+/ALL_BUILD.vcxproj
85 vendor/github.com/google/googletest/.travis.yml generated vendored
@@ -1,17 +1,54 @@
 # Build matrix / environment variable are explained on:
-# http://about.travis-ci.org/docs/user/build-configuration/
+# https://docs.travis-ci.com/user/customizing-the-build/
 # This file can be validated on:
 # http://lint.travis-ci.org/
+
+language: cpp
+
+# Define the matrix explicitly, manually expanding the combinations of (os, compiler, env).
+# It is more tedious, but grants us far more flexibility.
+matrix:
+  include:
+    - os: linux
+      before_install: chmod -R +x ./ci/*platformio.sh
+      install: ./ci/install-platformio.sh
+      script: ./ci/build-platformio.sh
+    - os: linux
+      dist: xenial
+      compiler: gcc
+      install: ./ci/install-linux.sh && ./ci/log-config.sh
+      script: ./ci/build-linux-bazel.sh
+    - os: linux
+      dist: xenial
+      compiler: clang
+      install: ./ci/install-linux.sh && ./ci/log-config.sh
+      script: ./ci/build-linux-bazel.sh
+    - os: linux
+      compiler: gcc
+      env: BUILD_TYPE=Debug VERBOSE=1 CXX_FLAGS=-std=c++11
+    - os: linux
+      compiler: clang
+      env: BUILD_TYPE=Release VERBOSE=1 CXX_FLAGS=-std=c++11 -Wgnu-zero-variadic-macro-arguments
+    - os: linux
+      compiler: clang
+      env: BUILD_TYPE=Release VERBOSE=1 CXX_FLAGS=-std=c++11 NO_EXCEPTION=ON NO_RTTI=ON COMPILER_IS_GNUCXX=ON
+    - os: osx
+      compiler: gcc
+      env: BUILD_TYPE=Release VERBOSE=1 CXX_FLAGS=-std=c++11 HOMEBREW_LOGS=~/homebrew-logs HOMEBREW_TEMP=~/homebrew-temp
+    - os: osx
+      compiler: clang
+      env: BUILD_TYPE=Release VERBOSE=1 CXX_FLAGS=-std=c++11 HOMEBREW_LOGS=~/homebrew-logs HOMEBREW_TEMP=~/homebrew-temp
+
+# These are the install and build (script) phases for the most common entries in the matrix. They could be included
+# in each entry in the matrix, but that is just repetitive.
 install:
-  # /usr/bin/gcc is 4.6 always, but gcc-X.Y is available.
-  - if [ "$CXX" = "g++" ]; then export CXX="g++-4.9" CC="gcc-4.9"; fi
-  # /usr/bin/clang is 3.4, lets override with modern one.
-  - if [ "$CXX" = "clang++" ] && [ "$TRAVIS_OS_NAME" = "linux" ]; then export CXX="clang++-3.7" CC="clang-3.7"; fi
-  - echo ${PATH}
-  - echo ${CXX}
-  - ${CXX} --version
-  - ${CXX} -v
+  - ./ci/install-${TRAVIS_OS_NAME}.sh
+  - . ./ci/env-${TRAVIS_OS_NAME}.sh
+  - ./ci/log-config.sh
+
+script: ./ci/travis.sh
+
+# This section installs the necessary dependencies.
 addons:
   apt:
     # List of whitelisted in travis packages for ubuntu-precise can be found here:
@@ -20,27 +57,17 @@ addons:
     # https://github.com/travis-ci/apt-source-whitelist/blob/master/ubuntu.json
     sources:
     - ubuntu-toolchain-r-test
-    - llvm-toolchain-precise-3.7
+    - llvm-toolchain-precise-3.9
     packages:
-    - gcc-4.9
     - g++-4.9
-    - clang-3.7
-    - valgrind
+    - clang-3.9
+    update: true
+  homebrew:
+    packages:
+      - ccache
+      - gcc@4.9
+      - llvm@4
+    update: true
-os:
-  - linux
-  - osx
-language: cpp
-compiler:
-  - gcc
-  - clang
-script: ./travis.sh
-env:
-  matrix:
-    - GTEST_TARGET=googletest SHARED_LIB=OFF STATIC_LIB=ON CMAKE_PKG=OFF BUILD_TYPE=debug VERBOSE_MAKE=true VERBOSE
-    - GTEST_TARGET=googlemock SHARED_LIB=OFF STATIC_LIB=ON CMAKE_PKG=OFF BUILD_TYPE=debug VERBOSE_MAKE=true VERBOSE
-    - GTEST_TARGET=googlemock SHARED_LIB=OFF STATIC_LIB=ON CMAKE_PKG=OFF BUILD_TYPE=debug CXX_FLAGS=-std=c++11 VERBOSE_MAKE=true VERBOSE
-    # - GTEST_TARGET=googletest SHARED_LIB=ON STATIC_LIB=ON CMAKE_PKG=ON BUILD_TYPE=release VERBOSE_MAKE=false
-    # - GTEST_TARGET=googlemock SHARED_LIB=ON STATIC_LIB=ON CMAKE_PKG=ON BUILD_TYPE=release VERBOSE_MAKE=false
 notifications:
   email: false
-sudo: false
179 vendor/github.com/google/googletest/BUILD.bazel generated vendored Normal file
@@ -0,0 +1,179 @@
# Copyright 2017 Google Inc.
# All Rights Reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Bazel Build for Google C++ Testing Framework(Google Test)

load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")

package(default_visibility = ["//visibility:public"])

licenses(["notice"])

config_setting(
    name = "windows",
    constraint_values = ["@bazel_tools//platforms:windows"],
)

config_setting(
    name = "has_absl",
    values = {"define": "absl=1"},
)

# Library that defines the FRIEND_TEST macro.
cc_library(
    name = "gtest_prod",
    hdrs = ["googletest/include/gtest/gtest_prod.h"],
    includes = ["googletest/include"],
)

# Google Test including Google Mock
cc_library(
    name = "gtest",
    srcs = glob(
        include = [
            "googletest/src/*.cc",
            "googletest/src/*.h",
            "googletest/include/gtest/**/*.h",
            "googlemock/src/*.cc",
            "googlemock/include/gmock/**/*.h",
        ],
        exclude = [
            "googletest/src/gtest-all.cc",
            "googletest/src/gtest_main.cc",
            "googlemock/src/gmock-all.cc",
            "googlemock/src/gmock_main.cc",
        ],
    ),
    hdrs = glob([
        "googletest/include/gtest/*.h",
        "googlemock/include/gmock/*.h",
    ]),
    copts = select({
        ":windows": [],
        "//conditions:default": ["-pthread"],
    }),
    defines = select({
        ":has_absl": ["GTEST_HAS_ABSL=1"],
        "//conditions:default": [],
    }),
    features = select({
        ":windows": ["windows_export_all_symbols"],
        "//conditions:default": [],
    }),
    includes = [
        "googlemock",
        "googlemock/include",
        "googletest",
        "googletest/include",
    ],
    linkopts = select({
        ":windows": [],
        "//conditions:default": ["-pthread"],
    }),
    deps = select({
        ":has_absl": [
            "@com_google_absl//absl/debugging:failure_signal_handler",
            "@com_google_absl//absl/debugging:stacktrace",
            "@com_google_absl//absl/debugging:symbolize",
            "@com_google_absl//absl/strings",
            "@com_google_absl//absl/types:optional",
            "@com_google_absl//absl/types:variant",
        ],
        "//conditions:default": [],
    }),
)

cc_library(
    name = "gtest_main",
    srcs = ["googlemock/src/gmock_main.cc"],
    features = select({
        ":windows": ["windows_export_all_symbols"],
        "//conditions:default": [],
    }),
    deps = [":gtest"],
)

# The following rules build samples of how to use gTest.
cc_library(
    name = "gtest_sample_lib",
    srcs = [
        "googletest/samples/sample1.cc",
        "googletest/samples/sample2.cc",
        "googletest/samples/sample4.cc",
    ],
    hdrs = [
        "googletest/samples/prime_tables.h",
        "googletest/samples/sample1.h",
        "googletest/samples/sample2.h",
        "googletest/samples/sample3-inl.h",
        "googletest/samples/sample4.h",
    ],
    features = select({
        ":windows": ["windows_export_all_symbols"],
        "//conditions:default": [],
    }),
)

cc_test(
    name = "gtest_samples",
    size = "small",
    # All Samples except:
    #   sample9 (main)
    #   sample10 (main and takes a command line option and needs to be separate)
    srcs = [
        "googletest/samples/sample1_unittest.cc",
        "googletest/samples/sample2_unittest.cc",
        "googletest/samples/sample3_unittest.cc",
        "googletest/samples/sample4_unittest.cc",
        "googletest/samples/sample5_unittest.cc",
        "googletest/samples/sample6_unittest.cc",
        "googletest/samples/sample7_unittest.cc",
        "googletest/samples/sample8_unittest.cc",
    ],
    linkstatic = 0,
    deps = [
        "gtest_sample_lib",
        ":gtest_main",
    ],
)

cc_test(
    name = "sample9_unittest",
    size = "small",
    srcs = ["googletest/samples/sample9_unittest.cc"],
    deps = [":gtest"],
)

cc_test(
    name = "sample10_unittest",
    size = "small",
    srcs = ["googletest/samples/sample10_unittest.cc"],
    deps = [":gtest"],
)
28 vendor/github.com/google/googletest/CMakeLists.txt generated vendored
@@ -1,16 +1,36 @@
-cmake_minimum_required(VERSION 2.6.2)
-project( googletest-distribution )
+# Note: CMake support is community-based. The maintainers do not use CMake
+# internally.
+
+cmake_minimum_required(VERSION 2.8.8)
+
+if (POLICY CMP0048)
+  cmake_policy(SET CMP0048 NEW)
+endif (POLICY CMP0048)
+
+project(googletest-distribution)
+set(GOOGLETEST_VERSION 1.10.0)
+
+if (CMAKE_VERSION VERSION_LESS "3.1")
+  add_definitions(-std=c++11)
+else()
+  set(CMAKE_CXX_STANDARD 11)
+  set(CMAKE_CXX_STANDARD_REQUIRED ON)
+  if(NOT CYGWIN)
+    set(CMAKE_CXX_EXTENSIONS OFF)
+  endif()
+endif()
+
 enable_testing()

-option(BUILD_GTEST "Builds the googletest subproject" OFF)
+include(CMakeDependentOption)
+include(GNUInstallDirs)
+
 #Note that googlemock target already builds googletest
 option(BUILD_GMOCK "Builds the googlemock subproject" ON)
+option(INSTALL_GTEST "Enable installation of googletest. (Projects embedding googletest may want to turn this OFF.)" ON)
+
 if(BUILD_GMOCK)
   add_subdirectory( googlemock )
-elseif(BUILD_GTEST)
+else()
   add_subdirectory( googletest )
 endif()
142
vendor/github.com/google/googletest/CONTRIBUTING.md
generated
vendored
Normal file
142
vendor/github.com/google/googletest/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,142 @@
# How to become a contributor and submit your own code

## Contributor License Agreements

We'd love to accept your patches! Before we can take them, we have to jump a
couple of legal hurdles.

Please fill out either the individual or corporate Contributor License Agreement
(CLA).

  * If you are an individual writing original source code and you're sure you
    own the intellectual property, then you'll need to sign an
    [individual CLA](https://developers.google.com/open-source/cla/individual).
  * If you work for a company that wants to allow you to contribute your work,
    then you'll need to sign a
    [corporate CLA](https://developers.google.com/open-source/cla/corporate).

Follow either of the two links above to access the appropriate CLA and
instructions for how to sign and return it. Once we receive it, we'll be able to
accept your pull requests.

## Are you a Googler?

If you are a Googler, please make an attempt to submit an internal change rather
than a GitHub Pull Request. If you are not able to submit an internal change, a
PR is acceptable as an alternative.

## Contributing A Patch

1.  Submit an issue describing your proposed change to the
    [issue tracker](https://github.com/google/googletest).
2.  Please don't mix more than one logical change per submittal, because it
    makes the history hard to follow. If you want to make a change that doesn't
    have a corresponding issue in the issue tracker, please create one.
3.  Also, coordinate with team members that are listed on the issue in question.
    This ensures that work isn't being duplicated, and communicating your plan
    early also generally leads to better patches.
4.  If your proposed change is accepted, and you haven't already done so, sign a
    Contributor License Agreement (see details above).
5.  Fork the desired repo, develop and test your code changes.
6.  Ensure that your code adheres to the existing style in the sample to which
    you are contributing.
7.  Ensure that your code has an appropriate set of unit tests which all pass.
8.  Submit a pull request.

## The Google Test and Google Mock Communities

The Google Test community exists primarily through the
[discussion group](http://groups.google.com/group/googletestframework) and the
GitHub repository. Likewise, the Google Mock community exists primarily through
their own [discussion group](http://groups.google.com/group/googlemock). You are
definitely encouraged to contribute to the discussion, and you can also help us
keep the effectiveness of the group high by following and promoting the
guidelines listed here.

### Please Be Friendly

Showing courtesy and respect to others is a vital part of the Google culture,
and we strongly encourage everyone participating in Google Test development to
join us in accepting nothing less. Of course, being courteous is not the same as
failing to constructively disagree with each other, but it does mean that we
should be respectful of each other when enumerating the 42 technical reasons
that a particular proposal may not be the best choice. There's never a reason to
be antagonistic or dismissive toward anyone who is sincerely trying to
contribute to a discussion.

Sure, C++ testing is serious business and all that, but it's also a lot of fun.
Let's keep it that way. Let's strive to be one of the friendliest communities in
all of open source.

As always, discuss Google Test in the official GoogleTest discussion group. You
don't have to actually submit code in order to sign up. Your participation
itself is a valuable contribution.

## Style

To keep the source consistent, readable, diffable and easy to merge, we use a
fairly rigid coding style, as defined by the
[google-styleguide](https://github.com/google/styleguide) project. All patches
will be expected to conform to the style outlined
[here](https://google.github.io/styleguide/cppguide.html). Use
[.clang-format](https://github.com/google/googletest/blob/master/.clang-format)
to check your formatting.

## Requirements for Contributors

If you plan to contribute a patch, you need to build Google Test, Google Mock,
and their own tests from a git checkout, which has further requirements:

  * [Python](https://www.python.org/) v2.3 or newer (for running some of the
    tests and re-generating certain source files from templates)
  * [CMake](https://cmake.org/) v2.6.4 or newer

## Developing Google Test and Google Mock

This section discusses how to make your own changes to the Google Test project.

### Testing Google Test and Google Mock Themselves

To make sure your changes work as intended and don't break existing
functionality, you'll want to compile and run Google Test and GoogleMock's own
tests. For that you can use CMake:

    mkdir mybuild
    cd mybuild
    cmake -Dgtest_build_tests=ON -Dgmock_build_tests=ON ${GTEST_REPO_DIR}

To choose between building only Google Test or Google Mock, you may modify your
cmake command to be one of the following:

    cmake -Dgtest_build_tests=ON ${GTEST_DIR} # sets up Google Test tests
    cmake -Dgmock_build_tests=ON ${GMOCK_DIR} # sets up Google Mock tests

Make sure you have Python installed, as some of Google Test's tests are written
in Python. If the cmake command complains about not being able to find Python
(`Could NOT find PythonInterp (missing: PYTHON_EXECUTABLE)`), try telling it
explicitly where your Python executable can be found:

    cmake -DPYTHON_EXECUTABLE=path/to/python ...

Next, you can build Google Test and / or Google Mock and all desired tests. On
\*nix, this is usually done by

    make

To run the tests, do

    make test

All tests should pass.

### Regenerating Source Files

Some of Google Test's source files are generated from templates (not in the C++
sense) using a script. For example, the file
include/gtest/internal/gtest-type-util.h.pump is used to generate
gtest-type-util.h in the same directory.

You don't need to worry about regenerating the source files unless you need to
modify them. You would then modify the corresponding `.pump` files and run the
'[pump.py](googletest/scripts/pump.py)' generator script. See the
[Pump Manual](googletest/docs/pump_manual.md).
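As a concrete illustration of the regeneration step just described — a hedged sketch, since the exact pump.py invocation is not spelled out in this diff (the script and file names come from the text above; consult the Pump Manual for the authoritative usage):

    # Edit the template, then regenerate the checked-in header next to it.
    python googletest/scripts/pump.py \
        googletest/include/gtest/internal/gtest-type-util.h.pump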
28
vendor/github.com/google/googletest/LICENSE
generated
vendored
Normal file
@ -0,0 +1,28 @@
Copyright 2008, Google Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
198
vendor/github.com/google/googletest/README.md
generated
vendored
@ -1,70 +1,97 @@
-# Google Test #
+# Google Test
+
+#### OSS Builds Status:
 
-[![Build Status](https://travis-ci.org/google/googletest.svg?branch=master)](https://travis-ci.org/google/googletest)
-[![Build status](https://ci.appveyor.com/api/projects/status/4o38plt0xbo1ubc8/branch/master?svg=true)](https://ci.appveyor.com/project/BillyDonahue/googletest/branch/master)
+[![Build Status](https://api.travis-ci.org/google/googletest.svg?branch=master)](https://travis-ci.org/google/googletest)
+[![Build status](https://ci.appveyor.com/api/projects/status/4o38plt0xbo1ubc8/branch/master?svg=true)](https://ci.appveyor.com/project/GoogleTestAppVeyor/googletest/branch/master)
 
-Welcome to **Google Test**, Google's C++ test framework!
+### Future Plans
+
+#### 1.8.x Release:
+
+[the 1.8.x](https://github.com/google/googletest/releases/tag/release-1.8.1) is
+the last release that works with pre-C++11 compilers. The 1.8.x line will not
+accept requests for new features, and bugfix requests will only be accepted if
+proven "critical".
+
+#### Post 1.8.x:
+
+On-going work to improve/clean up/pay down technical debt. When this work is
+completed there will be a 1.9.x tagged release.
+
+#### Post 1.9.x
+
+Post 1.9.x, googletest will follow the
+[Abseil Live at Head philosophy](https://abseil.io/about/philosophy).
+
+## Welcome to **Google Test**, Google's C++ test framework!
 
-This repository is a merger of the formerly separate GoogleTest and
-GoogleMock projects. These were so closely related that it makes sense to
-maintain and release them together.
+This repository is a merger of the formerly separate GoogleTest and GoogleMock
+projects. These were so closely related that it makes sense to maintain and
+release them together.
 
-Please see the project page above for more information as well as the
-mailing list for questions, discussions, and development. There is
-also an IRC channel on OFTC (irc.oftc.net) #gtest available. Please
-join us!
+Please subscribe to the mailing list at googletestframework@googlegroups.com for
+questions, discussions, and development.
 
-Getting started information for **Google Test** is available in the
-[Google Test Primer](googletest/docs/Primer.md) documentation.
+### Getting started:
+
+The information for **Google Test** is available in the
+[Google Test Primer](googletest/docs/primer.md) documentation.
 
 **Google Mock** is an extension to Google Test for writing and using C++ mock
 classes. See the separate [Google Mock documentation](googlemock/README.md).
 
-More detailed documentation for googletest (including build instructions) are
-in its interior [googletest/README.md](googletest/README.md) file.
+More detailed documentation for googletest is in its interior
+[googletest/README.md](googletest/README.md) file.
 
-## Features ##
+## Features
 
-  * An [XUnit](https://en.wikipedia.org/wiki/XUnit) test framework.
+  * An [xUnit](https://en.wikipedia.org/wiki/XUnit) test framework.
   * Test discovery.
   * A rich set of assertions.
   * User-defined assertions.
   * Death tests.
   * Fatal and non-fatal failures.
   * Value-parameterized tests.
   * Type-parameterized tests.
   * Various options for running the tests.
   * XML test report generation.
 
-## Platforms ##
+## Platforms
 
 Google test has been used on a variety of platforms:
 
   * Linux
   * Mac OS X
   * Windows
   * Cygwin
   * MinGW
   * Windows Mobile
   * Symbian
+  * PlatformIO
 
-## Who Is Using Google Test? ##
+## Who Is Using Google Test?
 
-In addition to many internal projects at Google, Google Test is also used by
-the following notable projects:
+In addition to many internal projects at Google, Google Test is also used by the
+following notable projects:
 
-  * The [Chromium projects](http://www.chromium.org/) (behind the Chrome
-    browser and Chrome OS).
+  * The [Chromium projects](http://www.chromium.org/) (behind the Chrome browser
+    and Chrome OS).
   * The [LLVM](http://llvm.org/) compiler.
   * [Protocol Buffers](https://github.com/google/protobuf), Google's data
     interchange format.
   * The [OpenCV](http://opencv.org/) computer vision library.
+  * [tiny-dnn](https://github.com/tiny-dnn/tiny-dnn): header only,
+    dependency-free deep learning framework in C++11.
 
-## Related Open Source Projects ##
+## Related Open Source Projects
 
-[Google Test UI](https://github.com/ospector/gtest-gbar) is test runner that runs
-your test binary, allows you to track its progress via a progress bar, and
+[GTest Runner](https://github.com/nholthaus/gtest-runner) is a Qt5 based
+automated test-runner and Graphical User Interface with powerful features for
+Windows and Linux platforms.
+
+[Google Test UI](https://github.com/ospector/gtest-gbar) is a test runner that
+runs your test binary, allows you to track its progress via a progress bar, and
 displays a list of test failures. Clicking on one shows failure text. Google
 Test UI is written in C#.
 
@ -73,70 +100,35 @@ listener for Google Test that implements the
 [TAP protocol](https://en.wikipedia.org/wiki/Test_Anything_Protocol) for test
 result output. If your test runner understands TAP, you may find it useful.
 
-## Requirements ##
-
-Google Test is designed to have fairly minimal requirements to build
-and use with your projects, but there are some. Currently, we support
-Linux, Windows, Mac OS X, and Cygwin. We will also make our best
-effort to support other platforms (e.g. Solaris, AIX, and z/OS).
-However, since core members of the Google Test project have no access
-to these platforms, Google Test may have outstanding issues there. If
-you notice any problems on your platform, please notify
-<googletestframework@googlegroups.com>. Patches for fixing them are
-even more welcome!
-
-### Linux Requirements ###
+[gtest-parallel](https://github.com/google/gtest-parallel) is a test runner that
+runs tests from your binary in parallel to provide significant speed-up.
+
+[GoogleTest Adapter](https://marketplace.visualstudio.com/items?itemName=DavidSchuldenfrei.gtest-adapter)
+is a VS Code extension allowing you to view Google Tests in a tree view and
+run/debug your tests.
+
+## Requirements
+
+Google Test is designed to have fairly minimal requirements to build and use
+with your projects, but there are some. If you notice any problems on your
+platform, please notify
+[googletestframework@googlegroups.com](https://groups.google.com/forum/#!forum/googletestframework).
+Patches for fixing them are welcome!
+
+### Build Requirements
 
 These are the base requirements to build and use Google Test from a source
-package (as described below):
+package:
 
-  * GNU-compatible Make or gmake
-  * POSIX-standard shell
-  * POSIX(-2) Regular Expressions (regex.h)
-  * A C++98-standard-compliant compiler
-
-### Windows Requirements ###
-
-  * Microsoft Visual C++ v7.1 or newer
-
-### Cygwin Requirements ###
-
-  * Cygwin v1.5.25-14 or newer
-
-### Mac OS X Requirements ###
-
-  * Mac OS X v10.4 Tiger or newer
-  * Xcode Developer Tools
-
-### Requirements for Contributors ###
-
-We welcome patches. If you plan to contribute a patch, you need to
-build Google Test and its own tests from a git checkout (described
-below), which has further requirements:
-
-  * [Python](https://www.python.org/) v2.3 or newer (for running some of
-    the tests and re-generating certain source files from templates)
-  * [CMake](https://cmake.org/) v2.6.4 or newer
-
-## Regenerating Source Files ##
-
-Some of Google Test's source files are generated from templates (not
-in the C++ sense) using a script.
-For example, the
-file include/gtest/internal/gtest-type-util.h.pump is used to generate
-gtest-type-util.h in the same directory.
-
-You don't need to worry about regenerating the source files
-unless you need to modify them. You would then modify the
-corresponding `.pump` files and run the '[pump.py](googletest/scripts/pump.py)'
-generator script. See the [Pump Manual](googletest/docs/PumpManual.md).
-
-### Contributing Code ###
-
-We welcome patches. Please read the
-[Developer's Guide](googletest/docs/DevGuide.md)
-for how you can contribute. In particular, make sure you have signed
-the Contributor License Agreement, or we won't be able to accept the
-patch.
+  * [Bazel](https://bazel.build/) or [CMake](https://cmake.org/). NOTE: Bazel is
+    the build system that googletest is using internally and tests against.
+    CMake is community-supported.
+  * a C++11-standard-compliant compiler
+
+## Contributing Changes
+
+Please read [`CONTRIBUTING.md`](CONTRIBUTING.md) for details on how to
+contribute to this project.
 
 Happy testing!
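To make the Build Requirements above concrete, a hedged sketch of the community-supported CMake route (directory names are placeholders; see the CONTRIBUTING.md section earlier in this diff for the test-enabled variants):

    git clone https://github.com/google/googletest.git
    mkdir googletest/build && cd googletest/build
    cmake ..   # add -DBUILD_GMOCK=OFF to build only googletest
    make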
23
vendor/github.com/google/googletest/WORKSPACE
generated
vendored
Normal file
@ -0,0 +1,23 @@
workspace(name = "com_google_googletest")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

# Abseil
http_archive(
    name = "com_google_absl",
    urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"],
    strip_prefix = "abseil-cpp-master",
)

http_archive(
    name = "rules_cc",
    strip_prefix = "rules_cc-master",
    urls = ["https://github.com/bazelbuild/rules_cc/archive/master.zip"],
)

http_archive(
    name = "rules_python",
    strip_prefix = "rules_python-master",
    urls = ["https://github.com/bazelbuild/rules_python/archive/master.zip"],
)
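This WORKSPACE is what lets a plain checkout build with Bazel, the internally tested route named in the README above; a minimal hedged sketch of exercising it (the target pattern mirrors ci/build-linux-bazel.sh later in this diff):

    git clone https://github.com/google/googletest.git
    cd googletest
    bazel test //...:all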
155
vendor/github.com/google/googletest/appveyor.yml
generated
vendored
@ -4,68 +4,151 @@ os: Visual Studio 2015
 
 environment:
   matrix:
-    - Toolset: v140
-    - Toolset: v120
-    - Toolset: v110
-    - Toolset: v100
-
-platform:
-  - Win32
-  - x64
+    - compiler: msvc-15-seh
+      generator: "Visual Studio 15 2017"
+      build_system: cmake
+      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+
+    - compiler: msvc-15-seh
+      generator: "Visual Studio 15 2017 Win64"
+      build_system: cmake
+      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+      enabled_on_pr: yes
+
+    - compiler: msvc-15-seh
+      build_system: bazel
+      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+      enabled_on_pr: yes
+
+    - compiler: msvc-14-seh
+      build_system: cmake
+      generator: "Visual Studio 14 2015"
+      enabled_on_pr: yes
+
+    - compiler: msvc-14-seh
+      build_system: cmake
+      generator: "Visual Studio 14 2015 Win64"
+
+    - compiler: gcc-6.3.0-posix
+      build_system: cmake
+      generator: "MinGW Makefiles"
+      cxx_path: 'C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin'
+      enabled_on_pr: yes
 
 configuration:
-#  - Release
   - Debug
 
 build:
   verbosity: minimal
 
-artifacts:
-  - path: '_build/Testing/Temporary/*'
-    name: test_results
+install:
+- ps: |
+    Write-Output "Compiler: $env:compiler"
+    Write-Output "Generator: $env:generator"
+    Write-Output "Env:Configuration: $env:configuration"
+    Write-Output "Env: $env"
+    if (-not (Test-Path env:APPVEYOR_PULL_REQUEST_NUMBER)) {
+      Write-Output "This is *NOT* a pull request build"
+    } else {
+      Write-Output "This is a pull request build"
+      if (-not (Test-Path env:enabled_on_pr) -or $env:enabled_on_pr -ne "yes") {
+        Write-Output "PR builds are *NOT* explicitly enabled"
+      }
+    }
+
+    # install Bazel
+    if ($env:build_system -eq "bazel") {
+      appveyor DownloadFile https://github.com/bazelbuild/bazel/releases/download/0.28.1/bazel-0.28.1-windows-x86_64.exe -FileName bazel.exe
+    }
+
+    if ($env:build_system -eq "cmake") {
+      # git bash conflicts with MinGW makefiles
+      if ($env:generator -eq "MinGW Makefiles") {
+        $env:path = $env:path.replace("C:\Program Files\Git\usr\bin;", "")
+        if ($env:cxx_path -ne "") {
+          $env:path += ";$env:cxx_path"
+        }
+      }
+    }
 
 before_build:
 - ps: |
-    Write-Output "Configuration: $env:CONFIGURATION"
-    Write-Output "Platform: $env:PLATFORM"
-    $generator = switch ($env:TOOLSET)
-    {
-        "v140" {"Visual Studio 14 2015"}
-        "v120" {"Visual Studio 12 2013"}
-        "v110" {"Visual Studio 11 2012"}
-        "v100" {"Visual Studio 10 2010"}
-    }
-    if ($env:PLATFORM -eq "x64")
-    {
-        $generator = "$generator Win64"
-    }
+    $env:root=$env:APPVEYOR_BUILD_FOLDER
+    Write-Output "env:root: $env:root"
 
 build_script:
 - ps: |
-    if (($env:TOOLSET -eq "v100") -and ($env:PLATFORM -eq "x64"))
-    {
+    # Only enable some builds for pull requests, the AppVeyor queue is too long.
+    if ((Test-Path env:APPVEYOR_PULL_REQUEST_NUMBER) -And (-not (Test-Path env:enabled_on_pr) -or $env:enabled_on_pr -ne "yes")) {
       return
+    } else {
+      # special case - build with Bazel
+      if ($env:build_system -eq "bazel") {
+        & $env:root\bazel.exe build -c opt //:gtest_samples
+        if ($LastExitCode -eq 0) { # bazel writes to StdErr and PowerShell interprets it as an error
+          $host.SetShouldExit(0)
+        } else { # a real error
+          throw "Exec: $ErrorMessage"
+        }
+        return
+      }
     }
+    # by default build with CMake
     md _build -Force | Out-Null
     cd _build
 
-    & cmake -G "$generator" -DCMAKE_CONFIGURATION_TYPES="Debug;Release" -Dgtest_build_tests=ON -Dgtest_build_samples=ON -Dgmock_build_tests=ON ..
+    $conf = if ($env:generator -eq "MinGW Makefiles") {"-DCMAKE_BUILD_TYPE=$env:configuration"} else {"-DCMAKE_CONFIGURATION_TYPES=Debug;Release"}
+    # Disable test for MinGW (gtest tests fail, gmock tests can not build)
+    $gtest_build_tests = if ($env:generator -eq "MinGW Makefiles") {"-Dgtest_build_tests=OFF"} else {"-Dgtest_build_tests=ON"}
+    $gmock_build_tests = if ($env:generator -eq "MinGW Makefiles") {"-Dgmock_build_tests=OFF"} else {"-Dgmock_build_tests=ON"}
+    & cmake -G "$env:generator" $conf -Dgtest_build_samples=ON $gtest_build_tests $gmock_build_tests ..
     if ($LastExitCode -ne 0) {
       throw "Exec: $ErrorMessage"
     }
-    & cmake --build . --config $env:CONFIGURATION
+    $cmake_parallel = if ($env:generator -eq "MinGW Makefiles") {"-j2"} else {"/m"}
+    & cmake --build . --config $env:configuration -- $cmake_parallel
     if ($LastExitCode -ne 0) {
       throw "Exec: $ErrorMessage"
     }
 
+skip_commits:
+  files:
+    - '**/*.md'
+
 test_script:
 - ps: |
-    if (($env:Toolset -eq "v100") -and ($env:PLATFORM -eq "x64"))
-    {
+    # Only enable some builds for pull requests, the AppVeyor queue is too long.
+    if ((Test-Path env:APPVEYOR_PULL_REQUEST_NUMBER) -And (-not (Test-Path env:enabled_on_pr) -or $env:enabled_on_pr -ne "yes")) {
       return
     }
+    if ($env:build_system -eq "bazel") {
+      # special case - testing with Bazel
+      & $env:root\bazel.exe test //:gtest_samples
+      if ($LastExitCode -eq 0) { # bazel writes to StdErr and PowerShell interprets it as an error
+        $host.SetShouldExit(0)
+      } else { # a real error
+        throw "Exec: $ErrorMessage"
+      }
+    }
+    if ($env:build_system -eq "cmake") {
+      # built with CMake - test with CTest
+      if ($env:generator -eq "MinGW Makefiles") {
+        return # No test available for MinGW
+      }
 
-    & ctest -C $env:CONFIGURATION --output-on-failure
-    if ($LastExitCode -ne 0) {
-      throw "Exec: $ErrorMessage"
-    }
+      & ctest -C $env:configuration --timeout 600 --output-on-failure
+      if ($LastExitCode -ne 0) {
+        throw "Exec: $ErrorMessage"
+      }
+    }
+
+artifacts:
+  - path: '_build/CMakeFiles/*.log'
+    name: logs
+  - path: '_build/Testing/**/*.xml'
+    name: test_results
+  - path: 'bazel-testlogs/**/test.log'
+    name: test_logs
+  - path: 'bazel-testlogs/**/test.xml'
+    name: test_results
37
vendor/github.com/google/googletest/ci/build-linux-bazel.sh
generated
vendored
Executable file
@ -0,0 +1,37 @@
#!/usr/bin/env bash
# Copyright 2017 Google Inc.
# All Rights Reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

set -e

bazel version
bazel build --curses=no //...:all
bazel test --curses=no //...:all
bazel test --curses=no //...:all --define absl=1
2
vendor/github.com/google/googletest/ci/build-platformio.sh
generated
vendored
Normal file
@ -0,0 +1,2 @@
# run PlatformIO builds
platformio run
41
vendor/github.com/google/googletest/ci/env-linux.sh
generated
vendored
Executable file
@ -0,0 +1,41 @@
#!/usr/bin/env bash
# Copyright 2017 Google Inc.
# All Rights Reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#
# This file should be sourced, and not executed as a standalone script.
#

# TODO() - we can check if this is being sourced using $BASH_VERSION and $BASH_SOURCE[0] != ${0}.

if [ "${TRAVIS_OS_NAME}" = "linux" ]; then
    if [ "$CXX" = "g++" ]; then export CXX="g++-4.9" CC="gcc-4.9"; fi
    if [ "$CXX" = "clang++" ]; then export CXX="clang++-3.9" CC="clang-3.9"; fi
fi
47
vendor/github.com/google/googletest/googletest/xcode/Samples/FrameworkSample/runtests.sh → vendor/github.com/google/googletest/ci/env-osx.sh
generated
vendored
Normal file → Executable file
@ -1,7 +1,7 @@
-#!/bin/bash
+#!/usr/bin/env bash
+# Copyright 2017 Google Inc.
+# All Rights Reserved.
 #
-# Copyright 2008, Google Inc.
-# All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
@ -29,34 +29,19 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# Executes the samples and tests for the Google Test Framework.
+#
+# This file should be sourced, and not executed as a standalone script.
+#
 
-# Help the dynamic linker find the path to the libraries.
-export DYLD_FRAMEWORK_PATH=$BUILT_PRODUCTS_DIR
-export DYLD_LIBRARY_PATH=$BUILT_PRODUCTS_DIR
+# TODO() - we can check if this is being sourced using $BASH_VERSION and $BASH_SOURCE[0] != ${0}.
+#
 
-# Create some executables.
-test_executables=$@
-
-# Now execute each one in turn keeping track of how many succeeded and failed.
-succeeded=0
-failed=0
-failed_list=()
-for test in ${test_executables[*]}; do
-  "$test"
-  result=$?
-  if [ $result -eq 0 ]; then
-    succeeded=$(( $succeeded + 1 ))
-  else
-    failed=$(( failed + 1 ))
-    failed_list="$failed_list $test"
-  fi
-done
-
-# Report the successes and failures to the console.
-echo "Tests complete with $succeeded successes and $failed failures."
-if [ $failed -ne 0 ]; then
-  echo "The following tests failed:"
-  echo $failed_list
-fi
-exit $failed
+if [ "${TRAVIS_OS_NAME}" = "osx" ]; then
+  if [ "$CXX" = "clang++" ]; then
+    # $PATH needs to be adjusted because the llvm tap doesn't install the
+    # package to /usr/local/bin, etc, like the gcc tap does.
+    # See: https://github.com/Homebrew/legacy-homebrew/issues/29733
+    clang_version=3.9
+    export PATH="/usr/local/opt/llvm@${clang_version}/bin:$PATH";
+  fi
+fi
48
vendor/github.com/google/googletest/ci/get-nprocessors.sh
generated
vendored
Executable file
@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Copyright 2017 Google Inc.
# All Rights Reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# This file is typically sourced by another script.

# if possible, ask for the precise number of processors,
# otherwise take 2 processors as reasonable default; see
# https://docs.travis-ci.com/user/speeding-up-the-build/#Makefile-optimization
if [ -x /usr/bin/getconf ]; then
    NPROCESSORS=$(/usr/bin/getconf _NPROCESSORS_ONLN)
else
    NPROCESSORS=2
fi

# as of 2017-09-04 Travis CI reports 32 processors, but GCC build
# crashes if parallelized too much (maybe memory consumption problem),
# so limit to 4 processors for the time being.
if [ $NPROCESSORS -gt 4 ] ; then
    echo "$0:Note: Limiting processors to use by make from $NPROCESSORS to 4."
    NPROCESSORS=4
fi
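These env and processor-count helpers are meant to be sourced by the CI driver rather than executed; the wiring below is a hedged sketch, since the actual .travis.yml glue is outside this diff:

    # Pick platform-appropriate compilers, then size the build.
    source ci/env-linux.sh        # or ci/env-osx.sh on macOS workers
    source ci/get-nprocessors.sh
    make -j ${NPROCESSORS}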
49
vendor/github.com/google/googletest/ci/install-linux.sh
generated
vendored
Executable file
@ -0,0 +1,49 @@
#!/usr/bin/env bash
# Copyright 2017 Google Inc.
# All Rights Reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

set -eu

if [ "${TRAVIS_OS_NAME}" != linux ]; then
    echo "Not a Linux build; skipping installation"
    exit 0
fi


if [ "${TRAVIS_SUDO}" = "true" ]; then
    echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | \
      sudo tee /etc/apt/sources.list.d/bazel.list
    curl https://bazel.build/bazel-release.pub.gpg | sudo apt-key add -
    sudo apt-get update && sudo apt-get install -y bazel gcc-4.9 g++-4.9 clang-3.9
elif [ "${CXX}" = "clang++" ]; then
    # Use ccache, assuming $HOME/bin is in the path, which is true in the Travis build environment.
    ln -sf /usr/bin/ccache $HOME/bin/${CXX};
    ln -sf /usr/bin/ccache $HOME/bin/${CC};
fi
40
vendor/github.com/google/googletest/ci/install-osx.sh
generated
vendored
Executable file
@ -0,0 +1,40 @@
#!/usr/bin/env bash
# Copyright 2017 Google Inc.
# All Rights Reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

set -eu

if [ "${TRAVIS_OS_NAME}" != "osx" ]; then
    echo "Not a macOS build; skipping installation"
    exit 0
fi

brew update
brew install ccache gcc@4.9
Some files were not shown because too many files have changed in this diff.