Merge branch 'master' into errno

commit a031aa208c
Ben RUBSON, 2017-08-08 08:26:57 +02:00 (committed by GitHub)
436 changed files with 165873 additions and 105 deletions

.travis.yml

@@ -4,39 +4,31 @@ language: cpp
sudo: true
os:
- linux
- osx
compiler:
- clang
- gcc
matrix:
exclude:
- os: osx
compiler: gcc
branches:
only:
- master
- coverity_scan
- travis
env:
global:
# The next declaration is the encrypted COVERITY_SCAN_TOKEN, created
# via the "travis encrypt" command using the project repo's public key
- secure: "KuAAwjiIqkk4vqSX/M3ZZIvQs6edm+tV8IADiklTUYZIFyxu8FgZ6RbDdMD2sef5bNZA1OZhlcbeRtiKff5CfMtvzc607Lg3NUkpi+ShMynWgqS/e0uCMf9ogEJlUiZMxf4juBi7v6DyMl/WV6pAdJmdfHtzcj8GF2mgTfQjkO8="
before_script:
- sudo modprobe fuse
- cmake --version
- ./ci/setup.sh
script:
- if [ "${COVERITY_SCAN_BRANCH}" != 1 ]; then ./build.sh && ./test.sh ; fi
- ./ci/check.sh
addons:
coverity_scan:
project:
name: "vgough/encfs"
description: "Build submitted via Travis CI"
notification_email: vgough@pobox.com
build_command_prepend: "make clean"
build_command: "make -j 4"
branch_pattern: coverity_scan
apt:
sources:
- ubuntu-toolchain-r-test

CMakeLists.txt

@@ -18,6 +18,7 @@ option(IWYU "Build with IWYU analysis." OFF)
set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
"${CMAKE_CURRENT_LIST_DIR}/cmake")
option (BUILD_UNIT_TESTS "build EncFS unit tests" ON)
option (BUILD_SHARED_LIBS "build shared libraries" OFF)
option (USE_INTERNAL_TINYXML "use built-in TinyXML2" ON)
option (ENABLE_NLS "compile with Native Language Support (using gettext)" ON)
@@ -96,7 +97,12 @@ check_cxx_source_compiles ("#include <sys/types.h>
include (CheckFuncs)
check_function_exists_glibc (lchmod HAVE_LCHMOD)
check_function_exists_glibc (utimensat HAVE_UTIMENSAT)
check_function_exists_glibc (fdatasync HAVE_FDATASYNC)
if (APPLE)
message ("-- There is no usable FDATASYNC on Apple")
set(HAVE_FDATASYNC FALSE)
else()
check_function_exists_glibc (fdatasync HAVE_FDATASYNC)
endif (APPLE)
set (CMAKE_THREAD_PREFER_PTHREAD)
find_package (Threads REQUIRED)
@@ -229,7 +235,6 @@ if (IWYU)
endif()
endif()
# Set RPATH to library install path.
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${LIB_INSTALL_DIR}")
@@ -277,5 +282,21 @@ if (POD2MAN)
DESTINATION ${MAN_DESTINATION})
endif (POD2MAN)
add_custom_target(tests COMMAND ${CMAKE_CURRENT_LIST_DIR}/test.sh)
if (BUILD_UNIT_TESTS)
enable_testing()
set(GOOGLETEST_DIR vendor/github.com/google/googletest)
add_subdirectory(${GOOGLETEST_DIR})
link_directories(${CMAKE_BINARY_DIR}/${GOOGLETEST_DIR}/googletest)
set(GOOGLEBENCH_DIR vendor/github.com/google/benchmark)
set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "benchmark tests")
add_subdirectory(${GOOGLEBENCH_DIR})
link_directories(${CMAKE_BINARY_DIR}/${GOOGLEBENCH_DIR})
# Unit tests.
add_subdirectory(test)
# Integration test target - runs against built encfs.
add_custom_target(integration COMMAND ${CMAKE_CURRENT_LIST_DIR}/integration.sh)
endif ()

INSTALL.md

@@ -17,12 +17,17 @@ Steps to build EncFS:
cmake ..
make
Optional, but strongly recommended, is running the test suite
to verify that the generated binaries work as expected
(runtime: 20 seconds)
Optional, but strongly recommended, is running the unit and integration
tests to verify that the generated binaries work as expected. Unit
tests will run almost instantly:
make test
Integration tests will take ~20 seconds to run and will mount an
encrypted filesystem and run tests on it:
make integration
The compilation process creates two executables, encfs and encfsctl, in the
encfs directory. You can install them to a system directory via

README.md

@@ -3,7 +3,6 @@
_Build Status_
- Circle: [![Circle CI](https://circleci.com/gh/vgough/encfs.svg?style=svg)](https://circleci.com/gh/vgough/encfs)
- Travis: [![Travis CI](https://travis-ci.org/vgough/encfs.svg?branch=master)](https://travis-ci.org/vgough/encfs)
- Analysis: [![Coverity](https://scan.coverity.com/projects/10117/badge.svg)](https://scan.coverity.com/projects/vgough-encfs)
## About

build.sh

@@ -7,7 +7,7 @@ if [[ ! -d build ]]
then
mkdir build
cd build
cmake ..
cmake .. $*
cd ..
fi

ci/check.sh (new executable file, 21 lines)

@@ -0,0 +1,21 @@
#!/bin/bash -eu
cmake --version
CFG=""
if [ "$TRAVIS_OS_NAME" = "osx" ]; then
CFG="-DENABLE_NLS=OFF -DOPENSSL_ROOT_DIR=/usr/local/opt/openssl"
fi
if [[ ! -d build ]]
then
mkdir build
fi
cd build
cmake .. ${CFG}
make -j2
make test
cd ..

ci/setup.sh (new executable file, 10 lines)

@@ -0,0 +1,10 @@
#!/bin/bash -eu
if [ "$TRAVIS_OS_NAME" == "linux" ]; then
sudo modprobe fuse
fi
if [ "$TRAVIS_OS_NAME" == "osx" ]; then
brew cask install osxfuse
fi

circle.yml

@@ -10,5 +10,5 @@ dependencies:
test:
override:
- bash ./ci/config.sh
- cd build && make && ./checkops && make install
- cd build && make && make test && make install
- /tmp/encfs/bin/encfsctl --version

encfs/FileNode.cpp

@@ -260,7 +260,7 @@ int FileNode::sync(bool datasync) {
int fh = io->open(O_RDONLY);
if (fh >= 0) {
int res = -EIO;
#ifdef HAVE_FDATASYNC
#if defined(HAVE_FDATASYNC)
if (datasync) {
res = fdatasync(fh);
} else {
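The hunk only switches `#ifdef HAVE_FDATASYNC` to `#if defined(HAVE_FDATASYNC)`; it pairs with the CMake change above that forces `HAVE_FDATASYNC` off on Apple. A minimal standalone sketch of the pattern, using a hypothetical `sync_fd` helper rather than the actual encfs code:

```c++
#include <unistd.h>

// Sketch: honor a datasync request with fdatasync() when the build defines
// HAVE_FDATASYNC; otherwise (e.g. macOS, where the CMake check disables it)
// fall back to fsync(), which also flushes file metadata.
static int sync_fd(int fd, bool datasync) {
#if defined(HAVE_FDATASYNC)
  if (datasync) {
    return fdatasync(fd);
  }
#else
  (void)datasync;  // no fdatasync() available; a full fsync() is the fallback
#endif
  return fsync(fd);
}
```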

encfs/FileUtils.cpp

@@ -1035,7 +1035,7 @@ RootPtr createV6Config(EncFS_Context *ctx,
alg = findCipherAlgorithm("AES", keySize);
// If case-insensitive system, opt for Block32 filename encoding
#if DEFAULT_CASE_INSENSITIVE
#if defined(DEFAULT_CASE_INSENSITIVE)
nameIOIface = BlockNameIO::CurrentInterface(true);
#else
nameIOIface = BlockNameIO::CurrentInterface();

encfs/SSL_Cipher.cpp

@@ -161,48 +161,48 @@ int TimedPBKDF2(const char *pass, int passlen, const unsigned char *salt,
// - Version 3:0 adds a new IV mechanism
static Interface BlowfishInterface("ssl/blowfish", 3, 0, 2);
static Interface AESInterface("ssl/aes", 3, 0, 2);
static Interface CAMELLIAInterface("ssl/camellia",3, 0, 2);
static Interface CAMELLIAInterface("ssl/camellia", 3, 0, 2);
#ifndef OPENSSL_NO_CAMELLIA
static Range CAMELLIAKeyRange(128, 256, 64);
static Range CAMELLIABlockRange(64, 4096, 16);
static std::shared_ptr<Cipher> NewCAMELLIACipher(const Interface &iface, int keyLen) {
if (keyLen <= 0) keyLen = 192;
static std::shared_ptr<Cipher> NewCAMELLIACipher(const Interface &iface,
int keyLen) {
if (keyLen <= 0) keyLen = 192;
keyLen = CAMELLIAKeyRange.closest(keyLen);
keyLen = CAMELLIAKeyRange.closest(keyLen);
const EVP_CIPHER *blockCipher = 0;
const EVP_CIPHER *streamCipher = 0;
const EVP_CIPHER *blockCipher = 0;
const EVP_CIPHER *streamCipher = 0;
switch (keyLen) {
case 128:
blockCipher = EVP_camellia_128_cbc();
streamCipher = EVP_camellia_128_cfb();
break;
case 192:
blockCipher = EVP_camellia_192_cbc();
streamCipher = EVP_camellia_192_cfb();
break;
case 256:
default:
blockCipher = EVP_camellia_256_cbc();
streamCipher = EVP_camellia_256_cfb();
break;
}
switch (keyLen) {
case 128:
blockCipher = EVP_camellia_128_cbc();
streamCipher = EVP_camellia_128_cfb();
break;
case 192:
blockCipher = EVP_camellia_192_cbc();
streamCipher = EVP_camellia_192_cfb();
break;
case 256:
default:
blockCipher = EVP_camellia_256_cbc();
streamCipher = EVP_camellia_256_cfb();
break;
}
return std::shared_ptr<Cipher>(new SSL_Cipher(
iface, CAMELLIAInterface, blockCipher, streamCipher, keyLen / 8 ));
return std::shared_ptr<Cipher>(new SSL_Cipher(
iface, CAMELLIAInterface, blockCipher, streamCipher, keyLen / 8));
}
static bool CAMELLIA_Cipher_registered =
Cipher::Register("CAMELLIA","16 byte block cipher", CAMELLIAInterface, CAMELLIAKeyRange,
CAMELLIABlockRange, NewCAMELLIACipher );
Cipher::Register("CAMELLIA", "16 byte block cipher", CAMELLIAInterface,
CAMELLIAKeyRange, CAMELLIABlockRange, NewCAMELLIACipher);
#endif
#ifndef OPENSSL_NO_BF
static Range BFKeyRange(128, 256, 32);

encfs/encfs.cpp

@@ -75,13 +75,7 @@ static EncFS_Context *context() {
* Optionally takes a pointer to the EncFS_Context, will get it from FUSE
* if the argument is NULL.
*/
static bool isReadOnly(EncFS_Context *ctx) {
if (ctx == nullptr) {
ctx = (EncFS_Context *)fuse_get_context()->private_data;
}
return ctx->opts->readOnly;
}
static bool isReadOnly(EncFS_Context *ctx) { return ctx->opts->readOnly; }
// helper function -- apply a functor to a cipher path, given the plain path
static int withCipherPath(const char *opName, const char *path,
@@ -366,7 +360,6 @@ int encfs_mkdir(const char *path, mode_t mode) {
int encfs_unlink(const char *path) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
@@ -392,7 +385,8 @@ int _do_rmdir(EncFS_Context *, const string &cipherPath) {
}
int encfs_rmdir(const char *path) {
if (isReadOnly(nullptr)) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
return withCipherPath("rmdir", path, bind(_do_rmdir, _1, _2));
@@ -529,7 +523,8 @@ int _do_chmod(EncFS_Context *, const string &cipherPath, mode_t mode) {
}
int encfs_chmod(const char *path, mode_t mode) {
if (isReadOnly(nullptr)) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
return withCipherPath("chmod", path, bind(_do_chmod, _1, _2, mode));
@@ -541,7 +536,8 @@ int _do_chown(EncFS_Context *, const string &cyName, uid_t u, gid_t g) {
}
int encfs_chown(const char *path, uid_t uid, gid_t gid) {
if (isReadOnly(nullptr)) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
return withCipherPath("chown", path, bind(_do_chown, _1, _2, uid, gid));
@@ -550,14 +546,16 @@ int encfs_chown(const char *path, uid_t uid, gid_t gid) {
int _do_truncate(FileNode *fnode, off_t size) { return fnode->truncate(size); }
int encfs_truncate(const char *path, off_t size) {
if (isReadOnly(nullptr)) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
return withFileNode("truncate", path, nullptr, bind(_do_truncate, _1, size));
}
int encfs_ftruncate(const char *path, off_t size, struct fuse_file_info *fi) {
if (isReadOnly(nullptr)) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
return withFileNode("ftruncate", path, fi, bind(_do_truncate, _1, size));
@@ -569,7 +567,8 @@ int _do_utime(EncFS_Context *, const string &cyName, struct utimbuf *buf) {
}
int encfs_utime(const char *path, struct utimbuf *buf) {
if (isReadOnly(nullptr)) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
return withCipherPath("utime", path, bind(_do_utime, _1, _2, buf));
@@ -592,7 +591,8 @@ int _do_utimens(EncFS_Context *, const string &cyName,
}
int encfs_utimens(const char *path, const struct timespec ts[2]) {
if (isReadOnly(nullptr)) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
return withCipherPath("utimens", path, bind(_do_utimens, _1, _2, ts));
@@ -701,7 +701,8 @@ int _do_fsync(FileNode *fnode, int dataSync) {
}
int encfs_fsync(const char *path, int dataSync, struct fuse_file_info *file) {
if (isReadOnly(nullptr)) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
return withFileNode("fsync", path, file, bind(_do_fsync, _1, dataSync));
@@ -717,7 +718,8 @@ int _do_write(FileNode *fnode, unsigned char *ptr, size_t size, off_t offset) {
int encfs_write(const char *path, const char *buf, size_t size, off_t offset,
struct fuse_file_info *file) {
if (isReadOnly(nullptr)) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
return withFileNode("write", path, file,
@@ -759,7 +761,10 @@ int _do_setxattr(EncFS_Context *, const string &cyName, const char *name,
}
int encfs_setxattr(const char *path, const char *name, const char *value,
size_t size, int flags, uint32_t position) {
if (isReadOnly(NULL)) return -EROFS;
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
(void)flags;
return withCipherPath("setxattr", path, bind(_do_setxattr, _1, _2, name,
value, size, position));
@@ -771,7 +776,8 @@ int _do_setxattr(EncFS_Context *, const string &cyName, const char *name,
}
int encfs_setxattr(const char *path, const char *name, const char *value,
size_t size, int flags) {
if (isReadOnly(nullptr)) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
return withCipherPath("setxattr", path,
@@ -831,7 +837,8 @@ int _do_removexattr(EncFS_Context *, const string &cyName, const char *name) {
}
int encfs_removexattr(const char *path, const char *name) {
if (isReadOnly(nullptr)) {
EncFS_Context *ctx = context();
if (isReadOnly(ctx)) {
return -EROFS;
}
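All of the hunks above apply one refactor: rather than `isReadOnly(nullptr)` lazily fetching the context from FUSE, each operation now fetches the context once and passes it in explicitly. A minimal standalone sketch of the resulting guard, with hypothetical names rather than the actual encfs symbols:

```c++
#include <cerrno>

struct Context {
  bool readOnly;
};

// Hypothetical stand-in for fuse_get_context()->private_data.
static Context *currentContext() {
  static Context ctx{true};
  return &ctx;
}

// Sketch of the guard every mutating operation now opens with: fetch the
// context explicitly, then refuse the call on a read-only mount.
static int guardedUnlink(const char *path) {
  Context *ctx = currentContext();
  if (ctx->readOnly) {
    return -EROFS;  // "read-only filesystem", as FUSE expects
  }
  // ... translate `path` to its ciphertext name and unlink it ...
  (void)path;
  return 0;
}
```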

integration.sh (new executable file, 6 lines)

@@ -0,0 +1,6 @@
#!/bin/bash -eux
# Make sure we are in the directory this script is in.
cd "$(dirname "$0")"
perl -MTest::Harness -e '$$Test::Harness::debug=1; runtests @ARGV;' integration/*.t.pl


@@ -5,7 +5,7 @@
use File::Temp;
use warnings;
require("tests/common.pl");
require("integration/common.pl");
sub mount_encfs_reverse {
my $p = shift;


@@ -5,7 +5,7 @@
use File::Temp;
use warnings;
require("tests/common.pl");
require("integration/common.pl");
# Create a new empty working directory
sub newWorkingDir {
@@ -58,7 +58,7 @@ sub mount_ecryptfs {
mkdir($c);
mkdir($p);
system("expect -c \"spawn mount -t ecryptfs $c $p\" ./tests/mount-ecryptfs.expect > /dev/null") == 0
system("expect -c \"spawn mount -t ecryptfs $c $p\" ./integration/mount-ecryptfs.expect > /dev/null") == 0
or die("ecryptfs mount failed - are you root?");
print "# ecryptfs mounted on $p\n";


@@ -8,7 +8,7 @@ use File::Copy;
use File::Temp;
use IO::Handle;
require("tests/common.pl");
require("integration/common.pl");
my $tempDir = $ENV{'TMPDIR'} || "/tmp";


@@ -9,7 +9,7 @@ use File::Temp;
use IO::Handle;
use Errno qw(EROFS);
require("tests/common.pl");
require("integration/common.pl");
my $tempDir = $ENV{'TMPDIR'} || "/tmp";


@@ -1,12 +0,0 @@
void CHECK(bool cond) {
if (!cond) {
__coverity_panic__();
}
}
void CHECK_EQ(int l, int r) {
if (l != r) {
__coverity_panic__();
}
}

po/de.po

@@ -436,7 +436,7 @@ msgid ""
"\n"
msgstr ""
"Nun wird ein Kennwort für das Dateisystem benötigt.\n"
"Da es keinen Mechanismus zur Wiederhestellung gibt, müssen Sie\n"
"Da es keinen Mechanismus zur Wiederherstellung gibt, müssen Sie\n"
"sich an das Kennwort erinnern! Das Kennwort kann mit encfsctl\n"
"nächträglich geändert werden.\n"
"\n"

test.sh (10 lines changed)

@@ -1,10 +0,0 @@
#!/bin/bash -eux
# Make sure we are in the directory this script is in.
cd "$(dirname "$0")"
# This is very noisy so run it silently at first. Run it again with
# output if the first run fails.
./build/checkops &> /dev/null || ./build/checkops
perl -MTest::Harness -e '$$Test::Harness::debug=1; runtests @ARGV;' tests/*.t.pl

test/CMakeLists.txt (new file, 8 lines)

@@ -0,0 +1,8 @@
file(GLOB_RECURSE TEST_SOURCES "*_test.cpp")
add_executable (unittests ${TEST_SOURCES})
target_link_libraries(unittests gtest gtest_main encfs)
add_test(unit unittests)
file(GLOB_RECURSE BENCH_SOURCES "*_bench.cpp")
add_executable (benchmarks ${BENCH_SOURCES})
target_link_libraries(benchmarks benchmark encfs)

test/Cipher_test.cpp (new file, 195 lines)

@@ -0,0 +1,195 @@
#include "gtest/gtest.h"
#include "encfs/BlockNameIO.h"
#include "encfs/Cipher.h"
#include "encfs/CipherKey.h"
#include "encfs/DirNode.h"
#include "encfs/FSConfig.h"
#include "encfs/FileUtils.h"
#include "encfs/StreamNameIO.h"
using namespace encfs;
using namespace testing;
using std::string;
const int FSBlockSize = 256;
const char TEST_ROOTDIR[] = "/foo";
static void testNameCoding(DirNode &dirNode) {
// encrypt a name
const char *name[] = {
"1234567", "12345678", "123456789",
"123456789ABCDEF", "123456789ABCDEF0", "123456789ABCDEF01",
"test-name", "test-name2", "test",
"../test", "/foo/bar/blah", "test-name.21",
"test-name.22", "test-name.o", "1.test",
"2.test", "a/b/c/d", "a/c/d/e",
"b/c/d/e", "b/a/c/d", NULL};
const char **orig = name;
while (*orig) {
string encName = dirNode.relativeCipherPath(*orig);
// decrypt name
string decName = dirNode.plainPath(encName.c_str());
ASSERT_EQ(decName, *orig);
orig++;
}
}
class CipherTest : public TestWithParam<Cipher::CipherAlgorithm> {
protected:
virtual void SetUp() {
Cipher::CipherAlgorithm alg = GetParam();
cipher = Cipher::New(alg.name, alg.keyLength.closest(256));
}
virtual void TearDown() {}
std::shared_ptr<Cipher> cipher;
};
TEST_P(CipherTest, SaveRestoreKey) {
auto key = cipher->newRandomKey();
auto encodingKey = cipher->newRandomKey();
int encodedKeySize = cipher->encodedKeySize();
unsigned char keyBuf[encodedKeySize];
cipher->writeKey(key, keyBuf, encodingKey);
auto restored = cipher->readKey(keyBuf, encodingKey);
EXPECT_TRUE(restored);
EXPECT_TRUE(cipher->compareKey(key, restored));
}
TEST_P(CipherTest, NameStreamEncoding) {
auto key = cipher->newRandomKey();
FSConfigPtr fsCfg = FSConfigPtr(new FSConfig);
fsCfg->cipher = cipher;
fsCfg->key = key;
fsCfg->config.reset(new EncFSConfig);
fsCfg->config->blockSize = FSBlockSize;
fsCfg->opts.reset(new EncFS_Opts);
fsCfg->opts->idleTracking = false;
fsCfg->config->uniqueIV = false;
fsCfg->nameCoding.reset(
new StreamNameIO(StreamNameIO::CurrentInterface(), cipher, key));
{
fsCfg->nameCoding->setChainedNameIV(true);
DirNode dirNode(NULL, TEST_ROOTDIR, fsCfg);
testNameCoding(dirNode);
}
{
fsCfg->nameCoding->setChainedNameIV(false);
DirNode dirNode(NULL, TEST_ROOTDIR, fsCfg);
testNameCoding(dirNode);
}
}
TEST_P(CipherTest, NameBlockEncoding) {
auto key = cipher->newRandomKey();
FSConfigPtr fsCfg = FSConfigPtr(new FSConfig);
fsCfg->cipher = cipher;
fsCfg->key = key;
fsCfg->config.reset(new EncFSConfig);
fsCfg->config->blockSize = FSBlockSize;
fsCfg->opts.reset(new EncFS_Opts);
fsCfg->opts->idleTracking = false;
fsCfg->config->uniqueIV = false;
fsCfg->nameCoding.reset(new BlockNameIO(
BlockNameIO::CurrentInterface(), cipher, key, cipher->cipherBlockSize()));
{
fsCfg->nameCoding->setChainedNameIV(true);
DirNode dirNode(NULL, TEST_ROOTDIR, fsCfg);
testNameCoding(dirNode);
}
{
fsCfg->nameCoding->setChainedNameIV(false);
DirNode dirNode(NULL, TEST_ROOTDIR, fsCfg);
testNameCoding(dirNode);
}
}
TEST_P(CipherTest, NameBlockBase32Encoding) {
auto key = cipher->newRandomKey();
FSConfigPtr fsCfg = FSConfigPtr(new FSConfig);
fsCfg->cipher = cipher;
fsCfg->key = key;
fsCfg->config.reset(new EncFSConfig);
fsCfg->config->blockSize = FSBlockSize;
fsCfg->opts.reset(new EncFS_Opts);
fsCfg->opts->idleTracking = false;
fsCfg->config->uniqueIV = false;
fsCfg->nameCoding.reset(new BlockNameIO(BlockNameIO::CurrentInterface(),
cipher, key,
cipher->cipherBlockSize(), true));
{
fsCfg->nameCoding->setChainedNameIV(true);
DirNode dirNode(NULL, TEST_ROOTDIR, fsCfg);
testNameCoding(dirNode);
}
{
fsCfg->nameCoding->setChainedNameIV(false);
DirNode dirNode(NULL, TEST_ROOTDIR, fsCfg);
testNameCoding(dirNode);
}
}
TEST_P(CipherTest, ConfigLoadStore) {
auto key = cipher->newRandomKey();
CipherKey encodingKey = cipher->newRandomKey();
int encodedKeySize = cipher->encodedKeySize();
unsigned char keyBuf[encodedKeySize];
cipher->writeKey(key, keyBuf, encodingKey);
// store in config struct..
EncFSConfig cfg;
cfg.cipherIface = cipher->interface();
cfg.keySize = 8 * cipher->keySize();
cfg.blockSize = FSBlockSize;
cfg.assignKeyData(keyBuf, encodedKeySize);
// save config
// Creation of a temporary file should be more platform independent. On
// c++17 we could use std::filesystem.
std::string name = "/tmp/encfstestXXXXXX";
int tmpFd = mkstemp(&name[0]);
EXPECT_GE(tmpFd, 0);
// mkstemp opens the temporary file, but we only need its name -> close it
EXPECT_EQ(close(tmpFd), 0);
{
auto ok = writeV6Config(name.c_str(), &cfg);
EXPECT_TRUE(ok);
}
// read back in and check everything..
EncFSConfig cfg2;
{
auto ok = readV6Config(name.c_str(), &cfg2, nullptr);
EXPECT_TRUE(ok);
}
// delete the temporary file where we stored the config
EXPECT_EQ(unlink(name.c_str()), 0);
// check..
EXPECT_TRUE(cfg.cipherIface.implements(cfg2.cipherIface));
EXPECT_EQ(cfg.keySize, cfg2.keySize);
EXPECT_EQ(cfg.blockSize, cfg2.blockSize);
// try decoding key..
CipherKey key2 = cipher->readKey(cfg2.getKeyData(), encodingKey);
EXPECT_TRUE(key2);
EXPECT_TRUE(cipher->compareKey(key, key2));
}
INSTANTIATE_TEST_CASE_P(CipherKey, CipherTest,
ValuesIn(Cipher::GetAlgorithmList()));
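The comment in `ConfigLoadStore` notes that C++17's `std::filesystem` would make the temporary-file handling more portable. A minimal sketch of that alternative, with a hypothetical helper that is not part of the test:

```c++
#include <filesystem>
#include <fstream>
#include <string>

// Sketch: derive a scratch path from the platform temp directory instead of
// hard-coding "/tmp". Unlike mkstemp(), this is not race-free, so a real
// replacement would still need collision handling.
static std::string scratchConfigPath(const std::string &stem) {
  namespace fs = std::filesystem;
  fs::path p = fs::temp_directory_path() / (stem + ".encfs6.xml");
  std::ofstream out(p);  // create the (empty) file
  return p.string();
}
```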

test/MemoryPool_bench.cpp (new file, 14 lines)

@@ -0,0 +1,14 @@
#include "benchmark/benchmark.h"
#include "encfs/MemoryPool.h"
using namespace encfs;
static void BM_MemPoolAllocate(benchmark::State& state) {
while (state.KeepRunning()) {
auto block = MemoryPool::allocate(1024);
MemoryPool::release(block);
}
}
// Register the function as a benchmark
BENCHMARK(BM_MemPoolAllocate);

test/MemoryPool_test.cpp (new file, 12 lines)

@@ -0,0 +1,12 @@
#include "gtest/gtest.h"
#include "encfs/MemoryPool.h"
using namespace encfs;
TEST(MemoryPool, Allocate) {
auto block = MemoryPool::allocate(1024);
ASSERT_TRUE(block.data != nullptr);
ASSERT_TRUE(block.internalData != nullptr);
MemoryPool::release(block);
}

test/main_bench.cpp (new file, 3 lines)

@@ -0,0 +1,3 @@
#include "benchmark/benchmark.h"
BENCHMARK_MAIN();

vendor/github.com/google/benchmark/.clang-format (generated, vendored; new file, 5 lines)

@@ -0,0 +1,5 @@
---
Language: Cpp
BasedOnStyle: Google
...

vendor/github.com/google/benchmark/.gitignore (generated, vendored; new file, 46 lines)

@@ -0,0 +1,46 @@
*.a
*.so
*.so.?*
*.dll
*.exe
*.dylib
*.cmake
!/cmake/*.cmake
*~
*.pyc
__pycache__
# lcov
*.lcov
/lcov
# cmake files.
/Testing
CMakeCache.txt
CMakeFiles/
cmake_install.cmake
# makefiles.
Makefile
# in-source build.
bin/
lib/
/test/*_test
# exuberant ctags.
tags
# YouCompleteMe configuration.
.ycm_extra_conf.pyc
# ninja generated files.
.ninja_deps
.ninja_log
build.ninja
install_manifest.txt
rules.ninja
# out-of-source build top-level folders.
build/
_build/

vendor/github.com/google/benchmark/.travis-libcxx-setup.sh (new file, 28 lines)

@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Install a newer CMake version
curl -sSL https://cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.sh -o install-cmake.sh
chmod +x install-cmake.sh
sudo ./install-cmake.sh --prefix=/usr/local --skip-license
# Checkout LLVM sources
git clone --depth=1 https://github.com/llvm-mirror/llvm.git llvm-source
git clone --depth=1 https://github.com/llvm-mirror/libcxx.git llvm-source/projects/libcxx
git clone --depth=1 https://github.com/llvm-mirror/libcxxabi.git llvm-source/projects/libcxxabi
# Setup libc++ options
if [ -z "$BUILD_32_BITS" ]; then
export BUILD_32_BITS=OFF && echo disabling 32 bit build
fi
# Build and install libc++ (Use unstable ABI for better sanitizer coverage)
mkdir llvm-build && cd llvm-build
cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} \
-DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr \
-DLIBCXX_ABI_UNSTABLE=ON \
-DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \
-DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \
../llvm-source
make cxx -j2
sudo make install-cxxabi install-cxx
cd ../

vendor/github.com/google/benchmark/.travis.yml (generated, vendored; new file, 157 lines)

@@ -0,0 +1,157 @@
sudo: required
dist: trusty
language: cpp
env:
global:
- /usr/local/bin:$PATH
matrix:
include:
- compiler: gcc
addons:
apt:
packages:
- lcov
env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage
- compiler: gcc
env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug
- compiler: gcc
env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release
- compiler: gcc
addons:
apt:
packages:
- g++-multilib
env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug BUILD_32_BITS=ON
- compiler: gcc
addons:
apt:
packages:
- g++-multilib
env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release BUILD_32_BITS=ON
- compiler: gcc
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- g++-6
env:
- COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug
- EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold"
- compiler: clang
env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug
- compiler: clang
env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Release
# Clang w/ libc++
- compiler: clang
addons:
apt:
packages:
clang-3.8
env:
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1
- EXTRA_FLAGS="-stdlib=libc++"
- compiler: clang
addons:
apt:
packages:
clang-3.8
env:
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
- LIBCXX_BUILD=1
- EXTRA_FLAGS="-stdlib=libc++"
# Clang w/ 32bit libc++
- compiler: clang
addons:
apt:
packages:
- clang-3.8
- g++-multilib
env:
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1
- BUILD_32_BITS=ON
- EXTRA_FLAGS="-stdlib=libc++ -m32"
# Clang w/ 32bit libc++
- compiler: clang
addons:
apt:
packages:
- clang-3.8
- g++-multilib
env:
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
- LIBCXX_BUILD=1
- BUILD_32_BITS=ON
- EXTRA_FLAGS="-stdlib=libc++ -m32"
# Clang w/ libc++, ASAN, UBSAN
- compiler: clang
addons:
apt:
packages:
clang-3.8
env:
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address"
- EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all"
- UBSAN_OPTIONS=print_stacktrace=1
# Clang w/ libc++ and MSAN
- compiler: clang
addons:
apt:
packages:
clang-3.8
env:
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins
- EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins"
# Clang w/ libc++ and MSAN
- compiler: clang
addons:
apt:
packages:
clang-3.8
env:
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo
- LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread
- EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all"
- os: osx
osx_image: xcode8.3
compiler: clang
env:
- COMPILER=clang++ BUILD_TYPE=Debug
- os: osx
osx_image: xcode8.3
compiler: clang
env:
- COMPILER=clang++ BUILD_TYPE=Release
before_script:
- if [ -z "$BUILD_32_BITS" ]; then
export BUILD_32_BITS=OFF && echo disabling 32 bit build;
fi
- if [ -n "${LIBCXX_BUILD}" ]; then
source .travis-libcxx-setup.sh;
fi
- mkdir build && cd build
install:
- if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
PATH=~/.local/bin:${PATH};
pip install --user --upgrade pip;
pip install --user cpp-coveralls;
fi
script:
- cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS}" -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ..
- make
- ctest -C ${BUILD_TYPE} --output-on-failure
after_success:
- if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
coveralls --include src --include include --gcov-options '\-lp' --root .. --build-root .;
fi

vendor/github.com/google/benchmark/.ycm_extra_conf.py (generated, vendored; new file, 115 lines)

@@ -0,0 +1,115 @@
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Werror',
'-pedantic-errors',
'-std=c++0x',
'-fno-strict-aliasing',
'-O3',
'-DNDEBUG',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
'-I', 'include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cc' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}

vendor/github.com/google/benchmark/AUTHORS (generated, vendored; new file, 40 lines)

@@ -0,0 +1,40 @@
# This is the official list of benchmark authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
#
# Please keep the list sorted.
Albert Pretorius <pretoalb@gmail.com>
Arne Beer <arne@twobeer.de>
Christopher Seymour <chris.j.seymour@hotmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Dominic Hamon <dma@stripysock.com>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Google Inc.
International Business Machines Corporation
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Paul Redmond <paul.redmond@gmail.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Dirac Research
Zbigniew Skowron <zbychs@gmail.com>
Dominik Czarnota <dominik.b.czarnota@gmail.com>

vendor/github.com/google/benchmark/CMakeLists.txt (generated, vendored; new file, 202 lines)

@@ -0,0 +1,202 @@
cmake_minimum_required (VERSION 2.8.12)
project (benchmark)
foreach(p
CMP0054 # CMake 3.1
CMP0056 # export EXE_LINKER_FLAGS to try_run
)
if(POLICY ${p})
cmake_policy(SET ${p} NEW)
endif()
endforeach()
option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library" OFF)
# Make sure we can import our CMake functions
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
# Read the git tags to determine the project version
include(GetGitVersion)
get_git_version(GIT_VERSION)
# Tell the user what versions we are using
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" VERSION ${GIT_VERSION})
message("-- Version: ${VERSION}")
# The version of the libraries
set(GENERIC_LIB_VERSION ${VERSION})
string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION)
# Import our CMake modules
include(CheckCXXCompilerFlag)
include(AddCXXCompilerFlag)
include(CXXFeatureCheck)
if (BENCHMARK_BUILD_32_BITS)
add_required_cxx_compiler_flag(-m32)
endif()
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
# Turn compiler warnings up to 11
string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
add_definitions(-D_CRT_SECURE_NO_WARNINGS)
if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
add_cxx_compiler_flag(-EHs-)
add_cxx_compiler_flag(-EHa-)
endif()
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL")
set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /GL")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /GL")
set(CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL "${CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL} /LTCG")
set(CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL "${CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL} /LTCG")
set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG")
endif()
else()
# Try and enable C++11. Don't use C++14 because it doesn't work in some
# configurations.
add_cxx_compiler_flag(-std=c++11)
if (NOT HAVE_CXX_FLAG_STD_CXX11)
add_cxx_compiler_flag(-std=c++0x)
endif()
# Turn compiler warnings up to 11
add_cxx_compiler_flag(-Wall)
add_cxx_compiler_flag(-Wextra)
add_cxx_compiler_flag(-Wshadow)
add_cxx_compiler_flag(-Werror RELEASE)
add_cxx_compiler_flag(-Werror RELWITHDEBINFO)
add_cxx_compiler_flag(-Werror MINSIZEREL)
add_cxx_compiler_flag(-pedantic)
add_cxx_compiler_flag(-pedantic-errors)
add_cxx_compiler_flag(-Wshorten-64-to-32)
add_cxx_compiler_flag(-Wfloat-equal)
add_cxx_compiler_flag(-fstrict-aliasing)
if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
add_cxx_compiler_flag(-fno-exceptions)
endif()
if (NOT BENCHMARK_USE_LIBCXX)
add_cxx_compiler_flag(-Wzero-as-null-pointer-constant)
endif()
if (HAVE_CXX_FLAG_FSTRICT_ALIASING)
if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel") #ICC17u2: Many false positives for Wstrict-aliasing
add_cxx_compiler_flag(-Wstrict-aliasing)
endif()
endif()
# ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden
# (because of deprecated overload)
add_cxx_compiler_flag(-wd654)
add_cxx_compiler_flag(-Wthread-safety)
if (HAVE_CXX_FLAG_WTHREAD_SAFETY)
cxx_feature_check(THREAD_SAFETY_ATTRIBUTES)
endif()
# On most UNIX like platforms g++ and clang++ define _GNU_SOURCE as a
# predefined macro, which turns on all of the wonderful libc extensions.
# However g++ doesn't do this in Cygwin so we have to define it ourselves
# since we depend on GNU/POSIX/BSD extensions.
if (CYGWIN)
add_definitions(-D_GNU_SOURCE=1)
endif()
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
add_cxx_compiler_flag(-flto)
if ("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
find_program(GCC_AR gcc-ar)
if (GCC_AR)
set(CMAKE_AR ${GCC_AR})
endif()
find_program(GCC_RANLIB gcc-ranlib)
if (GCC_RANLIB)
set(CMAKE_RANLIB ${GCC_RANLIB})
endif()
endif()
endif()
# Coverage build type
set(CMAKE_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}" CACHE STRING
"Flags used by the C++ compiler during coverage builds."
FORCE)
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
"${CMAKE_EXE_LINKER_FLAGS_DEBUG}" CACHE STRING
"Flags used for linking binaries during coverage builds."
FORCE)
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
"${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" CACHE STRING
"Flags used by the shared libraries linker during coverage builds."
FORCE)
mark_as_advanced(
CMAKE_CXX_FLAGS_COVERAGE
CMAKE_EXE_LINKER_FLAGS_COVERAGE
CMAKE_SHARED_LINKER_FLAGS_COVERAGE)
set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING
"Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage."
FORCE)
add_cxx_compiler_flag(--coverage COVERAGE)
endif()
if (BENCHMARK_USE_LIBCXX)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
add_cxx_compiler_flag(-stdlib=libc++)
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR
"${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
add_cxx_compiler_flag(-nostdinc++)
message("libc++ header path must be manually specified using CMAKE_CXX_FLAGS")
# Adding -nodefaultlibs directly to CMAKE_<TYPE>_LINKER_FLAGS will break
# configuration checks such as 'find_package(Threads)'
list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs)
# -lc++ cannot be added directly to CMAKE_<TYPE>_LINKER_FLAGS because
# linker flags appear before all linker inputs and -lc++ must appear after.
list(APPEND BENCHMARK_CXX_LIBRARIES c++)
else()
message(FATAL "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler")
endif()
endif(BENCHMARK_USE_LIBCXX)
# C++ feature checks
# Determine the correct regular expression engine to use
cxx_feature_check(STD_REGEX)
cxx_feature_check(GNU_POSIX_REGEX)
cxx_feature_check(POSIX_REGEX)
if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(FATAL_ERROR "Failed to determine the source files for the regular expression backend")
endif()
if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX
AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(WARNING "Using std::regex with exceptions disabled is not fully supported")
endif()
cxx_feature_check(STEADY_CLOCK)
# Ensure we have pthreads
find_package(Threads REQUIRED)
# Set up directories
include_directories(${PROJECT_SOURCE_DIR}/include)
# Build the targets
add_subdirectory(src)
if (BENCHMARK_ENABLE_TESTING)
enable_testing()
add_subdirectory(test)
endif()

vendor/github.com/google/benchmark/CONTRIBUTING.md (generated, vendored; new file, 58 lines)

@@ -0,0 +1,58 @@
# How to contribute #
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement ##
Contributions to any Google project must be accompanied by a Contributor
License Agreement. This is not a copyright **assignment**, it simply gives
Google permission to use and redistribute your contributions as part of the
project.
* If you are an individual writing original source code and you're sure you
own the intellectual property, then you'll need to sign an [individual
CLA][].
* If you work for a company that wants to allow you to contribute your work,
then you'll need to sign a [corporate CLA][].
You generally only need to submit a CLA once, so if you've already submitted
one (even if it was for a different project), you probably don't need to do it
again.
[individual CLA]: https://developers.google.com/open-source/cla/individual
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
Once your CLA is submitted (or if you already submitted one for
another Google project), make a commit adding yourself to the
[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
of your first [pull request][].
[AUTHORS]: AUTHORS
[CONTRIBUTORS]: CONTRIBUTORS
## Submitting a patch ##
1. It's generally best to start by opening a new issue describing the bug or
feature you're intending to fix. Even if you think it's relatively minor,
it's helpful to know what people are working on. Mention in the initial
issue that you are planning to work on that bug or feature so that it can
be assigned to you.
1. Follow the normal process of [forking][] the project, and setup a new
branch to work in. It's important that each group of changes be done in
separate branches in order to ensure that a pull request only includes the
commits related to that bug or feature.
1. Do your best to have [well-formed commit messages][] for each change.
This provides consistency throughout the project, and ensures that commit
messages are able to be formatted properly by various git tools.
1. Finally, push the commits to your fork and submit a [pull request][].
[forking]: https://help.github.com/articles/fork-a-repo
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
[pull request]: https://help.github.com/articles/creating-a-pull-request

vendor/github.com/google/benchmark/CONTRIBUTORS (generated, vendored; new file, 59 lines)

@@ -0,0 +1,59 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
#
# Names should be added to this file as:
# Name <email address>
#
# Please keep the list sorted.
Albert Pretorius <pretoalb@gmail.com>
Arne Beer <arne@twobeer.de>
Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Christopher Seymour <chris.j.seymour@hotmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Dominic Hamon <dma@stripysock.com>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Kai Wolf <kai.wolf@gmail.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Pascal Leroy <phl@google.com>
Paul Redmond <paul.redmond@gmail.com>
Pierre Phaneuf <pphaneuf@google.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Ray Glover <ray.glover@uk.ibm.com>
Shuo Chen <chenshuo@chenshuo.com>
Tom Madams <tom.ej.madams@gmail.com> <tmadams@google.com>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Tobias Ulvgård <tobias.ulvgard@dirac.se>
Zbigniew Skowron <zbychs@gmail.com>
Dominik Czarnota <dominik.b.czarnota@gmail.com>

vendor/github.com/google/benchmark/LICENSE (generated, vendored; new file, 202 lines)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/google/benchmark/README.md (generated, vendored; new file, 726 lines)

@@ -0,0 +1,726 @@
# benchmark
[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
[![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
A library to support the benchmarking of functions, similar to unit-tests.
Discussion group: https://groups.google.com/d/forum/benchmark-discuss
IRC channel: https://freenode.net #googlebenchmark
[Known issues and common problems](#known-issues)
[Additional Tooling Documentation](docs/tools.md)
## Example usage
### Basic usage
Define a function that executes the code to be measured.
```c++
static void BM_StringCreation(benchmark::State& state) {
while (state.KeepRunning())
std::string empty_string;
}
// Register the function as a benchmark
BENCHMARK(BM_StringCreation);
// Define another benchmark
static void BM_StringCopy(benchmark::State& state) {
std::string x = "hello";
while (state.KeepRunning())
std::string copy(x);
}
BENCHMARK(BM_StringCopy);
BENCHMARK_MAIN();
```
### Passing arguments
Sometimes a family of benchmarks can be implemented with just one routine that
takes an extra argument to specify which one of the family of benchmarks to
run. For example, the following code defines a family of benchmarks for
measuring the speed of `memcpy()` calls of different lengths:
```c++
static void BM_memcpy(benchmark::State& state) {
char* src = new char[state.range(0)];
char* dst = new char[state.range(0)];
memset(src, 'x', state.range(0));
while (state.KeepRunning())
memcpy(dst, src, state.range(0));
state.SetBytesProcessed(int64_t(state.iterations()) *
int64_t(state.range(0)));
delete[] src;
delete[] dst;
}
BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
```
The preceding code is quite repetitive, and can be replaced with the following
short-hand. The following invocation will pick a few appropriate arguments in
the specified range and will generate a benchmark for each such argument.
```c++
BENCHMARK(BM_memcpy)->Range(8, 8<<10);
```
By default the arguments in the range are generated in multiples of eight and
the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the
range multiplier is changed to multiples of two.
```c++
BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10);
```
Now arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ].
You might have a benchmark that depends on two or more inputs. For example, the
following code defines a family of benchmarks for measuring the speed of set
insertion.
```c++
static void BM_SetInsert(benchmark::State& state) {
while (state.KeepRunning()) {
state.PauseTiming();
std::set<int> data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j)
data.insert(RandomNumber());
}
}
BENCHMARK(BM_SetInsert)
->Args({1<<10, 1})
->Args({1<<10, 8})
->Args({1<<10, 64})
->Args({1<<10, 512})
->Args({8<<10, 1})
->Args({8<<10, 8})
->Args({8<<10, 64})
->Args({8<<10, 512});
```
The preceding code is quite repetitive, and can be replaced with the following
short-hand. The following macro will pick a few appropriate arguments in the
product of the two specified ranges and will generate a benchmark for each such
pair.
```c++
BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {1, 512}});
```
For more complex patterns of inputs, passing a custom function to `Apply` allows
programmatic specification of an arbitrary set of arguments on which to run the
benchmark. The following example enumerates a dense range on one parameter,
and a sparse range on the second.
```c++
static void CustomArguments(benchmark::internal::Benchmark* b) {
for (int i = 0; i <= 10; ++i)
for (int j = 32; j <= 1024*1024; j *= 8)
b->Args({i, j});
}
BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
```
### Calculate asymptotic complexity (Big O)
Asymptotic complexity can be calculated for a family of benchmarks. The
following code will calculate the coefficient for the high-order term in the
running time and the normalized root-mean-square error of string comparison.
```c++
static void BM_StringCompare(benchmark::State& state) {
std::string s1(state.range(0), '-');
std::string s2(state.range(0), '-');
while (state.KeepRunning()) {
benchmark::DoNotOptimize(s1.compare(s2));
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_StringCompare)
->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
```
As shown in the following invocation, asymptotic complexity can also be
calculated automatically.
```c++
BENCHMARK(BM_StringCompare)
->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
```
The following code specifies asymptotic complexity with a lambda function
that can be used to customize the high-order term calculation.
```c++
BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
->Range(1<<10, 1<<18)->Complexity([](int n)->double{return n; });
```
### Templated benchmarks
Templated benchmarks work the same way: this example produces and consumes
messages of size `sizeof(v)`, `state.range(0)` times per iteration. It also
outputs throughput in the absence of multiprogramming.
```c++
template <class Q> void BM_Sequential(benchmark::State& state) {
Q q;
typename Q::value_type v;
while (state.KeepRunning()) {
for (int i = state.range(0); i--; )
q.push(v);
for (int e = state.range(0); e--; )
q.Wait(&v);
}
// actually messages, not bytes:
state.SetBytesProcessed(
static_cast<int64_t>(state.iterations())*state.range(0));
}
BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
```
Three macros are provided for adding benchmark templates.
```c++
#if __cplusplus >= 201103L // C++11 and greater.
#define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
#else // C++ < C++11
#define BENCHMARK_TEMPLATE(func, arg1)
#endif
#define BENCHMARK_TEMPLATE1(func, arg1)
#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
```
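For illustration, the queue benchmark above could equally be registered with the
numbered form, which also works on pre-C++11 compilers:
```c++
BENCHMARK_TEMPLATE1(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
```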
## Passing arbitrary arguments to a benchmark
In C++11 it is possible to define a benchmark that takes an arbitrary number
of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
macro creates a benchmark that invokes `func` with the `benchmark::State` as
the first argument followed by the specified `args...`.
The `test_case_name` is appended to the name of the benchmark and
should describe the values passed.
```c++
template <class ...ExtraArgs>
void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
[...]
}
// Registers a benchmark named "BM_takes_args/int_string_test" that passes
// the specified values to `extra_args`.
BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
```
Note that elements of `...args` may refer to global variables. Users should
avoid modifying global state inside of a benchmark.
## Using RegisterBenchmark(name, fn, args...)
The `RegisterBenchmark(name, func, args...)` function provides an alternative
way to create and register benchmarks.
`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
pointer to a new benchmark with the specified `name` that invokes
`func(st, args...)` where `st` is a `benchmark::State` object.
Unlike the `BENCHMARK` registration macros, which can only be used at global
scope, `RegisterBenchmark` can be called anywhere. This allows benchmarks to
be registered programmatically.
Additionally `RegisterBenchmark` allows any callable object, including
capturing lambdas and function objects, to be registered as a benchmark.
For example:
```c++
auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
int main(int argc, char** argv) {
for (auto& test_input : { /* ... */ })
benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
}
```
### Multithreaded benchmarks
In a multithreaded test (benchmark invoked by multiple threads simultaneously),
it is guaranteed that none of the threads will start until all have called
`KeepRunning`, and all will have finished before `KeepRunning` returns false. As
such, any global setup or teardown can be wrapped in a check against the thread
index:
```c++
static void BM_MultiThreaded(benchmark::State& state) {
if (state.thread_index == 0) {
// Setup code here.
}
while (state.KeepRunning()) {
// Run the test as normal.
}
if (state.thread_index == 0) {
// Teardown code here.
}
}
BENCHMARK(BM_MultiThreaded)->Threads(2);
```
If the benchmarked code itself uses threads and you want to compare it to
single-threaded code, you may want to use real-time ("wallclock") measurements
for latency comparisons:
```c++
BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
```
Without `UseRealTime`, CPU time is used by default.
## Manual timing
For benchmarking something for which neither CPU time nor real time is
correct or accurate enough, completely manual timing is supported using
the `UseManualTime` function.
When `UseManualTime` is used, the benchmarked code must call
`SetIterationTime` once per iteration of the `KeepRunning` loop to
report the manually measured time.
An example use case for this is benchmarking GPU execution (e.g. OpenCL
or CUDA kernels, OpenGL or Vulkan or Direct3D draw calls), which cannot
be accurately measured using CPU time or real-time. Instead, they can be
measured accurately using a dedicated API, and these measurement results
can be reported back with `SetIterationTime`.
```c++
static void BM_ManualTiming(benchmark::State& state) {
int microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration {
static_cast<double>(microseconds)
};
while (state.KeepRunning()) {
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(sleep_duration);
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
end - start);
state.SetIterationTime(elapsed_seconds.count());
}
}
BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
```
### Preventing optimisation
To prevent a value or expression from being optimized away by the compiler,
the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
functions can be used.
```c++
static void BM_test(benchmark::State& state) {
while (state.KeepRunning()) {
int x = 0;
for (int i=0; i < 64; ++i) {
benchmark::DoNotOptimize(x += i);
}
}
}
```
`DoNotOptimize(<expr>)` forces the *result* of `<expr>` to be stored in either
memory or a register. For GNU based compilers it acts as read/write barrier
for global memory. More specifically it forces the compiler to flush pending
writes to memory and reload any other values as necessary.
Note that `DoNotOptimize(<expr>)` does not prevent optimizations on `<expr>`
in any way. `<expr>` may even be removed entirely when the result is already
known. For example:
```c++
/* Example 1: `<expr>` is removed entirely. */
int foo(int x) { return x + 42; }
while (...) DoNotOptimize(foo(0)); // Optimized to DoNotOptimize(42);
/* Example 2: Result of '<expr>' is only reused */
int bar(int) __attribute__((const));
while (...) DoNotOptimize(bar(0)); // Optimized to:
// int __result__ = bar(0);
// while (...) DoNotOptimize(__result__);
```
The second tool for preventing optimizations is `ClobberMemory()`. In essence
`ClobberMemory()` forces the compiler to perform all pending writes to global
memory. Memory managed by block scope objects must be "escaped" using
`DoNotOptimize(...)` before it can be clobbered. In the below example
`ClobberMemory()` prevents the call to `v.push_back(42)` from being optimized
away.
```c++
static void BM_vector_push_back(benchmark::State& state) {
while (state.KeepRunning()) {
std::vector<int> v;
v.reserve(1);
benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
v.push_back(42);
benchmark::ClobberMemory(); // Force 42 to be written to memory.
}
}
```
Note that `ClobberMemory()` is only available for GNU or MSVC based compilers.
### Set time unit manually
If a benchmark runs for a few milliseconds, it may be hard to visually compare
the measured times, since the output data is given in nanoseconds by default.
To make the output easier to read, you can set the time unit explicitly:
```c++
BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
```
## Controlling number of iterations
In all cases, the number of iterations for which the benchmark is run is
governed by the amount of time the benchmark takes. Concretely, the framework
runs at least one and at most 1e9 iterations, increasing the count until the
CPU time exceeds the minimum time or the wallclock time exceeds 5x the minimum
time. The minimum time is set as a flag `--benchmark_min_time` or per benchmark
by calling `MinTime` on the registered benchmark object.
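For example (an illustrative sketch, reusing the `BM_test` name from earlier
examples):
```c++
BENCHMARK(BM_test)->MinTime(2.0); // run each repetition for at least 2 seconds
```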
## Reporting the mean and standard deviation by repeated benchmarks
By default each benchmark is run once and that single result is reported.
However, benchmarks are often noisy and a single result may not be representative
of the overall behavior. For this reason it's possible to repeatedly rerun the
benchmark.
The number of runs of each benchmark is specified globally by the
`--benchmark_repetitions` flag or on a per-benchmark basis by calling
`Repetitions` on the registered benchmark object. When a benchmark is run
more than once, the mean and standard deviation of the runs will be reported.
Additionally the `--benchmark_report_aggregates_only={true|false}` flag or
`ReportAggregatesOnly(bool)` function can be used to change how repeated tests
are reported. By default the result of each repeated run is reported. When this
option is 'true' only the mean and standard deviation of the runs are reported.
Calling `ReportAggregatesOnly(bool)` on a registered benchmark object overrides
the value of the flag for that benchmark.
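A minimal sketch combining both, again using the `BM_test` name from earlier
examples:
```c++
// Run 10 repetitions and print only the mean and standard deviation.
BENCHMARK(BM_test)->Repetitions(10)->ReportAggregatesOnly(true);
```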
## Fixtures
Fixture tests are created by first defining a type that derives from
`::benchmark::Fixture` and then creating/registering the tests using the
following macros:
* `BENCHMARK_F(ClassName, Method)`
* `BENCHMARK_DEFINE_F(ClassName, Method)`
* `BENCHMARK_REGISTER_F(ClassName, Method)`
For example:
```c++
class MyFixture : public benchmark::Fixture {};
BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
while (st.KeepRunning()) {
...
}
}
BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
while (st.KeepRunning()) {
...
}
}
/* BarTest is NOT registered */
BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
/* BarTest is now registered */
```
## User-defined counters
You can add your own counters with user-defined names. The example below
will add columns "Foo", "Bar" and "Baz" in its output:
```c++
static void UserCountersExample1(benchmark::State& state) {
double numFoos = 0, numBars = 0, numBazs = 0;
while (state.KeepRunning()) {
// ... count Foo,Bar,Baz events
}
state.counters["Foo"] = numFoos;
state.counters["Bar"] = numBars;
state.counters["Baz"] = numBazs;
}
```
The `state.counters` object is a `std::map` with `std::string` keys
and `Counter` values. The latter is a `double`-like class that is implicitly
convertible to `double&`, so you can use all of the standard arithmetic
assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
In multithreaded benchmarks, each counter is set on the calling thread only.
When the benchmark finishes, the counters from each thread will be summed;
the resulting sum is the value which will be shown for the benchmark.
The `Counter` constructor accepts two parameters: the value as a `double`
and a bit flag which allows you to show counters as rates and/or as
per-thread averages:
```c++
// sets a simple counter
state.counters["Foo"] = numFoos;
// Set the counter as a rate. It will be presented divided
// by the duration of the benchmark.
state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate);
// Set the counter as a thread-average quantity. It will
// be presented divided by the number of threads.
state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads);
// There's also a combined flag:
state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate);
```
When you're compiling in C++11 mode or later you can use `insert()` with
`std::initializer_list`:
```c++
// With C++11, this can be done:
state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}});
// ... instead of:
state.counters["Foo"] = numFoos;
state.counters["Bar"] = numBars;
state.counters["Baz"] = numBazs;
```
### Counter reporting
When using the console reporter, by default, user counters are printed after
the table, the same way as ``bytes_processed`` and
``items_processed``. This is best for cases in which there are few counters,
or where there are only a couple of lines per benchmark. Here's an example of
the default output:
```
------------------------------------------------------------------------------
Benchmark Time CPU Iterations UserCounters...
------------------------------------------------------------------------------
BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8
BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1024m
BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2
BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4
BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8
BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16
BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32
BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4
BM_Factorial 26 ns 26 ns 26608979 40320
BM_Factorial/real_time 26 ns 26 ns 26587936 40320
BM_CalculatePiRange/1 16 ns 16 ns 45704255 0
BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374
BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746
BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355
```
If this doesn't suit you, you can print each counter as a table column by
passing the flag `--benchmark_counters_tabular=true` to the benchmark
application. This is best for cases in which there are a lot of counters, or
a lot of lines per individual benchmark. Note that this will trigger a
reprinting of the table header any time the counter set changes between
individual benchmarks. Here's an example of corresponding output when
`--benchmark_counters_tabular=true` is passed:
```
---------------------------------------------------------------------------------------
Benchmark Time CPU Iterations Bar Bat Baz Foo
---------------------------------------------------------------------------------------
BM_UserCounter/threads:8 2198 ns 9953 ns 70688 16 40 24 8
BM_UserCounter/threads:1 9504 ns 9504 ns 73787 2 5 3 1
BM_UserCounter/threads:2 4775 ns 9550 ns 72606 4 10 6 2
BM_UserCounter/threads:4 2508 ns 9951 ns 70332 8 20 12 4
BM_UserCounter/threads:8 2055 ns 9933 ns 70344 16 40 24 8
BM_UserCounter/threads:16 1610 ns 9946 ns 70720 32 80 48 16
BM_UserCounter/threads:32 1192 ns 9948 ns 70496 64 160 96 32
BM_UserCounter/threads:4 2506 ns 9949 ns 70332 8 20 12 4
--------------------------------------------------------------
Benchmark Time CPU Iterations
--------------------------------------------------------------
BM_Factorial 26 ns 26 ns 26392245 40320
BM_Factorial/real_time 26 ns 26 ns 26494107 40320
BM_CalculatePiRange/1 15 ns 15 ns 45571597 0
BM_CalculatePiRange/8 74 ns 74 ns 9450212 3.28374
BM_CalculatePiRange/64 595 ns 595 ns 1173901 3.15746
BM_CalculatePiRange/512 4752 ns 4752 ns 147380 3.14355
BM_CalculatePiRange/4k 37970 ns 37972 ns 18453 3.14184
BM_CalculatePiRange/32k 303733 ns 303744 ns 2305 3.14162
BM_CalculatePiRange/256k 2434095 ns 2434186 ns 288 3.1416
BM_CalculatePiRange/1024k 9721140 ns 9721413 ns 71 3.14159
BM_CalculatePi/threads:8 2255 ns 9943 ns 70936
```
Note above the additional header printed when the benchmark changes from
``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does
not have the same counter set as ``BM_UserCounter``.
## Exiting Benchmarks in Error
When errors caused by external influences, such as file I/O and network
communication, occur within a benchmark, the
`State::SkipWithError(const char* msg)` function can be used to skip that run
of the benchmark and report the error. Note that only future iterations of the
`KeepRunning()` loop are skipped. Users may explicitly return to exit the
benchmark immediately.
The `SkipWithError(...)` function may be used at any point within the benchmark,
including before and after the `KeepRunning()` loop.
For example:
```c++
static void BM_test(benchmark::State& state) {
auto resource = GetResource();
if (!resource.good()) {
state.SkipWithError("Resource is not good!");
// KeepRunning() loop will not be entered.
}
while (state.KeepRunning()) {
auto data = resource.read_data();
if (!resource.good()) {
state.SkipWithError("Failed to read data!");
break; // Needed to skip the rest of the iteration.
}
do_stuff(data);
}
}
```
## Running a subset of the benchmarks
The `--benchmark_filter=<regex>` option can be used to only run the benchmarks
which match the specified `<regex>`. For example:
```bash
$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
Run on (1 X 2300 MHz CPU )
2016-06-25 19:34:24
Benchmark Time CPU Iterations
----------------------------------------------------
BM_memcpy/32 11 ns 11 ns 79545455
BM_memcpy/32k 2181 ns 2185 ns 324074
BM_memcpy/32 12 ns 12 ns 54687500
BM_memcpy/32k 1834 ns 1837 ns 357143
```
## Output Formats
The library supports multiple output formats. Use the
`--benchmark_format=<console|json|csv>` flag to set the format type. `console`
is the default format.
The Console format is intended to be a human readable format. By default
the format generates color output. Context is output on stderr and the
tabular data on stdout. Example tabular output looks like:
```
Benchmark Time(ns) CPU(ns) Iterations
----------------------------------------------------------------------
BM_SetInsert/1024/1 28928 29349 23853 133.097kB/s 33.2742k items/s
BM_SetInsert/1024/8 32065 32913 21375 949.487kB/s 237.372k items/s
BM_SetInsert/1024/10 33157 33648 21431 1.13369MB/s 290.225k items/s
```
The JSON format outputs human-readable JSON split into two top level attributes.
The `context` attribute contains information about the run in general, including
information about the CPU and the date.
The `benchmarks` attribute contains a list of every benchmark run. Example JSON
output looks like:
```json
{
"context": {
"date": "2015/03/17-18:40:25",
"num_cpus": 40,
"mhz_per_cpu": 2801,
"cpu_scaling_enabled": false,
"build_type": "debug"
},
"benchmarks": [
{
"name": "BM_SetInsert/1024/1",
"iterations": 94877,
"real_time": 29275,
"cpu_time": 29836,
"bytes_per_second": 134066,
"items_per_second": 33516
},
{
"name": "BM_SetInsert/1024/8",
"iterations": 21609,
"real_time": 32317,
"cpu_time": 32429,
"bytes_per_second": 986770,
"items_per_second": 246693
},
{
"name": "BM_SetInsert/1024/10",
"iterations": 21393,
"real_time": 32724,
"cpu_time": 33355,
"bytes_per_second": 1199226,
"items_per_second": 299807
}
]
}
```
The CSV format outputs comma-separated values. The `context` is output on stderr
and the CSV itself on stdout. Example CSV output looks like:
```
name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
```
## Output Files
The library supports writing the output of the benchmark to a file specified
by `--benchmark_out=<filename>`. The format of the output can be specified
using `--benchmark_out_format={json|console|csv}`. Specifying
`--benchmark_out` does not suppress the console output.
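For example, reusing the hypothetical `run_benchmarks.x` binary from the
filtering example above, the following writes JSON results to a file while
still printing the console output:
```bash
$ ./run_benchmarks.x --benchmark_out=results.json --benchmark_out_format=json
```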
## Debug vs Release
By default, benchmark builds as a debug library. You will see a warning in the output when this is the case. To build it as a release library instead, use:
```
cmake -DCMAKE_BUILD_TYPE=Release
```
To enable link-time optimisation, use
```
cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
```
## Linking against the library
When using gcc, it is necessary to link against pthread to avoid runtime exceptions.
This is due to how gcc implements std::thread.
See [issue #67](https://github.com/google/benchmark/issues/67) for more details.
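A minimal link line might look like the following; the source and output file
names are assumptions, and the library is presumed to be installed where the
compiler can find it:
```bash
$ g++ my_benchmark.cc -std=c++11 -lbenchmark -lpthread -o my_benchmark
```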
## Compiler Support
Google Benchmark uses C++11 when building the library. As such we require
a modern C++ toolchain, both compiler and standard library.
The following minimum versions are strongly recommended to build the library:
* GCC 4.8
* Clang 3.4
* Visual Studio 2013
* Intel 2015 Update 1
Anything older *may* work.
Note: Using the library and its headers in C++03 is supported. C++11 is only
required to build the library.
# Known Issues
### Windows
* Users must manually link `shlwapi.lib`. Failure to do so may result
in unresolved symbols.

56
vendor/github.com/google/benchmark/appveyor.yml generated vendored Normal file
View File

@ -0,0 +1,56 @@
version: '{build}'
image: Visual Studio 2017
configuration:
- Debug
- Release
environment:
matrix:
- compiler: msvc-15-seh
generator: "Visual Studio 15 2017"
- compiler: msvc-15-seh
generator: "Visual Studio 15 2017 Win64"
- compiler: msvc-14-seh
generator: "Visual Studio 14 2015"
- compiler: msvc-14-seh
generator: "Visual Studio 14 2015 Win64"
- compiler: msvc-12-seh
generator: "Visual Studio 12 2013"
- compiler: msvc-12-seh
generator: "Visual Studio 12 2013 Win64"
- compiler: gcc-5.3.0-posix
generator: "MinGW Makefiles"
cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin'
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
matrix:
fast_finish: true
install:
# git bash conflicts with MinGW makefiles
- if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%")
- if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%")
build_script:
- md _build -Force
- cd _build
- echo %configuration%
- cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" ..
- cmake --build . --config %configuration%
test_script:
- ctest -c %configuration% --timeout 300 --output-on-failure
artifacts:
- path: '_build/CMakeFiles/*.log'
name: logs
- path: '_build/Testing/**/*.xml'
name: test_results

View File

@ -0,0 +1,64 @@
# - Adds a compiler flag if it is supported by the compiler
#
# This function checks that the supplied compiler flag is supported and then
# adds it to the corresponding compiler flags
#
# add_cxx_compiler_flag(<FLAG> [<VARIANT>])
#
# - Example
#
# include(AddCXXCompilerFlag)
# add_cxx_compiler_flag(-Wall)
# add_cxx_compiler_flag(-no-strict-aliasing RELEASE)
# Requires CMake 2.6+
if(__add_cxx_compiler_flag)
return()
endif()
set(__add_cxx_compiler_flag INCLUDED)
include(CheckCXXCompilerFlag)
function(mangle_compiler_flag FLAG OUTPUT)
string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG)
string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG})
string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE)
endfunction(mangle_compiler_flag)
function(add_cxx_compiler_flag FLAG)
mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}")
check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
if(${MANGLED_FLAG})
set(VARIANT ${ARGV1})
if(ARGV1)
string(TOUPPER "_${VARIANT}" VARIANT)
endif()
set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
endif()
endfunction()
function(add_required_cxx_compiler_flag FLAG)
mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}")
check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
if(${MANGLED_FLAG})
set(VARIANT ${ARGV1})
if(ARGV1)
string(TOUPPER "_${VARIANT}" VARIANT)
endif()
set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE)
else()
message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler")
endif()
endfunction()

View File

@ -0,0 +1,46 @@
# - Compile and run code to check for C++ features
#
# This functions compiles a source file under the `cmake` folder
# and adds the corresponding `HAVE_[FILENAME]` flag to the CMake
# environment
#
# cxx_feature_check(<FLAG> [<VARIANT>])
#
# - Example
#
# include(CXXFeatureCheck)
# cxx_feature_check(STD_REGEX)
# Requires CMake 2.8.12+
if(__cxx_feature_check)
return()
endif()
set(__cxx_feature_check INCLUDED)
function(cxx_feature_check FILE)
string(TOLOWER ${FILE} FILE)
string(TOUPPER ${FILE} VAR)
string(TOUPPER "HAVE_${VAR}" FEATURE)
if (DEFINED HAVE_${VAR})
set(HAVE_${VAR} 1 CACHE INTERNAL "Feature test for ${FILE}" PARENT_SCOPE)
add_definitions(-DHAVE_${VAR})
return()
endif()
message("-- Performing Test ${FEATURE}")
try_run(RUN_${FEATURE} COMPILE_${FEATURE}
${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
if(RUN_${FEATURE} EQUAL 0)
message("-- Performing Test ${FEATURE} -- success")
set(HAVE_${VAR} 1 CACHE INTERNAL "Feature test for ${FILE}" PARENT_SCOPE)
add_definitions(-DHAVE_${VAR})
else()
if(NOT COMPILE_${FEATURE})
message("-- Performing Test ${FEATURE} -- failed to compile")
else()
message("-- Performing Test ${FEATURE} -- compiled but failed to run")
endif()
endif()
endfunction()

View File

@ -0,0 +1 @@
include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake")

View File

@ -0,0 +1,51 @@
# - Returns a version string from Git tags
#
# This function inspects the annotated git tags for the project and returns a string
# into a CMake variable
#
# get_git_version(<var>)
#
# - Example
#
# include(GetGitVersion)
# get_git_version(GIT_VERSION)
#
# Requires CMake 2.8.11+
find_package(Git)
if(__get_git_version)
return()
endif()
set(__get_git_version INCLUDED)
function(get_git_version var)
if(GIT_EXECUTABLE)
execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
RESULT_VARIABLE status
OUTPUT_VARIABLE GIT_VERSION
ERROR_QUIET)
if(${status})
set(GIT_VERSION "v0.0.0")
else()
string(STRIP ${GIT_VERSION} GIT_VERSION)
string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION})
endif()
# Work out if the repository is dirty
execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh
OUTPUT_QUIET
ERROR_QUIET)
execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD --
OUTPUT_VARIABLE GIT_DIFF_INDEX
ERROR_QUIET)
string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)
if (${GIT_DIRTY})
set(GIT_VERSION "${GIT_VERSION}-dirty")
endif()
else()
set(GIT_VERSION "v0.0.0")
endif()
message("-- git Version: ${GIT_VERSION}")
set(${var} ${GIT_VERSION} PARENT_SCOPE)
endfunction()

View File

@ -0,0 +1,12 @@
#include <gnuregex.h>
#include <string>
int main() {
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
return ec;
}
return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
}

View File

@ -0,0 +1,14 @@
#include <regex.h>
#include <string>
int main() {
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
return ec;
}
int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
regfree(&re);
return ret;
}

10
vendor/github.com/google/benchmark/cmake/std_regex.cpp generated vendored Normal file
View File

@ -0,0 +1,10 @@
#include <regex>
#include <string>
int main() {
const std::string str = "test0159";
std::regex re;
re = std::regex("^[a-z]+[0-9]+$",
std::regex_constants::extended | std::regex_constants::nosubs);
return std::regex_search(str, re) ? 0 : -1;
}

View File

@ -0,0 +1,7 @@
#include <chrono>
int main() {
typedef std::chrono::steady_clock Clock;
Clock::time_point tp = Clock::now();
((void)tp);
}

View File

@ -0,0 +1,4 @@
#define HAVE_THREAD_SAFETY_ATTRIBUTES
#include "../src/mutex.h"
int main() {}

59
vendor/github.com/google/benchmark/docs/tools.md generated vendored Normal file
View File

@ -0,0 +1,59 @@
# Benchmark Tools
## compare_bench.py
The `compare_bench.py` utility can be used to compare the results of two benchmarks.
The program is invoked like:
``` bash
$ compare_bench.py <old-benchmark> <new-benchmark> [benchmark options]...
```
Where `<old-benchmark>` and `<new-benchmark>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
The sample output using the JSON test files under `Inputs/` gives:
``` bash
$ ./compare_bench.py ./gbench/Inputs/test1_run1.json ./gbench/Inputs/test1_run2.json
Comparing ./gbench/Inputs/test1_run1.json to ./gbench/Inputs/test1_run2.json
Benchmark Time CPU
----------------------------------------------
BM_SameTimes +0.00 +0.00
BM_2xFaster -0.50 -0.50
BM_2xSlower +1.00 +1.00
BM_10PercentFaster -0.10 -0.10
BM_10PercentSlower +0.10 +0.10
```
When a benchmark executable is run, the raw output from the benchmark is printed in real time to stdout. The sample output using `benchmark/basic_test` for both arguments looks like:
```
./compare_bench.py test/basic_test test/basic_test --benchmark_filter=BM_empty.*
RUNNING: test/basic_test --benchmark_filter=BM_empty.*
Run on (4 X 4228.32 MHz CPU s)
2016-08-02 19:21:33
Benchmark Time CPU Iterations
--------------------------------------------------------------------
BM_empty 9 ns 9 ns 79545455
BM_empty/threads:4 4 ns 9 ns 75268816
BM_empty_stop_start 8 ns 8 ns 83333333
BM_empty_stop_start/threads:4 3 ns 8 ns 83333332
RUNNING: test/basic_test --benchmark_filter=BM_empty.*
Run on (4 X 4228.32 MHz CPU s)
2016-08-02 19:21:35
Benchmark Time CPU Iterations
--------------------------------------------------------------------
BM_empty 9 ns 9 ns 76086957
BM_empty/threads:4 4 ns 9 ns 76086956
BM_empty_stop_start 8 ns 8 ns 87500000
BM_empty_stop_start/threads:4 3 ns 8 ns 88607596
Comparing test/basic_test to test/basic_test
Benchmark Time CPU
---------------------------------------------------------
BM_empty +0.00 +0.00
BM_empty/threads:4 +0.00 +0.00
BM_empty_stop_start +0.00 +0.00
BM_empty_stop_start/threads:4 +0.00 +0.00
```
Obviously this example doesn't give any useful output, but it's intended to show the output format when `compare_bench.py` needs to run benchmarks.

File diff suppressed because it is too large

View File

@ -0,0 +1,27 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BENCHMARK_BENCHMARK_API_H_
#define BENCHMARK_BENCHMARK_API_H_
#ifdef __DEPRECATED
# ifndef BENCHMARK_WARNING_MSG
# warning the benchmark_api.h header has been deprecated and will be removed, please include benchmark.h instead
# else
BENCHMARK_WARNING_MSG("the benchmark_api.h header has been deprecated and will be removed, please include benchmark.h instead")
# endif
#endif
#include "benchmark.h" // For forward declaration of BenchmarkReporter
#endif // BENCHMARK_BENCHMARK_API_H_

View File

@ -0,0 +1,27 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BENCHMARK_REPORTER_H_
#define BENCHMARK_REPORTER_H_
#ifdef __DEPRECATED
# ifndef BENCHMARK_WARNING_MSG
# warning the reporter.h header has been deprecated and will be removed, please include benchmark.h instead
# else
BENCHMARK_WARNING_MSG("the reporter.h header has been deprecated and will be removed, please include benchmark.h instead")
# endif
#endif
#include "benchmark.h" // For forward declaration of BenchmarkReporter
#endif // BENCHMARK_REPORTER_H_

320
vendor/github.com/google/benchmark/mingw.py generated vendored Normal file
View File

@ -0,0 +1,320 @@
#! /usr/bin/env python
# encoding: utf-8
import argparse
import errno
import logging
import os
import platform
import re
import sys
import subprocess
import tempfile
try:
import winreg
except ImportError:
import _winreg as winreg
try:
import urllib.request as request
except ImportError:
import urllib as request
try:
import urllib.parse as parse
except ImportError:
import urlparse as parse
class EmptyLogger(object):
'''
Provides an implementation that performs no logging
'''
def debug(self, *k, **kw):
pass
def info(self, *k, **kw):
pass
def warn(self, *k, **kw):
pass
def error(self, *k, **kw):
pass
def critical(self, *k, **kw):
pass
def setLevel(self, *k, **kw):
pass
urls = (
'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
'repository.txt',
'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
'repository.txt'
)
'''
A list of mingw-build repositories
'''
def repository(urls = urls, log = EmptyLogger()):
'''
Downloads and parses the mingw-builds repository files
'''
log.info('getting mingw-builds repository')
versions = {}
re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
re_sub = r'http://downloads.sourceforge.net/project/\1'
for url in urls:
log.debug(' - requesting: %s', url)
socket = request.urlopen(url)
repo = socket.read()
if not isinstance(repo, str):
repo = repo.decode()
socket.close()
for entry in repo.split('\n')[:-1]:
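# Each repository entry has the form: version|arch|threading|exceptions|revN|url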
value = entry.split('|')
version = tuple([int(n) for n in value[0].strip().split('.')])
version = versions.setdefault(version, {})
arch = value[1].strip()
if arch == 'x32':
arch = 'i686'
elif arch == 'x64':
arch = 'x86_64'
arch = version.setdefault(arch, {})
threading = arch.setdefault(value[2].strip(), {})
exceptions = threading.setdefault(value[3].strip(), {})
revision = exceptions.setdefault(int(value[4].strip()[3:]),
re_sourceforge.sub(re_sub, value[5].strip()))
return versions
def find_in_path(file, path=None):
'''
Attempts to find an executable in the path
'''
if platform.system() == 'Windows':
file += '.exe'
if path is None:
path = os.environ.get('PATH', '')
if type(path) is type(''):
path = path.split(os.pathsep)
return list(filter(os.path.exists,
map(lambda dir, file=file: os.path.join(dir, file), path)))
def find_7zip(log = EmptyLogger()):
'''
Attempts to find 7zip for unpacking the mingw-build archives
'''
log.info('finding 7zip')
path = find_in_path('7z')
if not path:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
path, _ = winreg.QueryValueEx(key, 'Path')
path = [os.path.join(path, '7z.exe')]
log.debug('found \'%s\'', path[0])
return path[0]
def unpack(archive, location, log = EmptyLogger()):
'''
Unpacks a mingw-builds archive
'''
sevenzip = find_7zip(log)
log.info('unpacking %s', os.path.basename(archive))
cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
log.debug(' - %r', cmd)
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout = devnull)
def download(url, location, log = EmptyLogger()):
'''
Downloads and unpacks a mingw-builds archive
'''
log.info('downloading MinGW')
log.debug(' - url: %s', url)
log.debug(' - location: %s', location)
re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')
stream = request.urlopen(url)
try:
content = stream.getheader('Content-Disposition') or ''
except AttributeError:
content = stream.headers.getheader('Content-Disposition') or ''
matches = re_content.match(content)
if matches:
filename = matches.group(2)
else:
parsed = parse.urlparse(stream.geturl())
filename = os.path.basename(parsed.path)
try:
os.makedirs(location)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(location):
pass
else:
raise
archive = os.path.join(location, filename)
with open(archive, 'wb') as out:
while True:
buf = stream.read(1024)
if not buf:
break
out.write(buf)
unpack(archive, location, log = log)
os.remove(archive)
possible = os.path.join(location, 'mingw64')
if not os.path.exists(possible):
possible = os.path.join(location, 'mingw32')
if not os.path.exists(possible):
raise ValueError('Failed to find unpacked MinGW: ' + possible)
return possible
def root(location = None, arch = None, version = None, threading = None,
exceptions = None, revision = None, log = EmptyLogger()):
'''
Returns the root folder of a specific version of the mingw-builds variant
of gcc. Will download the compiler if needed
'''
# Get the repository if we don't have all the information
if not (arch and version and threading and exceptions and revision):
versions = repository(log = log)
# Determine some defaults
version = version or max(versions.keys())
if not arch:
arch = platform.machine().lower()
if arch == 'x86':
arch = 'i686'
elif arch == 'amd64':
arch = 'x86_64'
if not threading:
keys = versions[version][arch].keys()
if 'posix' in keys:
threading = 'posix'
elif 'win32' in keys:
threading = 'win32'
else:
threading = keys[0]
if not exceptions:
keys = versions[version][arch][threading].keys()
if 'seh' in keys:
exceptions = 'seh'
elif 'sjlj' in keys:
exceptions = 'sjlj'
else:
exceptions = keys[0]
if revision is None:
revision = max(versions[version][arch][threading][exceptions].keys())
if not location:
location = os.path.join(tempfile.gettempdir(), 'mingw-builds')
# Get the download url
url = versions[version][arch][threading][exceptions][revision]
# Tell the user whatzzup
log.info('finding MinGW %s', '.'.join(str(v) for v in version))
log.debug(' - arch: %s', arch)
log.debug(' - threading: %s', threading)
log.debug(' - exceptions: %s', exceptions)
log.debug(' - revision: %s', revision)
log.debug(' - url: %s', url)
# Store each specific revision differently
slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
slug = slug.format(
version = '.'.join(str(v) for v in version),
arch = arch,
threading = threading,
exceptions = exceptions,
revision = revision
)
if arch == 'x86_64':
root_dir = os.path.join(location, slug, 'mingw64')
elif arch == 'i686':
root_dir = os.path.join(location, slug, 'mingw32')
else:
raise ValueError('Unknown MinGW arch: ' + arch)
# Download if needed
if not os.path.exists(root_dir):
downloaded = download(url, os.path.join(location, slug), log = log)
if downloaded != root_dir:
raise ValueError('The location of mingw did not match\n%s\n%s'
% (downloaded, root_dir))
return root_dir
def str2ver(string):
'''
Converts a version string into a tuple
'''
try:
version = tuple(int(v) for v in string.split('.'))
if len(version) != 3:
raise ValueError()
except ValueError:
raise argparse.ArgumentTypeError(
'please provide a three digit version string')
return version
def main():
'''
Invoked when the script is run directly by the python interpreter
'''
parser = argparse.ArgumentParser(
description = 'Downloads a specific version of MinGW',
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--location',
help = 'the location to download the compiler to',
default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'],
help = 'the target MinGW architecture string')
parser.add_argument('--version', type = str2ver,
help = 'the version of GCC to download')
parser.add_argument('--threading', choices = ['posix', 'win32'],
help = 'the threading type of the compiler')
parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'],
help = 'the method to throw exceptions')
parser.add_argument('--revision', type=int,
help = 'the revision of the MinGW release')
group = parser.add_mutually_exclusive_group()
group.add_argument('-v', '--verbose', action='store_true',
help='increase the script output verbosity')
group.add_argument('-q', '--quiet', action='store_true',
help='only print errors and warnings')
args = parser.parse_args()
# Create the logger
logger = logging.getLogger('mingw')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
if args.quiet:
logger.setLevel(logging.WARN)
if args.verbose:
logger.setLevel(logging.DEBUG)
# Get MinGW
root_dir = root(location = args.location, arch = args.arch,
version = args.version, threading = args.threading,
exceptions = args.exceptions, revision = args.revision,
log = logger)
sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin'))
if __name__ == '__main__':
try:
main()
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
sys.exit(1)
except OSError as e:
sys.stderr.write('OS error: %s\n' % e)
sys.exit(1)
except KeyboardInterrupt as e:
sys.stderr.write('Killed\n')
sys.exit(1)

78
vendor/github.com/google/benchmark/src/CMakeLists.txt generated vendored Normal file
View File

@ -0,0 +1,78 @@
# Allow the source files to find headers in src/
include_directories(${PROJECT_SOURCE_DIR}/src)
if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
endif()
file(GLOB
SOURCE_FILES
*.cc
${PROJECT_SOURCE_DIR}/include/benchmark/*.h
${CMAKE_CURRENT_SOURCE_DIR}/*.h)
add_library(benchmark ${SOURCE_FILES})
set_target_properties(benchmark PROPERTIES
OUTPUT_NAME "benchmark"
VERSION ${GENERIC_LIB_VERSION}
SOVERSION ${GENERIC_LIB_SOVERSION}
)
target_include_directories(benchmark PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
)
# Link threads.
target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
find_library(LIBRT rt)
if(LIBRT)
target_link_libraries(benchmark ${LIBRT})
endif()
# We need extra libraries on Windows
if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
target_link_libraries(benchmark Shlwapi)
endif()
set(include_install_dir "include")
set(lib_install_dir "lib/")
set(bin_install_dir "bin/")
set(config_install_dir "lib/cmake/${PROJECT_NAME}")
set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake")
set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
set(targets_export_name "${PROJECT_NAME}Targets")
set(namespace "${PROJECT_NAME}::")
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
"${version_config}" VERSION ${GIT_VERSION} COMPATIBILITY SameMajorVersion
)
configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
# Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
install(
TARGETS benchmark
EXPORT ${targets_export_name}
ARCHIVE DESTINATION ${lib_install_dir}
LIBRARY DESTINATION ${lib_install_dir}
RUNTIME DESTINATION ${bin_install_dir}
INCLUDES DESTINATION ${include_install_dir})
install(
DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
DESTINATION ${include_install_dir}
FILES_MATCHING PATTERN "*.*h")
install(
FILES "${project_config}" "${version_config}"
DESTINATION "${config_install_dir}")
install(
EXPORT "${targets_export_name}"
NAMESPACE "${namespace}"
DESTINATION "${config_install_dir}")

33
vendor/github.com/google/benchmark/src/arraysize.h generated vendored Normal file
View File

@ -0,0 +1,33 @@
#ifndef BENCHMARK_ARRAYSIZE_H_
#define BENCHMARK_ARRAYSIZE_H_
#include "internal_macros.h"
namespace benchmark {
namespace internal {
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
// a pointer by mistake, you will get a compile-time error.
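//
// Example (illustrative):
//   int primes[] = {2, 3, 5, 7};
//   static_assert(arraysize(primes) == 4, "arraysize yields element count");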
//
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];
// That gcc wants both of these prototypes seems mysterious. VC, for
// its part, can't decide which to use (another mystery). Matching of
// template overloads: the final frontier.
#ifndef COMPILER_MSVC
template <typename T, size_t N>
char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif
#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
} // end namespace internal
} // end namespace benchmark
#endif // BENCHMARK_ARRAYSIZE_H_

715
vendor/github.com/google/benchmark/src/benchmark.cc generated vendored Normal file
View File

@ -0,0 +1,715 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <thread>
#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
#include "stat.h"
#include "string_util.h"
#include "sysinfo.h"
#include "timers.h"
DEFINE_bool(benchmark_list_tests, false,
"Print a list of benchmarks. This option overrides all other "
"options.");
DEFINE_string(benchmark_filter, ".",
"A regular expression that specifies the set of benchmarks "
"to execute. If this flag is empty, no benchmarks are run. "
"If this flag is the string \"all\", all benchmarks linked "
"into the process are run.");
DEFINE_double(benchmark_min_time, 0.5,
"Minimum number of seconds we should run benchmark before "
"results are considered significant. For cpu-time based "
"tests, this is the lower bound on the total cpu time "
"used by all threads that make up the test. For real-time "
"based tests, this is the lower bound on the elapsed time "
"of the benchmark execution, regardless of number of "
"threads.");
DEFINE_int32(benchmark_repetitions, 1,
"The number of runs of each benchmark. If greater than 1, the "
"mean and standard deviation of the runs will be reported.");
DEFINE_bool(benchmark_report_aggregates_only, false,
"Report the result of each benchmark repetitions. When 'true' is "
"specified only the mean, standard deviation, and other statistics "
"are reported for repeated benchmarks.");
DEFINE_string(benchmark_format, "console",
"The format to use for console output. Valid values are "
"'console', 'json', or 'csv'.");
DEFINE_string(benchmark_out_format, "json",
"The format to use for file output. Valid values are "
"'console', 'json', or 'csv'.");
DEFINE_string(benchmark_out, "", "The file to write additional output to");
DEFINE_string(benchmark_color, "auto",
"Whether to use colors in the output. Valid values: "
"'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use "
"colors if the output is being sent to a terminal and the TERM "
"environment variable is set to a terminal type that supports "
"colors.");
DEFINE_bool(benchmark_counters_tabular, false,
"Whether to use tabular format when printing user counters to "
"the console. Valid values: 'true'/'yes'/1, 'false'/'no'/0."
"Defaults to false.");
DEFINE_int32(v, 0, "The level of verbose logging to output");
namespace benchmark {
namespace internal {
void UseCharPointer(char const volatile*) {}
} // end namespace internal
namespace {
static const size_t kMaxIterations = 1000000000;
} // end namespace
namespace internal {
class ThreadManager {
public:
ThreadManager(int num_threads)
: alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
return benchmark_mutex_;
}
bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
return start_stop_barrier_.wait();
}
void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
start_stop_barrier_.removeThread();
if (--alive_threads_ == 0) {
MutexLock lock(end_cond_mutex_);
end_condition_.notify_all();
}
}
void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
MutexLock lock(end_cond_mutex_);
end_condition_.wait(lock.native_handle(),
[this]() { return alive_threads_ == 0; });
}
public:
struct Result {
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
int64_t bytes_processed = 0;
int64_t items_processed = 0;
int complexity_n = 0;
std::string report_label_;
std::string error_message_;
bool has_error_ = false;
UserCounters counters;
};
GUARDED_BY(GetBenchmarkMutex()) Result results;
private:
mutable Mutex benchmark_mutex_;
std::atomic<int> alive_threads_;
Barrier start_stop_barrier_;
Mutex end_cond_mutex_;
Condition end_condition_;
};
// Timer management class
class ThreadTimer {
public:
ThreadTimer() = default;
// Called by each thread
void StartTimer() {
running_ = true;
start_real_time_ = ChronoClockNow();
start_cpu_time_ = ThreadCPUUsage();
}
// Called by each thread
void StopTimer() {
CHECK(running_);
running_ = false;
real_time_used_ += ChronoClockNow() - start_real_time_;
cpu_time_used_ += ThreadCPUUsage() - start_cpu_time_;
}
// Called by each thread
void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
bool running() const { return running_; }
// REQUIRES: timer is not running
double real_time_used() {
CHECK(!running_);
return real_time_used_;
}
// REQUIRES: timer is not running
double cpu_time_used() {
CHECK(!running_);
return cpu_time_used_;
}
// REQUIRES: timer is not running
double manual_time_used() {
CHECK(!running_);
return manual_time_used_;
}
private:
bool running_ = false; // Is the timer running
double start_real_time_ = 0; // If running_
double start_cpu_time_ = 0; // If running_
// Accumulated time so far (does not contain current slice if running_)
double real_time_used_ = 0;
double cpu_time_used_ = 0;
// Manually set iteration time. User sets this with SetIterationTime(seconds).
double manual_time_used_ = 0;
};
namespace {
BenchmarkReporter::Run CreateRunReport(
const benchmark::internal::Benchmark::Instance& b,
const internal::ThreadManager::Result& results, size_t iters,
double seconds) {
// Create report about this benchmark run.
BenchmarkReporter::Run report;
report.benchmark_name = b.name;
report.error_occurred = results.has_error_;
report.error_message = results.error_message_;
report.report_label = results.report_label_;
// Report the total iterations across all threads.
report.iterations = static_cast<int64_t>(iters) * b.threads;
report.time_unit = b.time_unit;
if (!report.error_occurred) {
double bytes_per_second = 0;
if (results.bytes_processed > 0 && seconds > 0.0) {
bytes_per_second = (results.bytes_processed / seconds);
}
double items_per_second = 0;
if (results.items_processed > 0 && seconds > 0.0) {
items_per_second = (results.items_processed / seconds);
}
if (b.use_manual_time) {
report.real_accumulated_time = results.manual_time_used;
} else {
report.real_accumulated_time = results.real_time_used;
}
report.cpu_accumulated_time = results.cpu_time_used;
report.bytes_per_second = bytes_per_second;
report.items_per_second = items_per_second;
report.complexity_n = results.complexity_n;
report.complexity = b.complexity;
report.complexity_lambda = b.complexity_lambda;
report.counters = results.counters;
internal::Finish(&report.counters, seconds, b.threads);
}
return report;
}
// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into *total.
void RunInThread(const benchmark::internal::Benchmark::Instance* b,
size_t iters, int thread_id,
internal::ThreadManager* manager) {
internal::ThreadTimer timer;
State st(iters, b->arg, thread_id, b->threads, &timer, manager);
b->benchmark->Run(st);
CHECK(st.iterations() == st.max_iterations)
<< "Benchmark returned before State::KeepRunning() returned false!";
{
MutexLock l(manager->GetBenchmarkMutex());
internal::ThreadManager::Result& results = manager->results;
results.cpu_time_used += timer.cpu_time_used();
results.real_time_used += timer.real_time_used();
results.manual_time_used += timer.manual_time_used();
results.bytes_processed += st.bytes_processed();
results.items_processed += st.items_processed();
results.complexity_n += st.complexity_length_n();
internal::Increment(&results.counters, st.counters);
}
manager->NotifyThreadComplete();
}
std::vector<BenchmarkReporter::Run> RunBenchmark(
const benchmark::internal::Benchmark::Instance& b,
std::vector<BenchmarkReporter::Run>* complexity_reports) {
std::vector<BenchmarkReporter::Run> reports; // return value
const bool has_explicit_iteration_count = b.iterations != 0;
size_t iters = has_explicit_iteration_count ? b.iterations : 1;
std::unique_ptr<internal::ThreadManager> manager;
std::vector<std::thread> pool(b.threads - 1);
const int repeats =
b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions;
const bool report_aggregates_only =
repeats != 1 &&
(b.report_mode == internal::RM_Unspecified
? FLAGS_benchmark_report_aggregates_only
: b.report_mode == internal::RM_ReportAggregatesOnly);
for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
for (;;) {
// Try benchmark
VLOG(2) << "Running " << b.name << " for " << iters << "\n";
manager.reset(new internal::ThreadManager(b.threads));
for (std::size_t ti = 0; ti < pool.size(); ++ti) {
pool[ti] = std::thread(&RunInThread, &b, iters,
static_cast<int>(ti + 1), manager.get());
}
RunInThread(&b, iters, 0, manager.get());
manager->WaitForAllThreads();
for (std::thread& thread : pool) thread.join();
internal::ThreadManager::Result results;
{
MutexLock l(manager->GetBenchmarkMutex());
results = manager->results;
}
manager.reset();
// Adjust real/manual time stats since they were reported per thread.
results.real_time_used /= b.threads;
results.manual_time_used /= b.threads;
VLOG(2) << "Ran in " << results.cpu_time_used << "/"
<< results.real_time_used << "\n";
// Base decisions off of real time if requested by this benchmark.
double seconds = results.cpu_time_used;
if (b.use_manual_time) {
seconds = results.manual_time_used;
} else if (b.use_real_time) {
seconds = results.real_time_used;
}
const double min_time =
!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time;
// Determine if this run should be reported; either it has
// run for a sufficient amount of time or an error was reported.
const bool should_report = repetition_num > 0
|| has_explicit_iteration_count // An exact iteration count was requested
|| results.has_error_
|| iters >= kMaxIterations
|| seconds >= min_time // the elapsed time is large enough
// CPU time is specified but the elapsed real time greatly exceeds the
// minimum time. Note that user-provided timers are exempt from this
// sanity check.
|| ((results.real_time_used >= 5 * min_time) && !b.use_manual_time);
if (should_report) {
BenchmarkReporter::Run report =
CreateRunReport(b, results, iters, seconds);
if (!report.error_occurred && b.complexity != oNone)
complexity_reports->push_back(report);
reports.push_back(report);
break;
}
// Determine by how much the iteration count should be increased.
// Note: Avoid division by zero with max(seconds, 1ns).
double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
// If our last run was at least 10% of FLAGS_benchmark_min_time then we
// use the multiplier directly. Otherwise we use at most 10 times
// expansion.
// NOTE: When the last run was at least 10% of the min time the max
// expansion should be 14x.
bool is_significant = (seconds / min_time) > 0.1;
multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
if (multiplier <= 1.0) multiplier = 2.0;
double next_iters = std::max(multiplier * iters, iters + 1.0);
if (next_iters > kMaxIterations) {
next_iters = kMaxIterations;
}
VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
iters = static_cast<int>(next_iters + 0.5);
}
}
// Calculate additional statistics
auto stat_reports = ComputeStats(reports);
if ((b.complexity != oNone) && b.last_benchmark_instance) {
auto additional_run_stats = ComputeBigO(*complexity_reports);
stat_reports.insert(stat_reports.end(), additional_run_stats.begin(),
additional_run_stats.end());
complexity_reports->clear();
}
if (report_aggregates_only) reports.clear();
reports.insert(reports.end(), stat_reports.begin(), stat_reports.end());
return reports;
}
} // namespace
} // namespace internal
State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager)
: started_(false),
finished_(false),
total_iterations_(0),
range_(ranges),
bytes_processed_(0),
items_processed_(0),
complexity_n_(0),
error_occurred_(false),
counters(),
thread_index(thread_i),
threads(n_threads),
max_iterations(max_iters),
timer_(timer),
manager_(manager) {
CHECK(max_iterations != 0) << "At least one iteration must be run";
CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
}
void State::PauseTiming() {
// Add in time accumulated so far
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StopTimer();
}
void State::ResumeTiming() {
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StartTimer();
}
void State::SkipWithError(const char* msg) {
CHECK(msg);
error_occurred_ = true;
{
MutexLock l(manager_->GetBenchmarkMutex());
if (manager_->results.has_error_ == false) {
manager_->results.error_message_ = msg;
manager_->results.has_error_ = true;
}
}
total_iterations_ = max_iterations;
if (timer_->running()) timer_->StopTimer();
}
void State::SetIterationTime(double seconds) {
timer_->SetIterationTime(seconds);
}
void State::SetLabel(const char* label) {
MutexLock l(manager_->GetBenchmarkMutex());
manager_->results.report_label_ = label;
}
void State::StartKeepRunning() {
CHECK(!started_ && !finished_);
started_ = true;
manager_->StartStopBarrier();
if (!error_occurred_) ResumeTiming();
}
void State::FinishKeepRunning() {
CHECK(started_ && (!finished_ || error_occurred_));
if (!error_occurred_) {
PauseTiming();
}
// Total iterations is now one greater than max_iterations; clamp it.
total_iterations_ = max_iterations;
finished_ = true;
manager_->StartStopBarrier();
}
namespace internal {
namespace {
void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
BenchmarkReporter* console_reporter,
BenchmarkReporter* file_reporter) {
// Note the file_reporter can be null.
CHECK(console_reporter != nullptr);
// Determine the width of the name field using a minimum width of 10.
bool has_repetitions = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
for (const Benchmark::Instance& benchmark : benchmarks) {
name_field_width =
std::max<size_t>(name_field_width, benchmark.name.size());
has_repetitions |= benchmark.repetitions > 1;
}
if (has_repetitions) name_field_width += std::strlen("_stddev");
// Print header here
BenchmarkReporter::Context context;
context.num_cpus = NumCPUs();
context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f;
context.cpu_scaling_enabled = CpuScalingEnabled();
context.name_field_width = name_field_width;
// Keep track of running times of all instances of the current benchmark
std::vector<BenchmarkReporter::Run> complexity_reports;
// We flush streams after invoking reporter methods that write to them. This
// ensures users get timely updates even when streams are not line-buffered.
auto flushStreams = [](BenchmarkReporter* reporter) {
if (!reporter) return;
std::flush(reporter->GetOutputStream());
std::flush(reporter->GetErrorStream());
};
if (console_reporter->ReportContext(context) &&
(!file_reporter || file_reporter->ReportContext(context))) {
flushStreams(console_reporter);
flushStreams(file_reporter);
for (const auto& benchmark : benchmarks) {
std::vector<BenchmarkReporter::Run> reports =
RunBenchmark(benchmark, &complexity_reports);
console_reporter->ReportRuns(reports);
if (file_reporter) file_reporter->ReportRuns(reports);
flushStreams(console_reporter);
flushStreams(file_reporter);
}
}
console_reporter->Finalize();
if (file_reporter) file_reporter->Finalize();
flushStreams(console_reporter);
flushStreams(file_reporter);
}
std::unique_ptr<BenchmarkReporter> CreateReporter(
std::string const& name, ConsoleReporter::OutputOptions output_opts) {
typedef std::unique_ptr<BenchmarkReporter> PtrType;
if (name == "console") {
return PtrType(new ConsoleReporter(output_opts));
} else if (name == "json") {
return PtrType(new JSONReporter);
} else if (name == "csv") {
return PtrType(new CSVReporter);
} else {
std::cerr << "Unexpected format: '" << name << "'\n";
std::exit(1);
}
}
} // end namespace
bool IsZero(double n) {
return std::abs(n) < std::numeric_limits<double>::epsilon();
}
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
int output_opts = ConsoleReporter::OO_Defaults;
if ((FLAGS_benchmark_color == "auto" && IsColorTerminal()) ||
IsTruthyFlagValue(FLAGS_benchmark_color)) {
output_opts |= ConsoleReporter::OO_Color;
} else {
output_opts &= ~ConsoleReporter::OO_Color;
}
if(force_no_color) {
output_opts &= ~ConsoleReporter::OO_Color;
}
if(FLAGS_benchmark_counters_tabular) {
output_opts |= ConsoleReporter::OO_Tabular;
} else {
output_opts &= ~ConsoleReporter::OO_Tabular;
}
return static_cast< ConsoleReporter::OutputOptions >(output_opts);
}
} // end namespace internal
size_t RunSpecifiedBenchmarks() {
return RunSpecifiedBenchmarks(nullptr, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter) {
return RunSpecifiedBenchmarks(console_reporter, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
BenchmarkReporter* file_reporter) {
std::string spec = FLAGS_benchmark_filter;
if (spec.empty() || spec == "all")
spec = "."; // Regexp that matches all benchmarks
// Setup the reporters
std::ofstream output_file;
std::unique_ptr<BenchmarkReporter> default_console_reporter;
std::unique_ptr<BenchmarkReporter> default_file_reporter;
if (!console_reporter) {
default_console_reporter = internal::CreateReporter(
FLAGS_benchmark_format, internal::GetOutputOptions());
console_reporter = default_console_reporter.get();
}
auto& Out = console_reporter->GetOutputStream();
auto& Err = console_reporter->GetErrorStream();
std::string const& fname = FLAGS_benchmark_out;
if (fname.empty() && file_reporter) {
Err << "A custom file reporter was provided but "
"--benchmark_out=<file> was not specified."
<< std::endl;
std::exit(1);
}
if (!fname.empty()) {
output_file.open(fname);
if (!output_file.is_open()) {
Err << "invalid file name: '" << fname << std::endl;
std::exit(1);
}
if (!file_reporter) {
default_file_reporter = internal::CreateReporter(
FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
file_reporter = default_file_reporter.get();
}
file_reporter->SetOutputStream(&output_file);
file_reporter->SetErrorStream(&output_file);
}
std::vector<internal::Benchmark::Instance> benchmarks;
if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
if (benchmarks.empty()) {
Err << "Failed to match any benchmarks against regex: " << spec << "\n";
return 0;
}
if (FLAGS_benchmark_list_tests) {
for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n";
} else {
internal::RunBenchmarks(benchmarks, console_reporter, file_reporter);
}
return benchmarks.size();
}
namespace internal {
void PrintUsageAndExit() {
fprintf(stdout,
"benchmark"
" [--benchmark_list_tests={true|false}]\n"
" [--benchmark_filter=<regex>]\n"
" [--benchmark_min_time=<min_time>]\n"
" [--benchmark_repetitions=<num_repetitions>]\n"
" [--benchmark_report_aggregates_only={true|false}\n"
" [--benchmark_format=<console|json|csv>]\n"
" [--benchmark_out=<filename>]\n"
" [--benchmark_out_format=<json|console|csv>]\n"
" [--benchmark_color={auto|true|false}]\n"
" [--benchmark_counters_tabular={true|false}]\n"
" [--v=<verbosity>]\n");
exit(0);
}
void ParseCommandLineFlags(int* argc, char** argv) {
using namespace benchmark;
for (int i = 1; i < *argc; ++i) {
if (ParseBoolFlag(argv[i], "benchmark_list_tests",
&FLAGS_benchmark_list_tests) ||
ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
ParseDoubleFlag(argv[i], "benchmark_min_time",
&FLAGS_benchmark_min_time) ||
ParseInt32Flag(argv[i], "benchmark_repetitions",
&FLAGS_benchmark_repetitions) ||
ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
&FLAGS_benchmark_report_aggregates_only) ||
ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
ParseStringFlag(argv[i], "benchmark_out_format",
&FLAGS_benchmark_out_format) ||
ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
// "color_print" is the deprecated name for "benchmark_color".
// TODO: Remove this.
ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
ParseBoolFlag(argv[i], "benchmark_counters_tabular",
&FLAGS_benchmark_counters_tabular) ||
ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
--(*argc);
--i;
} else if (IsFlag(argv[i], "help")) {
PrintUsageAndExit();
}
}
for (auto const* flag :
{&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
if (*flag != "console" && *flag != "json" && *flag != "csv") {
PrintUsageAndExit();
}
if (FLAGS_benchmark_color.empty()) {
PrintUsageAndExit();
}
}
int InitializeStreams() {
static std::ios_base::Init init;
return 0;
}
} // end namespace internal
void Initialize(int* argc, char** argv) {
internal::ParseCommandLineFlags(argc, argv);
internal::LogLevel() = FLAGS_v;
}
bool ReportUnrecognizedArguments(int argc, char** argv) {
for (int i = 1; i < argc; ++i) {
fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
}
return argc > 1;
}
} // end namespace benchmark
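For orientation, the State and RunInThread() machinery above is driven by ordinary user benchmark functions. A minimal sketch of that calling convention (BM_StringCopy and its arguments are illustrative, not part of this change): the KeepRunning() loop runs until st.iterations() equals st.max_iterations, which is exactly the invariant the CHECK in RunInThread() verifies.

    #include <string>
    #include "benchmark/benchmark.h"

    // Each KeepRunning() iteration is timed by the ThreadTimer managed in
    // RunInThread(); DoNotOptimize keeps the copy from being optimized away.
    static void BM_StringCopy(benchmark::State& state) {
      std::string src(state.range(0), 'x');
      while (state.KeepRunning()) {
        std::string copy(src);
        benchmark::DoNotOptimize(copy);
      }
      // Feeds results.bytes_processed, which CreateRunReport() turns into
      // the bytes_per_second figure.
      state.SetBytesProcessed(
          static_cast<int64_t>(state.iterations()) * state.range(0));
    }
    BENCHMARK(BM_StringCopy)->Arg(8)->Arg(64);

    BENCHMARK_MAIN();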


@ -0,0 +1,46 @@
#ifndef BENCHMARK_API_INTERNAL_H
#define BENCHMARK_API_INTERNAL_H
#include "benchmark/benchmark.h"
#include <cmath>
#include <iosfwd>
#include <limits>
#include <string>
#include <vector>
namespace benchmark {
namespace internal {
// Information kept per benchmark we may want to run
struct Benchmark::Instance {
std::string name;
Benchmark* benchmark;
ReportMode report_mode;
std::vector<int> arg;
TimeUnit time_unit;
int range_multiplier;
bool use_real_time;
bool use_manual_time;
BigO complexity;
BigOFunc* complexity_lambda;
UserCounters counters;
bool last_benchmark_instance;
int repetitions;
double min_time;
size_t iterations;
int threads; // Number of concurrent threads to use
};
bool FindBenchmarksInternal(const std::string& re,
std::vector<Benchmark::Instance>* benchmarks,
std::ostream* Err);
bool IsZero(double n);
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);
} // end namespace internal
} // end namespace benchmark
#endif // BENCHMARK_API_INTERNAL_H


@ -0,0 +1,467 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>
#include <thread>
#include "check.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
#include "stat.h"
#include "string_util.h"
#include "sysinfo.h"
#include "timers.h"
namespace benchmark {
namespace {
// For non-dense Range, intermediate values are powers of kRangeMultiplier.
static const int kRangeMultiplier = 8;
// The size of a benchmark family is the number of inputs on which the
// benchmark is repeated. If this is "large" then warn the user during configuration.
static const size_t kMaxFamilySize = 100;
} // end namespace
namespace internal {
//=============================================================================//
// BenchmarkFamilies
//=============================================================================//
// Class for managing registered benchmarks. Note that each registered
// benchmark identifies a family of related benchmarks to run.
class BenchmarkFamilies {
public:
static BenchmarkFamilies* GetInstance();
// Registers a benchmark family and returns the index assigned to it.
size_t AddBenchmark(std::unique_ptr<Benchmark> family);
// Clear all registered benchmark families.
void ClearBenchmarks();
// Extract the list of benchmark instances that match the specified
// regular expression.
bool FindBenchmarks(const std::string& re,
std::vector<Benchmark::Instance>* benchmarks,
std::ostream* Err);
private:
BenchmarkFamilies() {}
std::vector<std::unique_ptr<Benchmark>> families_;
Mutex mutex_;
};
BenchmarkFamilies* BenchmarkFamilies::GetInstance() {
static BenchmarkFamilies instance;
return &instance;
}
size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family) {
MutexLock l(mutex_);
size_t index = families_.size();
families_.push_back(std::move(family));
return index;
}
void BenchmarkFamilies::ClearBenchmarks() {
MutexLock l(mutex_);
families_.clear();
families_.shrink_to_fit();
}
bool BenchmarkFamilies::FindBenchmarks(
const std::string& spec, std::vector<Benchmark::Instance>* benchmarks,
std::ostream* ErrStream) {
CHECK(ErrStream);
auto& Err = *ErrStream;
// Make regular expression out of command-line flag
std::string error_msg;
Regex re;
if (!re.Init(spec, &error_msg)) {
Err << "Could not compile benchmark re: " << error_msg << std::endl;
return false;
}
// Special list of thread counts to use when none are specified
const std::vector<int> one_thread = {1};
MutexLock l(mutex_);
for (std::unique_ptr<Benchmark>& family : families_) {
// Family was deleted or benchmark doesn't match
if (!family) continue;
if (family->ArgsCnt() == -1) {
family->Args({});
}
const std::vector<int>* thread_counts =
(family->thread_counts_.empty()
? &one_thread
: &static_cast<const std::vector<int>&>(family->thread_counts_));
const size_t family_size = family->args_.size() * thread_counts->size();
// The benchmark will be run with at least 'family_size' different inputs.
// If 'family_size' is very large warn the user.
if (family_size > kMaxFamilySize) {
Err << "The number of inputs is very large. " << family->name_
<< " will be repeated at least " << family_size << " times.\n";
}
// Reserve in the special case of the regex ".", since we know the final
// family size.
if (spec == ".") benchmarks->reserve(family_size);
for (auto const& args : family->args_) {
for (int num_threads : *thread_counts) {
Benchmark::Instance instance;
instance.name = family->name_;
instance.benchmark = family.get();
instance.report_mode = family->report_mode_;
instance.arg = args;
instance.time_unit = family->time_unit_;
instance.range_multiplier = family->range_multiplier_;
instance.min_time = family->min_time_;
instance.iterations = family->iterations_;
instance.repetitions = family->repetitions_;
instance.use_real_time = family->use_real_time_;
instance.use_manual_time = family->use_manual_time_;
instance.complexity = family->complexity_;
instance.complexity_lambda = family->complexity_lambda_;
instance.threads = num_threads;
// Add arguments to instance name
size_t arg_i = 0;
for (auto const& arg : args) {
instance.name += "/";
if (arg_i < family->arg_names_.size()) {
const auto& arg_name = family->arg_names_[arg_i];
if (!arg_name.empty()) {
instance.name +=
StringPrintF("%s:", family->arg_names_[arg_i].c_str());
}
}
instance.name += StringPrintF("%d", arg);
++arg_i;
}
if (!IsZero(family->min_time_))
instance.name += StringPrintF("/min_time:%0.3f", family->min_time_);
if (family->iterations_ != 0)
instance.name += StringPrintF("/iterations:%d", family->iterations_);
if (family->repetitions_ != 0)
instance.name += StringPrintF("/repeats:%d", family->repetitions_);
if (family->use_manual_time_) {
instance.name += "/manual_time";
} else if (family->use_real_time_) {
instance.name += "/real_time";
}
// Add the number of threads used to the name
if (!family->thread_counts_.empty()) {
instance.name += StringPrintF("/threads:%d", instance.threads);
}
if (re.Match(instance.name)) {
instance.last_benchmark_instance = (&args == &family->args_.back());
benchmarks->push_back(std::move(instance));
}
}
}
}
return true;
}
Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
std::unique_ptr<Benchmark> bench_ptr(bench);
BenchmarkFamilies* families = BenchmarkFamilies::GetInstance();
families->AddBenchmark(std::move(bench_ptr));
return bench;
}
// FIXME: This function is a hack so that benchmark.cc can access
// `BenchmarkFamilies`
bool FindBenchmarksInternal(const std::string& re,
std::vector<Benchmark::Instance>* benchmarks,
std::ostream* Err) {
return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
}
//=============================================================================//
// Benchmark
//=============================================================================//
Benchmark::Benchmark(const char* name)
: name_(name),
report_mode_(RM_Unspecified),
time_unit_(kNanosecond),
range_multiplier_(kRangeMultiplier),
min_time_(0),
iterations_(0),
repetitions_(0),
use_real_time_(false),
use_manual_time_(false),
complexity_(oNone),
complexity_lambda_(nullptr) {}
Benchmark::~Benchmark() {}
void Benchmark::AddRange(std::vector<int>* dst, int lo, int hi, int mult) {
CHECK_GE(lo, 0);
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
// Add "lo"
dst->push_back(lo);
static const int kint32max = std::numeric_limits<int32_t>::max();
// Now space out the benchmarks in multiples of "mult"
for (int32_t i = 1; i < kint32max / mult; i *= mult) {
if (i >= hi) break;
if (i > lo) {
dst->push_back(i);
}
}
// Add "hi" (if different from "lo")
if (hi != lo) {
dst->push_back(hi);
}
}
Benchmark* Benchmark::Arg(int x) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
args_.push_back({x});
return this;
}
Benchmark* Benchmark::Unit(TimeUnit unit) {
time_unit_ = unit;
return this;
}
Benchmark* Benchmark::Range(int start, int limit) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
std::vector<int> arglist;
AddRange(&arglist, start, limit, range_multiplier_);
for (int i : arglist) {
args_.push_back({i});
}
return this;
}
Benchmark* Benchmark::Ranges(const std::vector<std::pair<int, int>>& ranges) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
std::vector<std::vector<int>> arglists(ranges.size());
std::size_t total = 1;
for (std::size_t i = 0; i < ranges.size(); i++) {
AddRange(&arglists[i], ranges[i].first, ranges[i].second,
range_multiplier_);
total *= arglists[i].size();
}
std::vector<std::size_t> ctr(arglists.size(), 0);
for (std::size_t i = 0; i < total; i++) {
std::vector<int> tmp;
tmp.reserve(arglists.size());
for (std::size_t j = 0; j < arglists.size(); j++) {
tmp.push_back(arglists[j].at(ctr[j]));
}
args_.push_back(std::move(tmp));
for (std::size_t j = 0; j < arglists.size(); j++) {
if (ctr[j] + 1 < arglists[j].size()) {
++ctr[j];
break;
}
ctr[j] = 0;
}
}
return this;
}
Benchmark* Benchmark::ArgName(const std::string& name) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
arg_names_ = {name};
return this;
}
Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
arg_names_ = names;
return this;
}
Benchmark* Benchmark::DenseRange(int start, int limit, int step) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
CHECK_GE(start, 0);
CHECK_LE(start, limit);
for (int arg = start; arg <= limit; arg += step) {
args_.push_back({arg});
}
return this;
}
Benchmark* Benchmark::Args(const std::vector<int>& args) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
args_.push_back(args);
return this;
}
Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) {
custom_arguments(this);
return this;
}
Benchmark* Benchmark::RangeMultiplier(int multiplier) {
CHECK(multiplier > 1);
range_multiplier_ = multiplier;
return this;
}
Benchmark* Benchmark::MinTime(double t) {
CHECK(t > 0.0);
CHECK(iterations_ == 0);
min_time_ = t;
return this;
}
Benchmark* Benchmark::Iterations(size_t n) {
CHECK(n > 0);
CHECK(IsZero(min_time_));
iterations_ = n;
return this;
}
Benchmark* Benchmark::Repetitions(int n) {
CHECK(n > 0);
repetitions_ = n;
return this;
}
Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
report_mode_ = value ? RM_ReportAggregatesOnly : RM_Default;
return this;
}
Benchmark* Benchmark::UseRealTime() {
CHECK(!use_manual_time_)
<< "Cannot set UseRealTime and UseManualTime simultaneously.";
use_real_time_ = true;
return this;
}
Benchmark* Benchmark::UseManualTime() {
CHECK(!use_real_time_)
<< "Cannot set UseRealTime and UseManualTime simultaneously.";
use_manual_time_ = true;
return this;
}
Benchmark* Benchmark::Complexity(BigO complexity) {
complexity_ = complexity;
return this;
}
Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
complexity_lambda_ = complexity;
complexity_ = oLambda;
return this;
}
Benchmark* Benchmark::Threads(int t) {
CHECK_GT(t, 0);
thread_counts_.push_back(t);
return this;
}
Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
CHECK_GT(min_threads, 0);
CHECK_GE(max_threads, min_threads);
AddRange(&thread_counts_, min_threads, max_threads, 2);
return this;
}
Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,
int stride) {
CHECK_GT(min_threads, 0);
CHECK_GE(max_threads, min_threads);
CHECK_GE(stride, 1);
for (auto i = min_threads; i < max_threads; i += stride) {
thread_counts_.push_back(i);
}
thread_counts_.push_back(max_threads);
return this;
}
Benchmark* Benchmark::ThreadPerCpu() {
static int num_cpus = NumCPUs();
thread_counts_.push_back(num_cpus);
return this;
}
void Benchmark::SetName(const char* name) { name_ = name; }
int Benchmark::ArgsCnt() const {
if (args_.empty()) {
if (arg_names_.empty()) return -1;
return static_cast<int>(arg_names_.size());
}
return static_cast<int>(args_.front().size());
}
//=============================================================================//
// FunctionBenchmark
//=============================================================================//
void FunctionBenchmark::Run(State& st) { func_(st); }
} // end namespace internal
void ClearRegisteredBenchmarks() {
internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
}
} // end namespace benchmark
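The name-composition loop in FindBenchmarks() above implies a predictable naming scheme for benchmark instances. A hedged sketch of what one registration expands to (BM_memcpy is a hypothetical benchmark name):

    // Based on the name-composition code above, this registration should
    // produce four instances, named by appending each argument and the
    // thread count to the family name:
    //   BM_memcpy/8/threads:2   BM_memcpy/64/threads:2
    //   BM_memcpy/8/threads:4   BM_memcpy/64/threads:4
    BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Threads(2)->Threads(4);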

vendor/github.com/google/benchmark/src/check.h

@ -0,0 +1,79 @@
#ifndef CHECK_H_
#define CHECK_H_
#include <cstdlib>
#include <ostream>
#include <cmath>
#include "internal_macros.h"
#include "log.h"
namespace benchmark {
namespace internal {
typedef void(AbortHandlerT)();
inline AbortHandlerT*& GetAbortHandler() {
static AbortHandlerT* handler = &std::abort;
return handler;
}
BENCHMARK_NORETURN inline void CallAbortHandler() {
GetAbortHandler()();
std::abort(); // fallback to enforce noreturn
}
// CheckHandler is the class constructed by failing CHECK macros. CheckHandler
// will log information about the failures and abort when it is destructed.
class CheckHandler {
public:
CheckHandler(const char* check, const char* file, const char* func, int line)
: log_(GetErrorLogInstance()) {
log_ << file << ":" << line << ": " << func << ": Check `" << check
<< "' failed. ";
}
LogType& GetLog() { return log_; }
BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) {
log_ << std::endl;
CallAbortHandler();
}
CheckHandler& operator=(const CheckHandler&) = delete;
CheckHandler(const CheckHandler&) = delete;
CheckHandler() = delete;
private:
LogType& log_;
};
} // end namespace internal
} // end namespace benchmark
// The CHECK macro returns a std::ostream object that can have extra information
// written to it.
#ifndef NDEBUG
#define CHECK(b) \
(b ? ::benchmark::internal::GetNullLogInstance() \
: ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \
.GetLog())
#else
#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
#endif
#define CHECK_EQ(a, b) CHECK((a) == (b))
#define CHECK_NE(a, b) CHECK((a) != (b))
#define CHECK_GE(a, b) CHECK((a) >= (b))
#define CHECK_LE(a, b) CHECK((a) <= (b))
#define CHECK_GT(a, b) CHECK((a) > (b))
#define CHECK_LT(a, b) CHECK((a) < (b))
#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps))
#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps))
#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps))
#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps))
#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps))
#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps))
#endif // CHECK_H_
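Because a failing CHECK evaluates to the CheckHandler's log stream, call sites can append context that is printed before the abort handler fires. A small usage sketch (Mean() is an illustrative helper, not part of this change):

    #include <vector>
    #include "check.h"

    double Mean(const std::vector<double>& v) {
      // On failure this logs "Check `!v.empty()' failed. " plus the message
      // below, then CheckHandler's destructor calls the abort handler.
      CHECK(!v.empty()) << "cannot take the mean of an empty vector";
      double sum = 0.0;
      for (double x : v) sum += x;
      return sum / static_cast<double>(v.size());
    }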

vendor/github.com/google/benchmark/src/colorprint.cc

@ -0,0 +1,188 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "colorprint.h"
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include "check.h"
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <Windows.h>
#include <io.h>
#else
#include <unistd.h>
#endif // BENCHMARK_OS_WINDOWS
namespace benchmark {
namespace {
#ifdef BENCHMARK_OS_WINDOWS
typedef WORD PlatformColorCode;
#else
typedef const char* PlatformColorCode;
#endif
PlatformColorCode GetPlatformColorCode(LogColor color) {
#ifdef BENCHMARK_OS_WINDOWS
switch (color) {
case COLOR_RED:
return FOREGROUND_RED;
case COLOR_GREEN:
return FOREGROUND_GREEN;
case COLOR_YELLOW:
return FOREGROUND_RED | FOREGROUND_GREEN;
case COLOR_BLUE:
return FOREGROUND_BLUE;
case COLOR_MAGENTA:
return FOREGROUND_BLUE | FOREGROUND_RED;
case COLOR_CYAN:
return FOREGROUND_BLUE | FOREGROUND_GREEN;
case COLOR_WHITE: // fall through to default
default:
return 0;
}
#else
switch (color) {
case COLOR_RED:
return "1";
case COLOR_GREEN:
return "2";
case COLOR_YELLOW:
return "3";
case COLOR_BLUE:
return "4";
case COLOR_MAGENTA:
return "5";
case COLOR_CYAN:
return "6";
case COLOR_WHITE:
return "7";
default:
return nullptr;
};
#endif
}
} // end namespace
std::string FormatString(const char* msg, va_list args) {
// We might need a second shot at this, so pre-emptively make a copy.
va_list args_cp;
va_copy(args_cp, args);
std::size_t size = 256;
char local_buff[256];
auto ret = vsnprintf(local_buff, size, msg, args_cp);
va_end(args_cp);
// Currently there is no error handling for failure, so this is a hack.
CHECK(ret >= 0);
if (ret == 0) // handle empty expansion
return {};
else if (static_cast<size_t>(ret) < size)
return local_buff;
else {
// we did not provide a long enough buffer on our first attempt.
size = (size_t)ret + 1; // + 1 for the null byte
std::unique_ptr<char[]> buff(new char[size]);
ret = vsnprintf(buff.get(), size, msg, args);
CHECK(ret > 0 && ((size_t)ret) < size);
return buff.get();
}
}
std::string FormatString(const char* msg, ...) {
va_list args;
va_start(args, msg);
auto tmp = FormatString(msg, args);
va_end(args);
return tmp;
}
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ColorPrintf(out, color, fmt, args);
va_end(args);
}
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
va_list args) {
#ifdef BENCHMARK_OS_WINDOWS
((void)out); // suppress unused warning
const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
// Gets the current text color.
CONSOLE_SCREEN_BUFFER_INFO buffer_info;
GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
const WORD old_color_attrs = buffer_info.wAttributes;
// We need to flush the stream buffers into the console before each
// SetConsoleTextAttribute call lest it affect the text that is already
// printed but has not yet reached the console.
fflush(stdout);
SetConsoleTextAttribute(stdout_handle,
GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
vprintf(fmt, args);
fflush(stdout);
// Restores the text color.
SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else
const char* color_code = GetPlatformColorCode(color);
if (color_code) out << FormatString("\033[0;3%sm", color_code);
out << FormatString(fmt, args) << "\033[m";
#endif
}
bool IsColorTerminal() {
#if BENCHMARK_OS_WINDOWS
// On Windows the TERM variable is usually not set, but the
// console there does support colors.
return 0 != _isatty(_fileno(stdout));
#else
// On non-Windows platforms, we rely on the TERM variable. This list of
// supported TERM values is copied from Google Test:
// <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
const char* const SUPPORTED_TERM_VALUES[] = {
"xterm", "xterm-color", "xterm-256color",
"screen", "screen-256color", "tmux",
"tmux-256color", "rxvt-unicode", "rxvt-unicode-256color",
"linux", "cygwin",
};
const char* const term = getenv("TERM");
bool term_supports_color = false;
for (const char* candidate : SUPPORTED_TERM_VALUES) {
if (term && 0 == strcmp(term, candidate)) {
term_supports_color = true;
break;
}
}
return 0 != isatty(fileno(stdout)) && term_supports_color;
#endif // BENCHMARK_OS_WINDOWS
}
} // end namespace benchmark
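A short usage sketch of the API above (PrintStatus is illustrative). Gating on IsColorTerminal() keeps ANSI escape sequences out of plain pipes and files:

    #include <iostream>
    #include "colorprint.h"

    // Prints "PASS" in green or "FAIL" in red when stdout supports color,
    // and falls back to plain text otherwise.
    void PrintStatus(bool ok) {
      if (benchmark::IsColorTerminal()) {
        benchmark::ColorPrintf(std::cout,
                               ok ? benchmark::COLOR_GREEN : benchmark::COLOR_RED,
                               "%s\n", ok ? "PASS" : "FAIL");
      } else {
        std::cout << (ok ? "PASS" : "FAIL") << "\n";
      }
    }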

vendor/github.com/google/benchmark/src/colorprint.h

@ -0,0 +1,33 @@
#ifndef BENCHMARK_COLORPRINT_H_
#define BENCHMARK_COLORPRINT_H_
#include <cstdarg>
#include <iostream>
#include <string>
namespace benchmark {
enum LogColor {
COLOR_DEFAULT,
COLOR_RED,
COLOR_GREEN,
COLOR_YELLOW,
COLOR_BLUE,
COLOR_MAGENTA,
COLOR_CYAN,
COLOR_WHITE
};
std::string FormatString(const char* msg, va_list args);
std::string FormatString(const char* msg, ...);
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
va_list args);
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...);
// Returns true if stdout appears to be a terminal that supports colored
// output, false otherwise.
bool IsColorTerminal();
} // end namespace benchmark
#endif // BENCHMARK_COLORPRINT_H_


@ -0,0 +1,218 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "commandlineflags.h"
#include <cctype>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <limits>
namespace benchmark {
// Parses 'str' for a 32-bit signed integer. If successful, writes
// the result to *value and returns true; otherwise leaves *value
// unchanged and returns false.
bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
// Parses the string as a decimal integer.
char* end = nullptr;
const long long_value = strtol(str, &end, 10); // NOLINT
// Has strtol() consumed all characters in the string?
if (*end != '\0') {
// No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\".\n";
return false;
}
// Is the parsed value in the range of an Int32?
const int32_t result = static_cast<int32_t>(long_value);
if (long_value == std::numeric_limits<long>::max() ||
long_value == std::numeric_limits<long>::min() ||
// The parsed value overflows as a long. (strtol() returns
// LONG_MAX or LONG_MIN when the input overflows.)
result != long_value
// The parsed value overflows as an Int32.
) {
std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\", "
<< "which overflows.\n";
return false;
}
*value = result;
return true;
}
// Parses 'str' for a double. If successful, writes the result to *value and
// returns true; otherwise leaves *value unchanged and returns false.
bool ParseDouble(const std::string& src_text, const char* str, double* value) {
// Parses the string as a double.
char* end = nullptr;
const double double_value = strtod(str, &end); // NOLINT
// Has strtod() consumed all characters in the string?
if (*end != '\0') {
// No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a double, "
<< "but actually has value \"" << str << "\".\n";
return false;
}
*value = double_value;
return true;
}
// Returns the name of the environment variable corresponding to the
// given flag. For example, FlagToEnvVar("foo") will return
// "BENCHMARK_FOO" in the open-source version.
static std::string FlagToEnvVar(const char* flag) {
const std::string flag_str(flag);
std::string env_var;
for (size_t i = 0; i != flag_str.length(); ++i)
env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
return "BENCHMARK_" + env_var;
}
// Reads and returns the Boolean environment variable corresponding to
// the given flag; if it's not set, returns default_value.
//
// The value is considered true iff it's not "0".
bool BoolFromEnv(const char* flag, bool default_value) {
const std::string env_var = FlagToEnvVar(flag);
const char* const string_value = getenv(env_var.c_str());
return string_value == nullptr ? default_value
: strcmp(string_value, "0") != 0;
}
// Reads and returns a 32-bit integer stored in the environment
// variable corresponding to the given flag; if it isn't set or
// doesn't represent a valid 32-bit integer, returns default_value.
int32_t Int32FromEnv(const char* flag, int32_t default_value) {
const std::string env_var = FlagToEnvVar(flag);
const char* const string_value = getenv(env_var.c_str());
if (string_value == nullptr) {
// The environment variable is not set.
return default_value;
}
int32_t result = default_value;
if (!ParseInt32(std::string("Environment variable ") + env_var, string_value,
&result)) {
std::cout << "The default value " << default_value << " is used.\n";
return default_value;
}
return result;
}
// Reads and returns the string environment variable corresponding to
// the given flag; if it's not set, returns default_value.
const char* StringFromEnv(const char* flag, const char* default_value) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value = getenv(env_var.c_str());
return value == nullptr ? default_value : value;
}
// Parses a string as a command line flag. The string should have
// the format "--flag=value". When def_optional is true, the "=value"
// part can be omitted.
//
// Returns the value of the flag, or nullptr if the parsing failed.
const char* ParseFlagValue(const char* str, const char* flag,
bool def_optional) {
// str and flag must not be nullptr.
if (str == nullptr || flag == nullptr) return nullptr;
// The flag must start with "--".
const std::string flag_str = std::string("--") + std::string(flag);
const size_t flag_len = flag_str.length();
if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;
// Skips the flag name.
const char* flag_end = str + flag_len;
// When def_optional is true, it's OK to not have a "=value" part.
if (def_optional && (flag_end[0] == '\0')) return flag_end;
// If def_optional is true and there are more characters after the
// flag name, or if def_optional is false, there must be a '=' after
// the flag name.
if (flag_end[0] != '=') return nullptr;
// Returns the string after "=".
return flag_end + 1;
}
bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, true);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
// Converts the string value to a bool.
*value = IsTruthyFlagValue(value_str);
return true;
}
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
// Sets *value to the value of the flag.
return ParseInt32(std::string("The value of flag --") + flag, value_str,
value);
}
bool ParseDoubleFlag(const char* str, const char* flag, double* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
// Sets *value to the value of the flag.
return ParseDouble(std::string("The value of flag --") + flag, value_str,
value);
}
bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
*value = value_str;
return true;
}
bool IsFlag(const char* str, const char* flag) {
return (ParseFlagValue(str, flag, true) != nullptr);
}
bool IsTruthyFlagValue(const std::string& value) {
if (value.empty()) return true;
char ch = value[0];
return isalnum(ch) &&
!(ch == '0' || ch == 'f' || ch == 'F' || ch == 'n' || ch == 'N');
}
} // end namespace benchmark
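Taken together, these helpers implement a small "--flag=value" convention. A sketch of the expected behavior, assuming only the declarations above (the flag values are illustrative):

    #include <string>
    #include "commandlineflags.h"

    void ParseExamples() {
      // "--flag=value" form: ParseStringFlag stores the text after '='.
      std::string filter;
      benchmark::ParseStringFlag("--benchmark_filter=BM_Foo.*",
                                 "benchmark_filter", &filter);  // "BM_Foo.*"

      // Bare "--flag" form: for bool flags the "=value" part is optional,
      // and an empty value counts as truthy (see IsTruthyFlagValue), so
      // this sets list_tests to true.
      bool list_tests = false;
      benchmark::ParseBoolFlag("--benchmark_list_tests",
                               "benchmark_list_tests", &list_tests);
    }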


@ -0,0 +1,79 @@
#ifndef BENCHMARK_COMMANDLINEFLAGS_H_
#define BENCHMARK_COMMANDLINEFLAGS_H_
#include <cstdint>
#include <string>
// Macro for referencing flags.
#define FLAG(name) FLAGS_##name
// Macros for declaring flags.
#define DECLARE_bool(name) extern bool FLAG(name)
#define DECLARE_int32(name) extern int32_t FLAG(name)
#define DECLARE_int64(name) extern int64_t FLAG(name)
#define DECLARE_double(name) extern double FLAG(name)
#define DECLARE_string(name) extern std::string FLAG(name)
// Macros for defining flags.
#define DEFINE_bool(name, default_val, doc) bool FLAG(name) = (default_val)
#define DEFINE_int32(name, default_val, doc) int32_t FLAG(name) = (default_val)
#define DEFINE_int64(name, default_val, doc) int64_t FLAG(name) = (default_val)
#define DEFINE_double(name, default_val, doc) double FLAG(name) = (default_val)
#define DEFINE_string(name, default_val, doc) \
std::string FLAG(name) = (default_val)
namespace benchmark {
// Parses 'str' for a 32-bit signed integer. If successful, writes the result
// to *value and returns true; otherwise leaves *value unchanged and returns
// false.
bool ParseInt32(const std::string& src_text, const char* str, int32_t* value);
// Parses a bool/Int32/string from the environment variable
// corresponding to the given Google Test flag.
bool BoolFromEnv(const char* flag, bool default_val);
int32_t Int32FromEnv(const char* flag, int32_t default_val);
double DoubleFromEnv(const char* flag, double default_val);
const char* StringFromEnv(const char* flag, const char* default_val);
// Parses a string for a bool flag, in the form of either
// "--flag=value" or "--flag".
//
// In the former case, the value is taken as true if it passes IsTruthyValue().
//
// In the latter case, the value is taken as true.
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseBoolFlag(const char* str, const char* flag, bool* value);
// Parses a string for an Int32 flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
// Parses a string for a Double flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseDoubleFlag(const char* str, const char* flag, double* value);
// Parses a string for a string flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseStringFlag(const char* str, const char* flag, std::string* value);
// Returns true if the string matches the flag.
bool IsFlag(const char* str, const char* flag);
// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
// some non-alphanumeric character. As a special case, also returns true if
// value is the empty string.
bool IsTruthyFlagValue(const std::string& value);
} // end namespace benchmark
#endif // BENCHMARK_COMMANDLINEFLAGS_H_
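A sketch of how the DEFINE_*/DECLARE_* macros are meant to be paired across translation units (my_iterations is an illustrative flag name):

    // In exactly one .cc file: defines int32_t FLAGS_my_iterations = 100;
    DEFINE_int32(my_iterations, 100, "how many iterations to run");
    // In any other file that needs the flag: extern int32_t FLAGS_my_iterations;
    DECLARE_int32(my_iterations);
    // Access goes through the FLAG() macro, which expands to FLAGS_my_iterations.
    int32_t iteration_count = FLAG(my_iterations);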

vendor/github.com/google/benchmark/src/complexity.cc

@ -0,0 +1,324 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Source project : https://github.com/ismaelJimenez/cpp.leastsq
// Adapted to be used with google benchmark
#include "benchmark/benchmark.h"
#include <algorithm>
#include <cmath>
#include "check.h"
#include "complexity.h"
#include "stat.h"
namespace benchmark {
// Internal function to calculate the different scalability forms
BigOFunc* FittingCurve(BigO complexity) {
switch (complexity) {
case oN:
return [](int n) -> double { return n; };
case oNSquared:
return [](int n) -> double { return std::pow(n, 2); };
case oNCubed:
return [](int n) -> double { return std::pow(n, 3); };
case oLogN:
return [](int n) { return log2(n); };
case oNLogN:
return [](int n) { return n * log2(n); };
case o1:
default:
return [](int) { return 1.0; };
}
}
// Function to return a string for the calculated complexity
std::string GetBigOString(BigO complexity) {
switch (complexity) {
case oN:
return "N";
case oNSquared:
return "N^2";
case oNCubed:
return "N^3";
case oLogN:
return "lgN";
case oNLogN:
return "NlgN";
case o1:
return "(1)";
default:
return "f(N)";
}
}
// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squares of relative error, for the fitting curve
// given by the lambda expression.
// - n : Vector containing the size of the benchmark tests.
// - time : Vector containing the times for the benchmark tests.
// - fitting_curve : lambda expression (e.g. [](int n) { return n; }).
// For a deeper explanation of the algorithm logic, see the README file at
// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
LeastSq MinimalLeastSq(const std::vector<int>& n,
const std::vector<double>& time,
BigOFunc* fitting_curve) {
double sigma_gn = 0.0;
double sigma_gn_squared = 0.0;
double sigma_time = 0.0;
double sigma_time_gn = 0.0;
// Calculate least square fitting parameter
for (size_t i = 0; i < n.size(); ++i) {
double gn_i = fitting_curve(n[i]);
sigma_gn += gn_i;
sigma_gn_squared += gn_i * gn_i;
sigma_time += time[i];
sigma_time_gn += time[i] * gn_i;
}
LeastSq result;
result.complexity = oLambda;
// Calculate complexity.
result.coef = sigma_time_gn / sigma_gn_squared;
// Calculate RMS
double rms = 0.0;
for (size_t i = 0; i < n.size(); ++i) {
double fit = result.coef * fitting_curve(n[i]);
rms += pow((time[i] - fit), 2);
}
// Normalized RMS by the mean of the observed values
double mean = sigma_time / n.size();
result.rms = sqrt(rms / n.size()) / mean;
return result;
}
// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squares of relative error.
// - n : Vector containing the size of the benchmark tests.
// - time : Vector containing the times for the benchmark tests.
// - complexity : If different from oAuto, the fitting curve will stick to
// this one. If it is oAuto, the best-fitting curve will be
// calculated.
LeastSq MinimalLeastSq(const std::vector<int>& n,
const std::vector<double>& time, const BigO complexity) {
CHECK_EQ(n.size(), time.size());
CHECK_GE(n.size(), 2); // Do not compute a fitting curve if fewer than two
// benchmark runs are given
CHECK_NE(complexity, oNone);
LeastSq best_fit;
if (complexity == oAuto) {
std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
// Take o1 as default best fitting curve
best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
best_fit.complexity = o1;
// Compute all possible fitting curves and stick to the best one
for (const auto& fit : fit_curves) {
LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
if (current_fit.rms < best_fit.rms) {
best_fit = current_fit;
best_fit.complexity = fit;
}
}
} else {
best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
best_fit.complexity = complexity;
}
return best_fit;
}
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports) {
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
auto error_count =
std::count_if(reports.begin(), reports.end(),
[](Run const& run) { return run.error_occurred; });
if (reports.size() - error_count < 2) {
// We don't report aggregated data if there was a single run.
return results;
}
// Accumulators.
Stat1_d real_accumulated_time_stat;
Stat1_d cpu_accumulated_time_stat;
Stat1_d bytes_per_second_stat;
Stat1_d items_per_second_stat;
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
int64_t const run_iterations = reports.front().iterations;
// create stats for user counters
struct CounterStat {
Counter c;
Stat1_d s;
};
std::map< std::string, CounterStat > counter_stats;
for(Run const& r : reports) {
for(auto const& cnt : r.counters) {
auto it = counter_stats.find(cnt.first);
if(it == counter_stats.end()) {
counter_stats.insert({cnt.first, {cnt.second, Stat1_d{}}});
} else {
CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
}
}
}
// Populate the accumulators.
for (Run const& run : reports) {
CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred) continue;
real_accumulated_time_stat +=
Stat1_d(run.real_accumulated_time / run.iterations);
cpu_accumulated_time_stat +=
Stat1_d(run.cpu_accumulated_time / run.iterations);
items_per_second_stat += Stat1_d(run.items_per_second);
bytes_per_second_stat += Stat1_d(run.bytes_per_second);
// user counters
for(auto const& cnt : run.counters) {
auto it = counter_stats.find(cnt.first);
CHECK_NE(it, counter_stats.end());
it->second.s += Stat1_d(cnt.second);
}
}
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run mean_data;
mean_data.benchmark_name = reports[0].benchmark_name + "_mean";
mean_data.iterations = run_iterations;
mean_data.real_accumulated_time =
real_accumulated_time_stat.Mean() * run_iterations;
mean_data.cpu_accumulated_time =
cpu_accumulated_time_stat.Mean() * run_iterations;
mean_data.bytes_per_second = bytes_per_second_stat.Mean();
mean_data.items_per_second = items_per_second_stat.Mean();
mean_data.time_unit = reports[0].time_unit;
// user counters
for(auto const& kv : counter_stats) {
auto c = Counter(kv.second.s.Mean(), counter_stats[kv.first].c.flags);
mean_data.counters[kv.first] = c;
}
// Only add the label to mean/stddev if it is the same for all runs
mean_data.report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++) {
if (reports[i].report_label != reports[0].report_label) {
mean_data.report_label = "";
break;
}
}
Run stddev_data;
stddev_data.benchmark_name = reports[0].benchmark_name + "_stddev";
stddev_data.report_label = mean_data.report_label;
stddev_data.iterations = 0;
stddev_data.real_accumulated_time = real_accumulated_time_stat.StdDev();
stddev_data.cpu_accumulated_time = cpu_accumulated_time_stat.StdDev();
stddev_data.bytes_per_second = bytes_per_second_stat.StdDev();
stddev_data.items_per_second = items_per_second_stat.StdDev();
stddev_data.time_unit = reports[0].time_unit;
// user counters
for(auto const& kv : counter_stats) {
auto c = Counter(kv.second.s.StdDev(), counter_stats[kv.first].c.flags);
stddev_data.counters[kv.first] = c;
}
results.push_back(mean_data);
results.push_back(stddev_data);
return results;
}
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports) {
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
if (reports.size() < 2) return results;
// Accumulators.
std::vector<int> n;
std::vector<double> real_time;
std::vector<double> cpu_time;
// Populate the accumulators.
for (const Run& run : reports) {
CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
n.push_back(run.complexity_n);
real_time.push_back(run.real_accumulated_time / run.iterations);
cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
}
LeastSq result_cpu;
LeastSq result_real;
if (reports[0].complexity == oLambda) {
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
} else {
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
}
std::string benchmark_name =
reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run big_o;
big_o.benchmark_name = benchmark_name + "_BigO";
big_o.iterations = 0;
big_o.real_accumulated_time = result_real.coef;
big_o.cpu_accumulated_time = result_cpu.coef;
big_o.report_big_o = true;
big_o.complexity = result_cpu.complexity;
// All the time results are reported after being multiplied by the
// time unit multiplier. But since RMS is a relative quantity it
// should not be multiplied at all. So, here, we _divide_ it by the
// multiplier so that when it is multiplied later the result is the
// correct one.
double multiplier = GetTimeUnitMultiplier(reports[0].time_unit);
// Only add the label to mean/stddev if it is the same for all runs
Run rms;
big_o.report_label = reports[0].report_label;
rms.benchmark_name = benchmark_name + "_RMS";
rms.report_label = big_o.report_label;
rms.iterations = 0;
rms.real_accumulated_time = result_real.rms / multiplier;
rms.cpu_accumulated_time = result_cpu.rms / multiplier;
rms.report_rms = true;
rms.complexity = result_cpu.complexity;
// don't forget to keep the time unit, or we won't be able to
// recover the correct value.
rms.time_unit = reports[0].time_unit;
results.push_back(big_o);
results.push_back(rms);
return results;
}
} // end namespace benchmark
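A hedged sketch of the user-facing side of this machinery (BM_Sort is an illustrative benchmark): SetComplexityN() supplies the run.complexity_n value that ComputeBigO() checks above, and Complexity(benchmark::oAuto) lets MinimalLeastSq() pick the best-fitting curve.

    #include <algorithm>
    #include <numeric>
    #include <vector>
    #include "benchmark/benchmark.h"

    static void BM_Sort(benchmark::State& state) {
      std::vector<int> v(state.range(0));
      while (state.KeepRunning()) {
        state.PauseTiming();
        std::iota(v.rbegin(), v.rend(), 0);  // reset to reverse-sorted input
        state.ResumeTiming();
        std::sort(v.begin(), v.end());
      }
      state.SetComplexityN(state.range(0));
    }
    BENCHMARK(BM_Sort)
        ->RangeMultiplier(2)
        ->Range(1 << 10, 1 << 16)
        ->Complexity(benchmark::oAuto);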

vendor/github.com/google/benchmark/src/complexity.h

@ -0,0 +1,60 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Source project : https://github.com/ismaelJimenez/cpp.leastsq
// Adapted to be used with google benchmark
#ifndef COMPLEXITY_H_
#define COMPLEXITY_H_
#include <string>
#include <vector>
#include "benchmark/benchmark.h"
namespace benchmark {
// Return a vector containing the mean and standard deviation information for
// the specified list of reports. If 'reports' contains fewer than two
// non-errored runs, an empty vector is returned
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports);
// Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports);
// This data structure will contain the result returned by MinimalLeastSq
// - coef : Estimated coefficient for the high-order term as
// interpolated from data.
// - rms : Normalized Root Mean Squared Error.
// - complexity : Scalability form (e.g. oN, oNLogN). In case a scalability
// form has been provided to MinimalLeastSq this will return
// the same value. In case BigO::oAuto has been selected, this
// parameter will return the best fitting curve detected.
struct LeastSq {
LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {}
double coef;
double rms;
BigO complexity;
};
// Function to return a string for the calculated complexity
std::string GetBigOString(BigO complexity);
} // end namespace benchmark
#endif // COMPLEXITY_H_
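For reference, the fit computed by MinimalLeastSq has a closed form. With benchmark sizes n_i, per-iteration times t_i, fitting curve g, and N runs, the coef and rms fields correspond to (ordinary least squares, restated from the implementation in complexity.cc):

    coef = \frac{\sum_i t_i \, g(n_i)}{\sum_i g(n_i)^2}, \qquad
    rms = \frac{1}{\bar{t}} \sqrt{\frac{1}{N} \sum_i \left(t_i - coef \cdot g(n_i)\right)^2}, \qquad
    \bar{t} = \frac{1}{N} \sum_i t_i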


@ -0,0 +1,180 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "complexity.h"
#include "counter.h"
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>
#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "internal_macros.h"
#include "string_util.h"
#include "timers.h"
namespace benchmark {
bool ConsoleReporter::ReportContext(const Context& context) {
name_field_width_ = context.name_field_width;
printed_header_ = false;
prev_counters_.clear();
PrintBasicContext(&GetErrorStream(), context);
#ifdef BENCHMARK_OS_WINDOWS
if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) {
GetErrorStream()
<< "Color printing is only supported for stdout on windows."
" Disabling color printing\n";
output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color);
}
#endif
return true;
}
void ConsoleReporter::PrintHeader(const Run& run) {
std::string str = FormatString("%-*s %13s %13s %10s", static_cast<int>(name_field_width_),
"Benchmark", "Time", "CPU", "Iterations");
if(!run.counters.empty()) {
if(output_options_ & OO_Tabular) {
for(auto const& c : run.counters) {
str += FormatString(" %10s", c.first.c_str());
}
} else {
str += " UserCounters...";
}
}
str += "\n";
std::string line = std::string(str.length(), '-');
GetOutputStream() << line << "\n" << str << line << "\n";
}
void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
for (const auto& run : reports) {
// print the header:
// --- if none was printed yet
bool print_header = !printed_header_;
// --- or if the format is tabular and this run
// has different fields from the prev header
print_header |= (output_options_ & OO_Tabular) &&
(!internal::SameNames(run.counters, prev_counters_));
if (print_header) {
printed_header_ = true;
prev_counters_ = run.counters;
PrintHeader(run);
}
// As an alternative to printing the headers like this, we could sort
// the benchmarks by header and then print. But this would require
// waiting for the full results before printing, or printing twice.
PrintRunData(run);
}
}
static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt,
...) {
va_list args;
va_start(args, fmt);
out << FormatString(fmt, args);
va_end(args);
}
void ConsoleReporter::PrintRunData(const Run& result) {
typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
auto& Out = GetOutputStream();
PrinterFn* printer = (output_options_ & OO_Color) ?
(PrinterFn*)ColorPrintf : IgnoreColorPrint;
auto name_color =
(result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
printer(Out, name_color, "%-*s ", name_field_width_,
result.benchmark_name.c_str());
if (result.error_occurred) {
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
result.error_message.c_str());
printer(Out, COLOR_DEFAULT, "\n");
return;
}
// Format bytes per second
std::string rate;
if (result.bytes_per_second > 0) {
rate = StrCat(" ", HumanReadableNumber(result.bytes_per_second), "B/s");
}
// Format items per second
std::string items;
if (result.items_per_second > 0) {
items =
StrCat(" ", HumanReadableNumber(result.items_per_second), " items/s");
}
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
if (result.report_big_o) {
std::string big_o = GetBigOString(result.complexity);
printer(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time, big_o.c_str(),
cpu_time, big_o.c_str());
} else if (result.report_rms) {
printer(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100,
cpu_time * 100);
} else {
const char* timeLabel = GetTimeUnitString(result.time_unit);
printer(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel,
cpu_time, timeLabel);
}
if (!result.report_big_o && !result.report_rms) {
printer(Out, COLOR_CYAN, "%10lld", result.iterations);
}
for (auto& c : result.counters) {
auto const& s = HumanReadableNumber(c.second.value);
if (output_options_ & OO_Tabular) {
if (c.second.flags & Counter::kIsRate) {
printer(Out, COLOR_DEFAULT, " %8s/s", s.c_str());
} else {
printer(Out, COLOR_DEFAULT, " %10s", s.c_str());
}
} else {
const char* unit = (c.second.flags & Counter::kIsRate) ? "/s" : "";
printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(),
unit);
}
}
if (!rate.empty()) {
printer(Out, COLOR_DEFAULT, " %*s", 13, rate.c_str());
}
if (!items.empty()) {
printer(Out, COLOR_DEFAULT, " %*s", 18, items.c_str());
}
if (!result.report_label.empty()) {
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
}
printer(Out, COLOR_DEFAULT, "\n");
}
} // end namespace benchmark
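This reporter is the default, but it can also be instantiated explicitly and handed to the runner. A minimal sketch, assuming the public entry points Initialize and RunSpecifiedBenchmarks and ConsoleReporter's default constructor from benchmark/benchmark.h:

#include "benchmark/benchmark.h"

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);       // consumes --benchmark_* flags
  benchmark::ConsoleReporter reporter;      // the reporter defined above
  benchmark::RunSpecifiedBenchmarks(&reporter);
  return 0;
}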

68 vendor/github.com/google/benchmark/src/counter.cc generated vendored Normal file
@@ -0,0 +1,68 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "counter.h"
namespace benchmark {
namespace internal {
double Finish(Counter const& c, double cpu_time, double num_threads) {
double v = c.value;
if (c.flags & Counter::kIsRate) {
v /= cpu_time;
}
if (c.flags & Counter::kAvgThreads) {
v /= num_threads;
}
return v;
}
void Finish(UserCounters *l, double cpu_time, double num_threads) {
for (auto &c : *l) {
c.second.value = Finish(c.second, cpu_time, num_threads);
}
}
void Increment(UserCounters *l, UserCounters const& r) {
// add counters present in both or just in *l
for (auto &c : *l) {
auto it = r.find(c.first);
if (it != r.end()) {
c.second.value = c.second + it->second;
}
}
// add counters present in r, but not in *l
for (auto const &tc : r) {
auto it = l->find(tc.first);
if (it == l->end()) {
(*l)[tc.first] = tc.second;
}
}
}
bool SameNames(UserCounters const& l, UserCounters const& r) {
if (&l == &r) return true;
if (l.size() != r.size()) {
return false;
}
for (auto const& c : l) {
if (r.find(c.first) == r.end()) {
return false;
}
}
return true;
}
} // end namespace internal
} // end namespace benchmark
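On the user side these semantics are selected through Counter flags; Finish() above then turns a kIsRate counter into a per-second figure and divides a kAvgThreads counter by the thread count. A small sketch (BM_Process and its numbers are hypothetical):

#include <cstdint>
#include "benchmark/benchmark.h"

static void BM_Process(benchmark::State& state) {
  int64_t items = 0;
  while (state.KeepRunning()) {
    // ... process a batch of 64 items ...
    items += 64;
  }
  // Divided by cpu_time in Finish() because of kIsRate:
  state.counters["ItemsPerSec"] =
      benchmark::Counter(static_cast<double>(items),
                         benchmark::Counter::kIsRate);
  // Averaged over the thread count because of kAvgThreads:
  state.counters["BatchSize"] =
      benchmark::Counter(64, benchmark::Counter::kAvgThreads);
}
BENCHMARK(BM_Process)->Threads(4);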

26 vendor/github.com/google/benchmark/src/counter.h generated vendored Normal file
@@ -0,0 +1,26 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
namespace benchmark {
// these counter-related functions are hidden to reduce API surface.
namespace internal {
void Finish(UserCounters *l, double time, double num_threads);
void Increment(UserCounters *l, UserCounters const& r);
bool SameNames(UserCounters const& l, UserCounters const& r);
} // end namespace internal
} // end namespace benchmark

149 vendor/github.com/google/benchmark/src/csv_reporter.cc generated vendored Normal file
@@ -0,0 +1,149 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "complexity.h"
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>
#include "string_util.h"
#include "timers.h"
#include "check.h"
// File format reference: http://edoceo.com/utilitas/csv-file-format.
namespace benchmark {
namespace {
std::vector<std::string> elements = {
"name", "iterations", "real_time", "cpu_time",
"time_unit", "bytes_per_second", "items_per_second", "label",
"error_occurred", "error_message"};
} // namespace
bool CSVReporter::ReportContext(const Context& context) {
PrintBasicContext(&GetErrorStream(), context);
return true;
}
void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
std::ostream& Out = GetOutputStream();
if (!printed_header_) {
// save the names of all the user counters
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
user_counter_names_.insert(cnt.first);
}
}
// print the header
for (auto B = elements.begin(); B != elements.end();) {
Out << *B++;
if (B != elements.end()) Out << ",";
}
for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();) {
Out << ",\"" << *B++ << "\"";
}
Out << "\n";
printed_header_ = true;
} else {
// check that all the current counters are saved in the name set
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
<< "All counters must be present in each run. "
<< "Counter named \"" << cnt.first
<< "\" was not in a run after being added to the header";
}
}
}
// print results for each run
for (const auto& run : reports) {
PrintRunData(run);
}
}
void CSVReporter::PrintRunData(const Run & run) {
std::ostream& Out = GetOutputStream();
// Fields with embedded double-quote characters must have those quotes
// doubled, and the whole field must be delimited with double-quotes.
std::string name = run.benchmark_name;
ReplaceAll(&name, "\"", "\"\"");
Out << '"' << name << "\",";
if (run.error_occurred) {
Out << std::string(elements.size() - 3, ',');
Out << "true,";
std::string msg = run.error_message;
ReplaceAll(&msg, "\"", "\"\"");
Out << '"' << msg << "\"\n";
return;
}
// Do not print iteration on bigO and RMS report
if (!run.report_big_o && !run.report_rms) {
Out << run.iterations;
}
Out << ",";
Out << run.GetAdjustedRealTime() << ",";
Out << run.GetAdjustedCPUTime() << ",";
// Do not print timeLabel on bigO and RMS report
if (run.report_big_o) {
Out << GetBigOString(run.complexity);
} else if (!run.report_rms) {
Out << GetTimeUnitString(run.time_unit);
}
Out << ",";
if (run.bytes_per_second > 0.0) {
Out << run.bytes_per_second;
}
Out << ",";
if (run.items_per_second > 0.0) {
Out << run.items_per_second;
}
Out << ",";
if (!run.report_label.empty()) {
// Fields with embedded double-quote characters must have those quotes
// doubled, and the whole field must be delimited with double-quotes.
std::string label = run.report_label;
ReplaceAll(&label, "\"", "\"\"");
Out << "\"" << label << "\"";
}
Out << ",,"; // for error_occurred and error_message
// Print user counters
for (const auto &ucn : user_counter_names_) {
auto it = run.counters.find(ucn);
if(it == run.counters.end()) {
Out << ",";
} else {
Out << "," << it->second;
}
}
Out << '\n';
}
} // end namespace benchmark
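The quoting rule applied in PrintRunData can be seen in isolation; a self-contained sketch (ReplaceAll is reproduced from string_util.cc later in this change):

#include <iostream>
#include <string>

static void ReplaceAll(std::string* str, const std::string& from,
                       const std::string& to) {
  std::size_t start = 0;
  while ((start = str->find(from, start)) != std::string::npos) {
    str->replace(start, from.length(), to);
    start += to.length();
  }
}

int main() {
  std::string name = "BM_Parse/\"quoted\"";
  ReplaceAll(&name, "\"", "\"\"");          // double embedded quotes
  std::cout << '"' << name << '"' << '\n';  // "BM_Parse/""quoted"""
}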

172 vendor/github.com/google/benchmark/src/cycleclock.h generated vendored Normal file
@@ -0,0 +1,172 @@
// ----------------------------------------------------------------------
// CycleClock
// A CycleClock tells you the current time in Cycles. The "time"
// is actually time since power-on. This is like time() but doesn't
// involve a system call and is much more precise.
//
// NOTE: Not all cpu/platform/kernel combinations guarantee that this
// clock increments at a constant rate or is synchronized across all logical
// cpus in a system.
//
// If you need the above guarantees, please consider using a different
// API. There are efforts to provide an interface which provides a millisecond
// granularity and is implemented as a memory read. A memory read is generally
// cheaper than the CycleClock for many architectures.
//
// Also, in some out of order CPU implementations, the CycleClock is not
// serializing. So if you're trying to count at cycles granularity, your
// data might be inaccurate due to out of order instruction execution.
// ----------------------------------------------------------------------
#ifndef BENCHMARK_CYCLECLOCK_H_
#define BENCHMARK_CYCLECLOCK_H_
#include <cstdint>
#include "benchmark/benchmark.h"
#include "internal_macros.h"
#if defined(BENCHMARK_OS_MACOSX)
#include <mach/mach_time.h>
#endif
// For MSVC, we want to use '_asm rdtsc' when possible (since it works
// with even ancient MSVC compilers), and when not possible the
// __rdtsc intrinsic, declared in <intrin.h>. Unfortunately, in some
// environments, <windows.h> and <intrin.h> have conflicting
// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
#if defined(COMPILER_MSVC) && !defined(_M_IX86)
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif
#ifndef BENCHMARK_OS_WINDOWS
#include <sys/time.h>
#include <time.h>
#endif
#ifdef BENCHMARK_OS_EMSCRIPTEN
#include <emscripten.h>
#endif
namespace benchmark {
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
// http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b. See also
// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock {
// This should return the number of cycles since power-on. Thread-safe.
inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#if defined(BENCHMARK_OS_MACOSX)
// this goes at the top because we need ALL Macs, regardless of
// architecture, to return the number of "mach time units" that
// have passed since startup. See sysinfo.cc where
// InitializeSystemInfo() sets the supposed cpu clock frequency of
// macs to the number of mach time units per second, not actual
// CPU clock frequency (which can change in the face of CPU
// frequency scaling). Also note that when the Mac sleeps, this
// counter pauses; it does not continue counting, nor does it
// reset to zero.
return mach_absolute_time();
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// this goes above x86-specific code because old versions of Emscripten
// define __x86_64__, although they have nothing to do with it.
return static_cast<int64_t>(emscripten_get_now() * 1e+6);
#elif defined(__i386__)
int64_t ret;
__asm__ volatile("rdtsc" : "=A"(ret));
return ret;
#elif defined(__x86_64__) || defined(__amd64__)
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low;
#elif defined(__powerpc__) || defined(__ppc__)
// This returns a time-base, which is not always precisely a cycle-count.
int64_t tbl, tbu0, tbu1;
asm("mftbu %0" : "=r"(tbu0));
asm("mftb %0" : "=r"(tbl));
asm("mftbu %0" : "=r"(tbu1));
tbl &= -static_cast<int64_t>(tbu0 == tbu1);
// high 32 bits in tbu1; low 32 bits in tbl (tbu0 is garbage)
return (tbu1 << 32) | tbl;
#elif defined(__sparc__)
int64_t tick;
asm(".byte 0x83, 0x41, 0x00, 0x00");
asm("mov %%g1, %0" : "=r"(tick));
return tick;
#elif defined(__ia64__)
int64_t itc;
asm("mov %0 = ar.itc" : "=r"(itc));
return itc;
#elif defined(COMPILER_MSVC) && defined(_M_IX86)
// Older MSVC compilers (like 7.x) don't seem to support the
// __rdtsc intrinsic properly, so I prefer to use _asm instead
// when I know it will work. Otherwise, I'll use __rdtsc and hope
// the code is being compiled with a non-ancient compiler.
_asm rdtsc
#elif defined(COMPILER_MSVC)
return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
// Native Client validator on x86/x86-64 allows RDTSC instructions,
// and this case is handled above. Native Client validator on ARM
// rejects MRC instructions (used in the ARM-specific sequence below),
// so we handle it here. Portable Native Client compiles to
// architecture-agnostic bytecode, which doesn't provide any
// cycle counter access mnemonics.
// Native Client does not provide any API to access cycle counter.
// Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
// because it provides nanosecond resolution (which is noticeable at
// least for PNaCl modules running on x86 Mac & Linux).
// Initialize to always return 0 if clock_gettime fails.
struct timespec ts = { 0, 0 };
clock_gettime(CLOCK_MONOTONIC, &ts);
return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__)
// System timer of ARMv8 runs at a different frequency than the CPU's.
// The frequency is fixed, typically in the range 1-50MHz. It can be
// read at CNTFRQ special register. We assume the OS has set up
// the virtual timer properly.
int64_t virtual_timer_value;
asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
return virtual_timer_value;
#elif defined(__ARM_ARCH)
// V6 is the earliest arch that has a standard cyclecount
// Native Client validator doesn't allow MRC instructions.
#if (__ARM_ARCH >= 6)
uint32_t pmccntr;
uint32_t pmuseren;
uint32_t pmcntenset;
// Read the user mode perf monitor counter access permissions.
asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
if (pmuseren & 1) { // Allows reading perfmon counters for user mode code.
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
if (pmcntenset & 0x80000000ul) { // Is it counting?
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
// The counter is set up to count every 64th cycle
return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
}
}
#endif
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__mips__)
// mips apparently only allows rdtsc for superusers, so we fall
// back to gettimeofday. It's possible clock_gettime would be better.
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#else
// The soft failover to a generic implementation is automatic only for ARM.
// For other platforms the developer is expected to attempt a fast
// implementation and fall back to a generic version only if nothing better
// is available.
#error You need to define CycleTimer for your OS and CPU
#endif
}
} // end namespace cycleclock
} // end namespace benchmark
#endif // BENCHMARK_CYCLECLOCK_H_
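A sketch of using the counter directly. The include path is illustrative since this is an internal header, and the resulting unit (TSC ticks, mach time units, ...) is platform-dependent, so deltas are only comparable on the same machine and clock:

#include <cstdint>
#include <cstdio>
#include "cycleclock.h"  // internal header; include path is illustrative

int main() {
  const int64_t start = benchmark::cycleclock::Now();
  volatile int64_t sink = 0;
  for (int i = 0; i < 1000000; ++i) sink += i;  // work being measured
  const int64_t ticks = benchmark::cycleclock::Now() - start;
  std::printf("elapsed: %lld ticks\n", static_cast<long long>(ticks));
  return 0;
}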

57 vendor/github.com/google/benchmark/src/internal_macros.h generated vendored Normal file
@@ -0,0 +1,57 @@
#ifndef BENCHMARK_INTERNAL_MACROS_H_
#define BENCHMARK_INTERNAL_MACROS_H_
#include "benchmark/benchmark.h"
#ifndef __has_feature
#define __has_feature(x) 0
#endif
#if defined(__clang__)
#define COMPILER_CLANG
#elif defined(_MSC_VER)
#define COMPILER_MSVC
#elif defined(__GNUC__)
#define COMPILER_GCC
#endif
#if __has_feature(cxx_attributes)
#define BENCHMARK_NORETURN [[noreturn]]
#elif defined(__GNUC__)
#define BENCHMARK_NORETURN __attribute__((noreturn))
#elif defined(COMPILER_MSVC)
#define BENCHMARK_NORETURN __declspec(noreturn)
#else
#define BENCHMARK_NORETURN
#endif
#if defined(__CYGWIN__)
#define BENCHMARK_OS_CYGWIN 1
#elif defined(_WIN32)
#define BENCHMARK_OS_WINDOWS 1
#elif defined(__APPLE__)
#include "TargetConditionals.h"
#if defined(TARGET_OS_MAC)
#define BENCHMARK_OS_MACOSX 1
#if defined(TARGET_OS_IPHONE)
#define BENCHMARK_OS_IOS 1
#endif
#endif
#elif defined(__FreeBSD__)
#define BENCHMARK_OS_FREEBSD 1
#elif defined(__linux__)
#define BENCHMARK_OS_LINUX 1
#elif defined(__native_client__)
#define BENCHMARK_OS_NACL 1
#elif defined(EMSCRIPTEN)
#define BENCHMARK_OS_EMSCRIPTEN 1
#elif defined(__rtems__)
#define BENCHMARK_OS_RTEMS 1
#endif
#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
&& !defined(__EXCEPTIONS)
#define BENCHMARK_HAS_NO_EXCEPTIONS
#endif
#endif // BENCHMARK_INTERNAL_MACROS_H_

168 vendor/github.com/google/benchmark/src/json_reporter.cc generated vendored Normal file
@@ -0,0 +1,168 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "complexity.h"
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>
#include "string_util.h"
#include "timers.h"
namespace benchmark {
namespace {
std::string FormatKV(std::string const& key, std::string const& value) {
return StringPrintF("\"%s\": \"%s\"", key.c_str(), value.c_str());
}
std::string FormatKV(std::string const& key, const char* value) {
return StringPrintF("\"%s\": \"%s\"", key.c_str(), value);
}
std::string FormatKV(std::string const& key, bool value) {
return StringPrintF("\"%s\": %s", key.c_str(), value ? "true" : "false");
}
std::string FormatKV(std::string const& key, int64_t value) {
std::stringstream ss;
ss << '"' << key << "\": " << value;
return ss.str();
}
std::string FormatKV(std::string const& key, double value) {
return StringPrintF("\"%s\": %.2f", key.c_str(), value);
}
int64_t RoundDouble(double v) { return static_cast<int64_t>(v + 0.5); }
} // end namespace
bool JSONReporter::ReportContext(const Context& context) {
std::ostream& out = GetOutputStream();
out << "{\n";
std::string inner_indent(2, ' ');
// Open context block and print context information.
out << inner_indent << "\"context\": {\n";
std::string indent(4, ' ');
std::string walltime_value = LocalDateTimeString();
out << indent << FormatKV("date", walltime_value) << ",\n";
out << indent << FormatKV("num_cpus", static_cast<int64_t>(context.num_cpus))
<< ",\n";
out << indent << FormatKV("mhz_per_cpu", RoundDouble(context.mhz_per_cpu))
<< ",\n";
out << indent << FormatKV("cpu_scaling_enabled", context.cpu_scaling_enabled)
<< ",\n";
#if defined(NDEBUG)
const char build_type[] = "release";
#else
const char build_type[] = "debug";
#endif
out << indent << FormatKV("library_build_type", build_type) << "\n";
// Close context block and open the list of benchmarks.
out << inner_indent << "},\n";
out << inner_indent << "\"benchmarks\": [\n";
return true;
}
void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
if (reports.empty()) {
return;
}
std::string indent(4, ' ');
std::ostream& out = GetOutputStream();
if (!first_report_) {
out << ",\n";
}
first_report_ = false;
for (auto it = reports.begin(); it != reports.end(); ++it) {
out << indent << "{\n";
PrintRunData(*it);
out << indent << '}';
auto it_cp = it;
if (++it_cp != reports.end()) {
out << ",\n";
}
}
}
void JSONReporter::Finalize() {
// Close the list of benchmarks and the top level object.
GetOutputStream() << "\n ]\n}\n";
}
void JSONReporter::PrintRunData(Run const& run) {
std::string indent(6, ' ');
std::ostream& out = GetOutputStream();
out << indent << FormatKV("name", run.benchmark_name) << ",\n";
if (run.error_occurred) {
out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
out << indent << FormatKV("error_message", run.error_message) << ",\n";
}
if (!run.report_big_o && !run.report_rms) {
out << indent << FormatKV("iterations", run.iterations) << ",\n";
out << indent
<< FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime()))
<< ",\n";
out << indent
<< FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
out << ",\n"
<< indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_big_o) {
out << indent
<< FormatKV("cpu_coefficient", RoundDouble(run.GetAdjustedCPUTime()))
<< ",\n";
out << indent
<< FormatKV("real_coefficient", RoundDouble(run.GetAdjustedRealTime()))
<< ",\n";
out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_rms) {
out << indent
<< FormatKV("rms", run.GetAdjustedCPUTime());
}
if (run.bytes_per_second > 0.0) {
out << ",\n"
<< indent
<< FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second));
}
if (run.items_per_second > 0.0) {
out << ",\n"
<< indent
<< FormatKV("items_per_second", RoundDouble(run.items_per_second));
}
for(auto &c : run.counters) {
out << ",\n"
<< indent
<< FormatKV(c.first, RoundDouble(c.second));
}
if (!run.report_label.empty()) {
out << ",\n" << indent << FormatKV("label", run.report_label);
}
out << '\n';
}
} // end namespace benchmark
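Putting ReportContext, PrintRunData and Finalize together, the emitted document has this overall shape (field names as produced above; the values here are schematic placeholders, not real measurements):

{
  "context": {
    "date": "...",
    "num_cpus": 8,
    "mhz_per_cpu": 2600,
    "cpu_scaling_enabled": false,
    "library_build_type": "release"
  },
  "benchmarks": [
    {
      "name": "BM_Example/64",
      "iterations": 1000000,
      "real_time": 123,
      "cpu_time": 122,
      "time_unit": "ns"
    }
  ]
}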

73 vendor/github.com/google/benchmark/src/log.h generated vendored Normal file
@@ -0,0 +1,73 @@
#ifndef BENCHMARK_LOG_H_
#define BENCHMARK_LOG_H_
#include <iostream>
#include <ostream>
#include "benchmark/benchmark.h"
namespace benchmark {
namespace internal {
typedef std::basic_ostream<char>&(EndLType)(std::basic_ostream<char>&);
class LogType {
friend LogType& GetNullLogInstance();
friend LogType& GetErrorLogInstance();
// FIXME: Add locking to output.
template <class Tp>
friend LogType& operator<<(LogType&, Tp const&);
friend LogType& operator<<(LogType&, EndLType*);
private:
LogType(std::ostream* out) : out_(out) {}
std::ostream* out_;
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
};
template <class Tp>
LogType& operator<<(LogType& log, Tp const& value) {
if (log.out_) {
*log.out_ << value;
}
return log;
}
inline LogType& operator<<(LogType& log, EndLType* m) {
if (log.out_) {
*log.out_ << m;
}
return log;
}
inline int& LogLevel() {
static int log_level = 0;
return log_level;
}
inline LogType& GetNullLogInstance() {
static LogType log(nullptr);
return log;
}
inline LogType& GetErrorLogInstance() {
static LogType log(&std::clog);
return log;
}
inline LogType& GetLogInstanceForLevel(int level) {
if (level <= LogLevel()) {
return GetErrorLogInstance();
}
return GetNullLogInstance();
}
} // end namespace internal
} // end namespace benchmark
#define VLOG(x) \
(::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \
" ")
#endif
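Usage sketch: VLOG(n) writes to std::clog when n is at or below the current LogLevel() and is swallowed by the null log otherwise (the include path is illustrative for this internal header):

#include <ostream>
#include "log.h"  // internal header; include path is illustrative

void Example() {
  benchmark::internal::LogLevel() = 2;
  VLOG(1) << "printed: 1 <= 2" << std::endl;
  VLOG(3) << "swallowed: 3 > 2" << std::endl;
}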

155 vendor/github.com/google/benchmark/src/mutex.h generated vendored Normal file
@@ -0,0 +1,155 @@
#ifndef BENCHMARK_MUTEX_H_
#define BENCHMARK_MUTEX_H_
#include <condition_variable>
#include <mutex>
#include "check.h"
// Enable thread safety attributes only with clang.
// The attributes can be safely erased when compiling with other compilers.
#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif
#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
#define ACQUIRED_BEFORE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
#define ACQUIRED_AFTER(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define REQUIRES(...) \
THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
#define RELEASE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
#define TRY_ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
#define ASSERT_SHARED_CAPABILITY(x) \
THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
namespace benchmark {
typedef std::condition_variable Condition;
// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
// we can annotate them with thread safety attributes and use the
// -Wthread-safety warning with clang. The standard library types cannot be
// used directly because they do not provide the required annotations.
class CAPABILITY("mutex") Mutex {
public:
Mutex() {}
void lock() ACQUIRE() { mut_.lock(); }
void unlock() RELEASE() { mut_.unlock(); }
std::mutex& native_handle() { return mut_; }
private:
std::mutex mut_;
};
class SCOPED_CAPABILITY MutexLock {
typedef std::unique_lock<std::mutex> MutexLockImp;
public:
MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {}
~MutexLock() RELEASE() {}
MutexLockImp& native_handle() { return ml_; }
private:
MutexLockImp ml_;
};
class Barrier {
public:
Barrier(int num_threads) : running_threads_(num_threads) {}
// Called by each thread
bool wait() EXCLUDES(lock_) {
bool last_thread = false;
{
MutexLock ml(lock_);
last_thread = createBarrier(ml);
}
if (last_thread) phase_condition_.notify_all();
return last_thread;
}
void removeThread() EXCLUDES(lock_) {
MutexLock ml(lock_);
--running_threads_;
if (entered_ != 0) phase_condition_.notify_all();
}
private:
Mutex lock_;
Condition phase_condition_;
int running_threads_;
// State for barrier management
int phase_number_ = 0;
int entered_ = 0; // Number of threads that have entered this barrier
// Enter the barrier and wait until all other threads have also
// entered the barrier. Returns true iff this is the last thread to
// enter the barrier.
bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
CHECK_LT(entered_, running_threads_);
entered_++;
if (entered_ < running_threads_) {
// Wait for all threads to enter
int phase_number_cp = phase_number_;
auto cb = [this, phase_number_cp]() {
return this->phase_number_ > phase_number_cp ||
entered_ == running_threads_; // A thread has aborted in error
};
phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp) return false;
// else (running_threads_ == entered_) and we are the last thread.
}
// Last thread has reached the barrier
phase_number_++;
entered_ = 0;
return true;
}
};
} // end namespace benchmark
#endif // BENCHMARK_MUTEX_H_
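A sketch of the Barrier in isolation: every thread blocks in wait(), and exactly one (the last to arrive) sees true and can do one-time per-phase work (the include path is illustrative for this internal header):

#include <thread>
#include <vector>
#include "mutex.h"  // internal header; include path is illustrative

static void Worker(benchmark::Barrier* barrier) {
  // ... per-thread setup ...
  if (barrier->wait()) {
    // Only the last thread to arrive runs this one-time section.
  }
  // All threads proceed together from here.
}

int main() {
  const int kThreads = 4;
  benchmark::Barrier barrier(kThreads);
  std::vector<std::thread> threads;
  for (int i = 0; i < kThreads; ++i) threads.emplace_back(Worker, &barrier);
  for (auto& t : threads) t.join();
  return 0;
}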

140 vendor/github.com/google/benchmark/src/re.h generated vendored Normal file
@@ -0,0 +1,140 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BENCHMARK_RE_H_
#define BENCHMARK_RE_H_
#include "internal_macros.h"
// Prefer C regex libraries when compiling w/o exceptions so that we can
// correctly report errors.
#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && defined(HAVE_STD_REGEX) && \
(defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX))
#undef HAVE_STD_REGEX
#endif
#if defined(HAVE_STD_REGEX)
#include <regex>
#elif defined(HAVE_GNU_POSIX_REGEX)
#include <gnuregex.h>
#elif defined(HAVE_POSIX_REGEX)
#include <regex.h>
#else
#error No regular expression backend was found!
#endif
#include <string>
#include "check.h"
namespace benchmark {
// A wrapper around the POSIX regular expression API that provides automatic
// cleanup
class Regex {
public:
Regex() : init_(false) {}
~Regex();
// Compile a regular expression matcher from spec. Returns true on success.
//
// On failure (and if error is not nullptr), error is populated with a human
// readable error message if an error occurs.
bool Init(const std::string& spec, std::string* error);
// Returns whether str matches the compiled regular expression.
bool Match(const std::string& str);
private:
bool init_;
// Underlying regular expression object
#if defined(HAVE_STD_REGEX)
std::regex re_;
#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
regex_t re_;
#else
#error No regular expression backend implementation available
#endif
};
#if defined(HAVE_STD_REGEX)
inline bool Regex::Init(const std::string& spec, std::string* error) {
#ifdef BENCHMARK_HAS_NO_EXCEPTIONS
((void)error); // suppress unused warning
#else
try {
#endif
re_ = std::regex(spec, std::regex_constants::extended);
init_ = true;
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
} catch (const std::regex_error& e) {
if (error) {
*error = e.what();
}
}
#endif
return init_;
}
inline Regex::~Regex() {}
inline bool Regex::Match(const std::string& str) {
if (!init_) {
return false;
}
return std::regex_search(str, re_);
}
#else
inline bool Regex::Init(const std::string& spec, std::string* error) {
int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
if (error) {
size_t needed = regerror(ec, &re_, nullptr, 0);
char* errbuf = new char[needed];
regerror(ec, &re_, errbuf, needed);
// regerror returns the number of bytes necessary to null terminate
// the string, so we drop the trailing null byte when assigning to error.
CHECK_NE(needed, 0);
error->assign(errbuf, needed - 1);
delete[] errbuf;
}
return false;
}
init_ = true;
return true;
}
inline Regex::~Regex() {
if (init_) {
regfree(&re_);
}
}
inline bool Regex::Match(const std::string& str) {
if (!init_) {
return false;
}
return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
}
#endif
} // end namespace benchmark
#endif // BENCHMARK_RE_H_
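Whichever backend is compiled in, the wrapper is used the same way; a minimal sketch (include path illustrative, pattern hypothetical):

#include <iostream>
#include <string>
#include "re.h"  // internal header; include path is illustrative

int main() {
  benchmark::Regex re;
  std::string err;
  // POSIX extended syntax, matching the flags used by both backends above.
  if (!re.Init("^BM_[A-Za-z0-9_]+/[0-9]+$", &err)) {
    std::cerr << "bad regex: " << err << '\n';
    return 1;
  }
  std::cout << re.Match("BM_SortVector/1024") << '\n';  // 1
  std::cout << re.Match("not_a_benchmark") << '\n';     // 0
  return 0;
}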

68 vendor/github.com/google/benchmark/src/reporter.cc generated vendored Normal file
@@ -0,0 +1,68 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "timers.h"
#include <cstdlib>
#include <iostream>
#include <tuple>
#include <vector>
#include "check.h"
#include "stat.h"
namespace benchmark {
BenchmarkReporter::BenchmarkReporter()
: output_stream_(&std::cout), error_stream_(&std::cerr) {}
BenchmarkReporter::~BenchmarkReporter() {}
void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Context const &context) {
CHECK(out) << "cannot be null";
auto &Out = *out;
Out << "Run on (" << context.num_cpus << " X " << context.mhz_per_cpu
<< " MHz CPU " << ((context.num_cpus > 1) ? "s" : "") << ")\n";
Out << LocalDateTimeString() << "\n";
if (context.cpu_scaling_enabled) {
Out << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra "
"overhead.\n";
}
#ifndef NDEBUG
Out << "***WARNING*** Library was built as DEBUG. Timings may be "
"affected.\n";
#endif
}
double BenchmarkReporter::Run::GetAdjustedRealTime() const {
double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0) new_time /= static_cast<double>(iterations);
return new_time;
}
double BenchmarkReporter::Run::GetAdjustedCPUTime() const {
double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0) new_time /= static_cast<double>(iterations);
return new_time;
}
} // end namespace benchmark
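As a worked example of the adjustment above: with time_unit = kNanosecond (multiplier 1e9), real_accumulated_time = 2.5 s and iterations = 1,000,000, GetAdjustedRealTime() yields 2.5 * 1e9 / 1e6 = 2500 ns per iteration.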

51 vendor/github.com/google/benchmark/src/sleep.cc generated vendored Normal file
@@ -0,0 +1,51 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sleep.h"
#include <cerrno>
#include <cstdlib>
#include <ctime>
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <Windows.h>
#endif
namespace benchmark {
#ifdef BENCHMARK_OS_WINDOWS
// Windows' Sleep takes a milliseconds argument.
void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); }
void SleepForSeconds(double seconds) {
SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
}
#else // BENCHMARK_OS_WINDOWS
void SleepForMicroseconds(int microseconds) {
struct timespec sleep_time;
sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
; // Ignore signals and wait for the full interval to elapse.
}
void SleepForMilliseconds(int milliseconds) {
SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
}
void SleepForSeconds(double seconds) {
SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
}
#endif // BENCHMARK_OS_WINDOWS
} // end namespace benchmark

15 vendor/github.com/google/benchmark/src/sleep.h generated vendored Normal file
@@ -0,0 +1,15 @@
#ifndef BENCHMARK_SLEEP_H_
#define BENCHMARK_SLEEP_H_
namespace benchmark {
const int kNumMillisPerSecond = 1000;
const int kNumMicrosPerMilli = 1000;
const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;
const int kNumNanosPerMicro = 1000;
const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;
void SleepForMilliseconds(int milliseconds);
void SleepForSeconds(double seconds);
} // end namespace benchmark
#endif // BENCHMARK_SLEEP_H_

310 vendor/github.com/google/benchmark/src/stat.h generated vendored Normal file
@@ -0,0 +1,310 @@
#ifndef BENCHMARK_STAT_H_
#define BENCHMARK_STAT_H_
#include <cmath>
#include <limits>
#include <ostream>
#include <type_traits>
namespace benchmark {
template <typename VType, typename NumType>
class Stat1;
template <typename VType, typename NumType>
class Stat1MinMax;
typedef Stat1<float, int64_t> Stat1_f;
typedef Stat1<double, int64_t> Stat1_d;
typedef Stat1MinMax<float, int64_t> Stat1MinMax_f;
typedef Stat1MinMax<double, int64_t> Stat1MinMax_d;
template <typename VType>
class Vector2;
template <typename VType>
class Vector3;
template <typename VType>
class Vector4;
template <typename VType, typename NumType>
class Stat1 {
public:
typedef Stat1<VType, NumType> Self;
Stat1() { Clear(); }
// Create a sample of value dat and weight 1
explicit Stat1(const VType &dat) {
sum_ = dat;
sum_squares_ = Sqr(dat);
numsamples_ = 1;
}
// Create statistics for all the samples between begin (included)
// and end(excluded)
explicit Stat1(const VType *begin, const VType *end) {
Clear();
for (const VType *item = begin; item < end; ++item) {
(*this) += Stat1(*item);
}
}
// Create a sample of value dat and weight w
Stat1(const VType &dat, const NumType &w) {
sum_ = w * dat;
sum_squares_ = w * Sqr(dat);
numsamples_ = w;
}
// Copy operator
Stat1(const Self &stat) {
sum_ = stat.sum_;
sum_squares_ = stat.sum_squares_;
numsamples_ = stat.numsamples_;
}
void Clear() {
numsamples_ = NumType();
sum_squares_ = sum_ = VType();
}
Self &operator=(const Self &stat) {
sum_ = stat.sum_;
sum_squares_ = stat.sum_squares_;
numsamples_ = stat.numsamples_;
return (*this);
}
// Merge statistics from two sample sets.
Self &operator+=(const Self &stat) {
sum_ += stat.sum_;
sum_squares_ += stat.sum_squares_;
numsamples_ += stat.numsamples_;
return (*this);
}
// The operation opposite to +=
Self &operator-=(const Self &stat) {
sum_ -= stat.sum_;
sum_squares_ -= stat.sum_squares_;
numsamples_ -= stat.numsamples_;
return (*this);
}
// Multiply the weight of the set of samples by a factor k
Self &operator*=(const VType &k) {
sum_ *= k;
sum_squares_ *= k;
numsamples_ *= k;
return (*this);
}
// Merge statistics from two sample sets.
Self operator+(const Self &stat) const { return Self(*this) += stat; }
// The operation opposite to +
Self operator-(const Self &stat) const { return Self(*this) -= stat; }
// Multiply the weight of the set of samples by a factor k
Self operator*(const VType &k) const { return Self(*this) *= k; }
// Return the total weight of this sample set
NumType numSamples() const { return numsamples_; }
// Return the sum of this sample set
VType Sum() const { return sum_; }
// Return the mean of this sample set
VType Mean() const {
if (numsamples_ == 0) return VType();
return sum_ * (1.0 / numsamples_);
}
// Return the mean of this sample set and compute the standard deviation at
// the same time.
VType Mean(VType *stddev) const {
if (numsamples_ == 0) return VType();
VType mean = sum_ * (1.0 / numsamples_);
if (stddev) {
// Sample standard deviation is undefined for n = 1
if (numsamples_ == 1) {
*stddev = VType();
} else {
VType avg_squares = sum_squares_ * (1.0 / numsamples_);
*stddev = Sqrt(numsamples_ / (numsamples_ - 1.0) * (avg_squares - Sqr(mean)));
}
}
return mean;
}
// Return the standard deviation of the sample set
VType StdDev() const {
VType stddev = VType();
Mean(&stddev);
return stddev;
}
private:
static_assert(std::is_integral<NumType>::value &&
!std::is_same<NumType, bool>::value,
"NumType must be an integral type that is not bool.");
// Let i be the index of the samples provided (using +=)
// and weight[i],value[i] be the data of sample #i
// then the variables have the following meaning:
NumType numsamples_; // sum of weight[i];
VType sum_; // sum of weight[i]*value[i];
VType sum_squares_; // sum of weight[i]*value[i]^2;
// Template function used to square a number.
// For a vector we square all components
template <typename SType>
static inline SType Sqr(const SType &dat) {
return dat * dat;
}
template <typename SType>
static inline Vector2<SType> Sqr(const Vector2<SType> &dat) {
return dat.MulComponents(dat);
}
template <typename SType>
static inline Vector3<SType> Sqr(const Vector3<SType> &dat) {
return dat.MulComponents(dat);
}
template <typename SType>
static inline Vector4<SType> Sqr(const Vector4<SType> &dat) {
return dat.MulComponents(dat);
}
// Template function used to take the square root of a number.
// For a vector we square all components
template <typename SType>
static inline SType Sqrt(const SType &dat) {
// Avoid NaN due to imprecision in the calculations
if (dat < 0) return 0;
return sqrt(dat);
}
template <typename SType>
static inline Vector2<SType> Sqrt(const Vector2<SType> &dat) {
// Avoid NaN due to imprecision in the calculations
return Max(dat, Vector2<SType>()).Sqrt();
}
template <typename SType>
static inline Vector3<SType> Sqrt(const Vector3<SType> &dat) {
// Avoid NaN due to imprecision in the calculations
return Max(dat, Vector3<SType>()).Sqrt();
}
template <typename SType>
static inline Vector4<SType> Sqrt(const Vector4<SType> &dat) {
// Avoid NaN due to imprecision in the calculations
return Max(dat, Vector4<SType>()).Sqrt();
}
};
// Useful printing function
template <typename VType, typename NumType>
std::ostream &operator<<(std::ostream &out, const Stat1<VType, NumType> &s) {
out << "{ avg = " << s.Mean() << " std = " << s.StdDev()
<< " nsamples = " << s.NumSamples() << "}";
return out;
}
// Stat1MinMax: same as Stat1, but it also
// keeps the Min and Max values; the "-"
// operator is disabled because it cannot be implemented
// efficiently
template <typename VType, typename NumType>
class Stat1MinMax : public Stat1<VType, NumType> {
public:
typedef Stat1MinMax<VType, NumType> Self;
Stat1MinMax() { Clear(); }
// Create a sample of value dat and weight 1
explicit Stat1MinMax(const VType &dat) : Stat1<VType, NumType>(dat) {
max_ = dat;
min_ = dat;
}
// Create statistics for all the samples between begin (included)
// and end(excluded)
explicit Stat1MinMax(const VType *begin, const VType *end) {
Clear();
for (const VType *item = begin; item < end; ++item) {
(*this) += Stat1MinMax(*item);
}
}
// Create a sample of value dat and weight w
Stat1MinMax(const VType &dat, const NumType &w)
: Stat1<VType, NumType>(dat, w) {
max_ = dat;
min_ = dat;
}
// Copy operator
Stat1MinMax(const Self &stat) : Stat1<VType, NumType>(stat) {
max_ = stat.max_;
min_ = stat.min_;
}
void Clear() {
Stat1<VType, NumType>::Clear();
if (std::numeric_limits<VType>::has_infinity) {
min_ = std::numeric_limits<VType>::infinity();
max_ = -std::numeric_limits<VType>::infinity();
} else {
min_ = std::numeric_limits<VType>::max();
max_ = std::numeric_limits<VType>::min();
}
}
Self &operator=(const Self &stat) {
this->Stat1<VType, NumType>::operator=(stat);
max_ = stat.max_;
min_ = stat.min_;
return (*this);
}
// Merge statistics from two sample sets.
Self &operator+=(const Self &stat) {
this->Stat1<VType, NumType>::operator+=(stat);
if (stat.max_ > max_) max_ = stat.max_;
if (stat.min_ < min_) min_ = stat.min_;
return (*this);
}
// Multiply the weight of the set of samples by a factor k
Self &operator*=(const VType &stat) {
this->Stat1<VType, NumType>::operator*=(stat);
return (*this);
}
// Merge statistics from two sample sets.
Self operator+(const Self &stat) const { return Self(*this) += stat; }
// Multiply the weight of the set of samples by a factor k
Self operator*(const VType &k) const { return Self(*this) *= k; }
// Return the maximal value in this sample set
VType Max() const { return max_; }
// Return the minimal value in this sample set
VType Min() const { return min_; }
private:
// The - operation makes no sense with Min/Max
// unless we keep the full list of values (but we don't),
// so we make these private and leave them undefined so nobody can call them.
Self &operator-=(const Self &stat); // senseless; left undefined.
// The operation opposite to -
Self operator-(const Self &stat) const; // senseless; left undefined.
// Let i be the index of the samples provided (using +=)
// and weight[i],value[i] be the data of sample #i
// then the variables have the following meaning:
VType max_; // max of value[i]
VType min_; // min of value[i]
};
// Useful printing function
template <typename VType, typename NumType>
std::ostream &operator<<(std::ostream &out,
const Stat1MinMax<VType, NumType> &s) {
out << "{ avg = " << s.Mean() << " std = " << s.StdDev()
<< " nsamples = " << s.NumSamples() << " min = " << s.Min()
<< " max = " << s.Max() << "}";
return out;
}
} // end namespace benchmark
#endif // BENCHMARK_STAT_H_
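A worked example of the weighted statistics (include path illustrative for this internal header): three unit-weight samples 1, 2, 3 give sum_ = 6 and sum_squares_ = 14, so Mean() = 2 and the sample standard deviation is sqrt(3/2 * (14/3 - 4)) = 1:

#include <iostream>
#include "stat.h"  // internal header; include path is illustrative

int main() {
  benchmark::Stat1_d s;
  s += benchmark::Stat1_d(1.0);
  s += benchmark::Stat1_d(2.0);
  s += benchmark::Stat1_d(3.0);
  std::cout << s.Mean() << ' ' << s.StdDev() << '\n';  // prints: 2 1
  return 0;
}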

172 vendor/github.com/google/benchmark/src/string_util.cc generated vendored Normal file
@@ -0,0 +1,172 @@
#include "string_util.h"
#include <array>
#include <cmath>
#include <cstdarg>
#include <cstdio>
#include <memory>
#include <sstream>
#include "arraysize.h"
namespace benchmark {
namespace {
// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
const char kBigSIUnits[] = "kMGTPEZY";
// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi.
const char kBigIECUnits[] = "KMGTPEZY";
// milli, micro, nano, pico, femto, atto, zepto, yocto.
const char kSmallSIUnits[] = "munpfazy";
// We require that all three arrays have the same size.
static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits),
"SI and IEC unit arrays must be the same size");
static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
"Small SI and Big SI unit arrays must be the same size");
static const int64_t kUnitsSize = arraysize(kBigSIUnits);
} // end anonymous namespace
void ToExponentAndMantissa(double val, double thresh, int precision,
double one_k, std::string* mantissa,
int64_t* exponent) {
std::stringstream mantissa_stream;
if (val < 0) {
mantissa_stream << "-";
val = -val;
}
// Adjust threshold so that it never excludes things which can't be rendered
// in 'precision' digits.
const double adjusted_threshold =
std::max(thresh, 1.0 / std::pow(10.0, precision));
const double big_threshold = adjusted_threshold * one_k;
const double small_threshold = adjusted_threshold;
// Values in the open interval (simple_threshold, small_threshold) will be printed as-is
const double simple_threshold = 0.01;
if (val > big_threshold) {
// Positive powers
double scaled = val;
for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) {
scaled /= one_k;
if (scaled <= big_threshold) {
mantissa_stream << scaled;
*exponent = i + 1;
*mantissa = mantissa_stream.str();
return;
}
}
mantissa_stream << val;
*exponent = 0;
} else if (val < small_threshold) {
// Negative powers
if (val < simple_threshold) {
double scaled = val;
for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) {
scaled *= one_k;
if (scaled >= small_threshold) {
mantissa_stream << scaled;
*exponent = -static_cast<int64_t>(i + 1);
*mantissa = mantissa_stream.str();
return;
}
}
}
mantissa_stream << val;
*exponent = 0;
} else {
mantissa_stream << val;
*exponent = 0;
}
*mantissa = mantissa_stream.str();
}
std::string ExponentToPrefix(int64_t exponent, bool iec) {
if (exponent == 0) return "";
const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
if (index >= kUnitsSize) return "";
const char* array =
(exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
if (iec)
return array[index] + std::string("i");
else
return std::string(1, array[index]);
}
std::string ToBinaryStringFullySpecified(double value, double threshold,
int precision) {
std::string mantissa;
int64_t exponent;
ToExponentAndMantissa(value, threshold, precision, 1024.0, &mantissa,
&exponent);
return mantissa + ExponentToPrefix(exponent, false);
}
void AppendHumanReadable(int n, std::string* str) {
std::stringstream ss;
// Round down to the nearest SI prefix.
ss << ToBinaryStringFullySpecified(n, 1.0, 0);
*str += ss.str();
}
std::string HumanReadableNumber(double n) {
// 1.1 means that figures up to 1.1k should be shown with the next unit down;
// this softens edge effects.
// 1 means that we should show one decimal place of precision.
return ToBinaryStringFullySpecified(n, 1.1, 1);
}
std::string StringPrintFImp(const char* msg, va_list args) {
// we might need a second shot at this, so preemptively make a copy
va_list args_cp;
va_copy(args_cp, args);
// TODO(ericwf): use std::array for the first attempt to avoid one memory
// allocation; guess what the size might be.
std::array<char, 256> local_buff;
std::size_t size = local_buff.size();
// 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
// in the android-ndk
auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
va_end(args_cp);
// handle empty expansion
if (ret == 0) return std::string{};
if (static_cast<std::size_t>(ret) < size)
return std::string(local_buff.data());
// we did not provide a long enough buffer on our first attempt.
// add 1 to size to account for the null byte; the size_t cast prevents overflow
size = static_cast<std::size_t>(ret) + 1;
auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
// 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
// in the android-ndk
ret = vsnprintf(buff_ptr.get(), size, msg, args);
return std::string(buff_ptr.get());
}
std::string StringPrintF(const char* format, ...) {
va_list args;
va_start(args, format);
std::string tmp = StringPrintFImp(format, args);
va_end(args);
return tmp;
}
void ReplaceAll(std::string* str, const std::string& from,
const std::string& to) {
std::size_t start = 0;
while ((start = str->find(from, start)) != std::string::npos) {
str->replace(start, from.length(), to);
start += to.length();
}
}
} // end namespace benchmark
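A usage sketch for the helpers above (include path illustrative for this internal header). Note that HumanReadableNumber scales by 1024 but prints SI-style letters, since ToBinaryStringFullySpecified calls ExponentToPrefix with iec=false:

#include <iostream>
#include "string_util.h"  // internal header; include path is illustrative

int main() {
  // 10000 / 1024 = 9.765625 -> prints something like "9.76562k"
  std::cout << benchmark::HumanReadableNumber(10000.0) << '\n';
  // StrCat streams its arguments through one ostringstream:
  std::cout << benchmark::StrCat("rate=", 42, "/s") << '\n';  // rate=42/s
  return 0;
}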

40 vendor/github.com/google/benchmark/src/string_util.h generated vendored Normal file
@@ -0,0 +1,40 @@
#ifndef BENCHMARK_STRING_UTIL_H_
#define BENCHMARK_STRING_UTIL_H_
#include <sstream>
#include <string>
#include <utility>
#include "internal_macros.h"
namespace benchmark {
void AppendHumanReadable(int n, std::string* str);
std::string HumanReadableNumber(double n);
std::string StringPrintF(const char* format, ...);
inline std::ostream& StringCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
return out;
}
template <class First, class... Rest>
inline std::ostream& StringCatImp(std::ostream& out, First&& f,
Rest&&... rest) {
out << std::forward<First>(f);
return StringCatImp(out, std::forward<Rest>(rest)...);
}
template <class... Args>
inline std::string StrCat(Args&&... args) {
std::ostringstream ss;
StringCatImp(ss, std::forward<Args>(args)...);
return ss.str();
}
void ReplaceAll(std::string* str, const std::string& from,
const std::string& to);
} // end namespace benchmark
#endif // BENCHMARK_STRING_UTIL_H_

355 vendor/github.com/google/benchmark/src/sysinfo.cc generated vendored Normal file
@@ -0,0 +1,355 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sysinfo.h"
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <Shlwapi.h>
#include <VersionHelpers.h>
#include <Windows.h>
#else
#include <fcntl.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
#include <sys/sysctl.h>
#endif
#endif
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <limits>
#include <mutex>
#include "arraysize.h"
#include "check.h"
#include "cycleclock.h"
#include "internal_macros.h"
#include "log.h"
#include "sleep.h"
#include "string_util.h"
namespace benchmark {
namespace {
std::once_flag cpuinfo_init;
double cpuinfo_cycles_per_second = 1.0;
int cpuinfo_num_cpus = 1; // Conservative guess
#if !defined BENCHMARK_OS_MACOSX
const int64_t estimate_time_ms = 1000;
// Helper function that estimates cycles/sec by observing the cycles elapsed
// during sleep(). Using a small sleep time decreases accuracy significantly.
int64_t EstimateCyclesPerSecond() {
const int64_t start_ticks = cycleclock::Now();
SleepForMilliseconds(estimate_time_ms);
return cycleclock::Now() - start_ticks;
}
#endif
#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
// Helper function for reading an int from a file. Returns true if successful
// and the memory location pointed to by value is set to the value read.
bool ReadIntFromFile(const char* file, long* value) {
bool ret = false;
int fd = open(file, O_RDONLY);
if (fd != -1) {
char line[1024];
char* err;
memset(line, '\0', sizeof(line));
ssize_t read_err = read(fd, line, sizeof(line) - 1);
((void)read_err); // prevent unused warning
CHECK(read_err >= 0);
const long temp_value = strtol(line, &err, 10);
if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
*value = temp_value;
ret = true;
}
close(fd);
}
return ret;
}
#endif
#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
static std::string convertToLowerCase(std::string s) {
for (auto& ch : s)
ch = std::tolower(ch);
return s;
}
static bool startsWithKey(std::string Value, std::string Key,
bool IgnoreCase = true) {
if (IgnoreCase) {
Key = convertToLowerCase(std::move(Key));
Value = convertToLowerCase(std::move(Value));
}
return Value.compare(0, Key.size(), Key) == 0;
}
#endif
void InitializeSystemInfo() {
#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
char line[1024];
char* err;
long freq;
bool saw_mhz = false;
// If the kernel is exporting the tsc frequency use that. There are issues
// where cpuinfo_max_freq cannot be relied on because the BIOS may be
// exporting an invalid p-state (on x86) or p-states may be used to put the
// processor in a new mode (turbo mode). Essentially, those frequencies
// cannot always be relied upon. The same reasons apply to /proc/cpuinfo as
// well.
if (!saw_mhz &&
ReadIntFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
// The value is in kHz (as the file name suggests). For example, on a
// 2GHz warpstation, the file contains the value "2000000".
cpuinfo_cycles_per_second = freq * 1000.0;
saw_mhz = true;
}
// If CPU scaling is in effect, we want to use the *maximum* frequency,
// not whatever CPU speed some random processor happens to be using now.
if (!saw_mhz &&
ReadIntFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
&freq)) {
// The value is in kHz. For example, on a 2GHz warpstation, the file
// contains the value "2000000".
cpuinfo_cycles_per_second = freq * 1000.0;
saw_mhz = true;
}
// Read /proc/cpuinfo for other values, and for the frequency if cpuinfo_max_freq was not available.
const char* pname = "/proc/cpuinfo";
int fd = open(pname, O_RDONLY);
if (fd == -1) {
perror(pname);
if (!saw_mhz) {
cpuinfo_cycles_per_second =
static_cast<double>(EstimateCyclesPerSecond());
}
return;
}
double bogo_clock = 1.0;
bool saw_bogo = false;
long max_cpu_id = 0;
int num_cpus = 0;
line[0] = line[1] = '\0';
ssize_t chars_read = 0;
do { // we'll exit when the last read didn't read anything
// Move the next line to the beginning of the buffer
const size_t oldlinelen = strlen(line);
if (sizeof(line) == oldlinelen + 1) // oldlinelen took up entire line
line[0] = '\0';
else // still other lines left to save
memmove(line, line + oldlinelen + 1, sizeof(line) - (oldlinelen + 1));
// Terminate the new line, reading more if we can't find the newline
char* newline = strchr(line, '\n');
if (newline == nullptr) {
const size_t linelen = strlen(line);
const size_t bytes_to_read = sizeof(line) - 1 - linelen;
CHECK(bytes_to_read > 0); // because the memmove recovered >=1 bytes
chars_read = read(fd, line + linelen, bytes_to_read);
if (chars_read < 0) chars_read = 0; // treat a read error as end of input
line[linelen + chars_read] = '\0';
newline = strchr(line, '\n');
}
if (newline != nullptr) *newline = '\0';
// When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
// accept positive values. Some environments (virtual machines) report zero,
// which would cause infinite looping in WallTime_Init.
if (!saw_mhz && startsWithKey(line, "cpu MHz")) {
const char* freqstr = strchr(line, ':');
if (freqstr) {
cpuinfo_cycles_per_second = strtod(freqstr + 1, &err) * 1000000.0;
if (freqstr[1] != '\0' && *err == '\0' && cpuinfo_cycles_per_second > 0)
saw_mhz = true;
}
} else if (startsWithKey(line, "bogomips")) {
const char* freqstr = strchr(line, ':');
if (freqstr) {
bogo_clock = strtod(freqstr + 1, &err) * 1000000.0;
if (freqstr[1] != '\0' && *err == '\0' && bogo_clock > 0)
saw_bogo = true;
}
} else if (startsWithKey(line, "processor", /*IgnoreCase*/false)) {
// The above comparison is case-sensitive because ARM kernels often
// include a "Processor" line that tells you about the CPU, distinct
// from the usual "processor" lines that give you CPU ids. No current
// Linux architecture is using "Processor" for CPU ids.
num_cpus++; // count up every time we see a "processor :" entry
const char* id_str = strchr(line, ':');
if (id_str) {
const long cpu_id = strtol(id_str + 1, &err, 10);
if (id_str[1] != '\0' && *err == '\0' && max_cpu_id < cpu_id)
max_cpu_id = cpu_id;
}
}
} while (chars_read > 0);
close(fd);
if (!saw_mhz) {
if (saw_bogo) {
// If we didn't find anything better, we'll use bogomips, but
// we're not happy about it.
cpuinfo_cycles_per_second = bogo_clock;
} else {
// If we don't even have bogomips, we'll use the slow estimation.
cpuinfo_cycles_per_second =
static_cast<double>(EstimateCyclesPerSecond());
}
}
if (num_cpus == 0) {
fprintf(stderr, "Failed to read num. CPUs correctly from /proc/cpuinfo\n");
} else {
if ((max_cpu_id + 1) != num_cpus) {
fprintf(stderr,
"CPU ID assignments in /proc/cpuinfo seem messed up."
" This is usually caused by a bad BIOS.\n");
}
cpuinfo_num_cpus = num_cpus;
}
#elif defined BENCHMARK_OS_FREEBSD
// For this sysctl to work, the machine must be configured without
// SMP, APIC, or APM support. hz should be 64-bit in freebsd 7.0
// and later. Before that, it's a 32-bit quantity (and gives the
// wrong answer on machines faster than 2^32 Hz). See
// http://lists.freebsd.org/pipermail/freebsd-i386/2004-November/001846.html
// But also compare FreeBSD 7.0:
// http://fxr.watson.org/fxr/source/i386/i386/tsc.c?v=RELENG70#L223
// 231 error = sysctl_handle_quad(oidp, &freq, 0, req);
// To FreeBSD 6.3 (it's the same in 6-STABLE):
// http://fxr.watson.org/fxr/source/i386/i386/tsc.c?v=RELENG6#L131
// 139 error = sysctl_handle_int(oidp, &freq, sizeof(freq), req);
#if __FreeBSD__ >= 7
uint64_t hz = 0;
#else
unsigned int hz = 0;
#endif
size_t sz = sizeof(hz);
const char* sysctl_path = "machdep.tsc_freq";
if (sysctlbyname(sysctl_path, &hz, &sz, nullptr, 0) != 0) {
fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
sysctl_path, strerror(errno));
cpuinfo_cycles_per_second = static_cast<double>(EstimateCyclesPerSecond());
} else {
cpuinfo_cycles_per_second = hz;
}
// TODO: also figure out cpuinfo_num_cpus
#elif defined BENCHMARK_OS_WINDOWS
// In NT, read MHz from the registry. If we fail to do so or we're in win9x
// then make a crude estimate.
DWORD data, data_size = sizeof(data);
if (IsWindowsXPOrGreater() &&
SUCCEEDED(
SHGetValueA(HKEY_LOCAL_MACHINE,
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
"~MHz", nullptr, &data, &data_size)))
cpuinfo_cycles_per_second =
static_cast<double>((int64_t)data * (int64_t)(1000 * 1000)); // was mhz
else
cpuinfo_cycles_per_second = static_cast<double>(EstimateCyclesPerSecond());
SYSTEM_INFO sysinfo;
// Use memset as opposed to = {} to avoid GCC missing initializer false
// positives.
std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
GetSystemInfo(&sysinfo);
cpuinfo_num_cpus = sysinfo.dwNumberOfProcessors; // number of logical
// processors in the current
// group
#elif defined BENCHMARK_OS_MACOSX
int32_t num_cpus = 0;
size_t size = sizeof(num_cpus);
if (::sysctlbyname("hw.ncpu", &num_cpus, &size, nullptr, 0) == 0 &&
(size == sizeof(num_cpus))) {
cpuinfo_num_cpus = num_cpus;
} else {
fprintf(stderr, "%s\n", strerror(errno));
std::exit(EXIT_FAILURE);
}
int64_t cpu_freq = 0;
size = sizeof(cpu_freq);
if (::sysctlbyname("hw.cpufrequency", &cpu_freq, &size, nullptr, 0) == 0 &&
(size == sizeof(cpu_freq))) {
cpuinfo_cycles_per_second = cpu_freq;
} else {
#if defined BENCHMARK_OS_IOS
fprintf(stderr, "CPU frequency cannot be detected. \n");
cpuinfo_cycles_per_second = 0;
#else
fprintf(stderr, "%s\n", strerror(errno));
std::exit(EXIT_FAILURE);
#endif
}
#else
// Generic cycles per second counter
cpuinfo_cycles_per_second = static_cast<double>(EstimateCyclesPerSecond());
#endif
}
} // end namespace
double CyclesPerSecond(void) {
std::call_once(cpuinfo_init, InitializeSystemInfo);
return cpuinfo_cycles_per_second;
}
int NumCPUs(void) {
std::call_once(cpuinfo_init, InitializeSystemInfo);
return cpuinfo_num_cpus;
}
// The ""'s catch people who don't pass in a literal for "str"
#define strliterallen(str) (sizeof("" str "") - 1)
// Must use a string literal for prefix.
#define memprefix(str, len, prefix) \
((((len) >= strliterallen(prefix)) && \
std::memcmp(str, prefix, strliterallen(prefix)) == 0) \
? str + strliterallen(prefix) \
: nullptr)
bool CpuScalingEnabled() {
#ifndef BENCHMARK_OS_WINDOWS
// On Linux, the CPUfreq subsystem exposes CPU information as files on the
// local file system. If reading the exported files fails, then we may not be
// running on Linux, so we silently ignore all the read errors.
for (int cpu = 0, num_cpus = NumCPUs(); cpu < num_cpus; ++cpu) {
std::string governor_file =
StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
FILE* file = fopen(governor_file.c_str(), "r");
if (!file) break;
char buff[16];
size_t bytes_read = fread(buff, 1, sizeof(buff), file);
fclose(file);
if (memprefix(buff, bytes_read, "performance") == nullptr) return true;
}
#endif
return false;
}
} // end namespace benchmark

10
vendor/github.com/google/benchmark/src/sysinfo.h generated vendored Normal file

@ -0,0 +1,10 @@
#ifndef BENCHMARK_SYSINFO_H_
#define BENCHMARK_SYSINFO_H_
namespace benchmark {
int NumCPUs();
double CyclesPerSecond();
bool CpuScalingEnabled();
} // end namespace benchmark
#endif // BENCHMARK_SYSINFO_H_
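
A minimal sketch of how the three accessors above fit together, for illustration only: sysinfo.h is an internal header, so real client code would not include it directly, and the snippet assumes the library internals are linked in.

// Illustrative sketch only; all names come from the header above.
#include <cstdio>
#include "sysinfo.h"

int main() {
  // Both accessors lazily run InitializeSystemInfo() via std::call_once.
  std::printf("CPUs: %d\n", benchmark::NumCPUs());
  std::printf("cycles/sec: %.0f\n", benchmark::CyclesPerSecond());
  // A governor other than "performance" makes timings noisy (see sysinfo.cc).
  if (benchmark::CpuScalingEnabled())
    std::fprintf(stderr, "warning: CPU frequency scaling is enabled\n");
  return 0;
}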

212
vendor/github.com/google/benchmark/src/timers.cc generated vendored Normal file

@ -0,0 +1,212 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "timers.h"
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <Shlwapi.h>
#include <VersionHelpers.h>
#include <Windows.h>
#else
#include <fcntl.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
#include <sys/sysctl.h>
#endif
#if defined(BENCHMARK_OS_MACOSX)
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/thread_act.h>
#endif
#endif
#ifdef BENCHMARK_OS_EMSCRIPTEN
#include <emscripten.h>
#endif
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iostream>
#include <limits>
#include <mutex>
#include "check.h"
#include "log.h"
#include "sleep.h"
#include "string_util.h"
namespace benchmark {
// Suppress unused warnings on helper functions.
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
namespace {
#if defined(BENCHMARK_OS_WINDOWS)
double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
ULARGE_INTEGER kernel;
ULARGE_INTEGER user;
kernel.HighPart = kernel_time.dwHighDateTime;
kernel.LowPart = kernel_time.dwLowDateTime;
user.HighPart = user_time.dwHighDateTime;
user.LowPart = user_time.dwLowDateTime;
return (static_cast<double>(kernel.QuadPart) +
static_cast<double>(user.QuadPart)) *
1e-7;
}
#else
double MakeTime(struct rusage const& ru) {
return (static_cast<double>(ru.ru_utime.tv_sec) +
static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
static_cast<double>(ru.ru_stime.tv_sec) +
static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
}
#endif
#if defined(BENCHMARK_OS_MACOSX)
double MakeTime(thread_basic_info_data_t const& info) {
return (static_cast<double>(info.user_time.seconds) +
static_cast<double>(info.user_time.microseconds) * 1e-6 +
static_cast<double>(info.system_time.seconds) +
static_cast<double>(info.system_time.microseconds) * 1e-6);
}
#endif
#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
double MakeTime(struct timespec const& ts) {
return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
}
#endif
BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) {
std::cerr << "ERROR: " << msg << std::endl;
std::exit(EXIT_FAILURE);
}
} // end namespace
double ProcessCPUUsage() {
#if defined(BENCHMARK_OS_WINDOWS)
HANDLE proc = GetCurrentProcess();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time,
&user_time))
return MakeTime(kernel_time, user_time);
DiagnoseAndExit("GetProccessTimes() failed");
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
// Use Emscripten-specific API. Reported CPU time would be exactly the
// same as total time, but this is ok because there aren't long-latency
// synchronous system calls in Emscripten.
return emscripten_get_now() * 1e-3;
#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
// FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See
// https://github.com/google/benchmark/pull/292
struct timespec spec;
if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
return MakeTime(spec);
DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
#else
struct rusage ru;
if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
#endif
}
double ThreadCPUUsage() {
#if defined(BENCHMARK_OS_WINDOWS)
HANDLE this_thread = GetCurrentThread();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time,
&user_time);
return MakeTime(kernel_time, user_time);
#elif defined(BENCHMARK_OS_MACOSX)
// FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See
// https://github.com/google/benchmark/pull/292
mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
thread_basic_info_data_t info;
mach_port_t thread = pthread_mach_thread_np(pthread_self());
if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) ==
KERN_SUCCESS) {
return MakeTime(info);
}
DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// Emscripten doesn't support traditional threads
return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_RTEMS)
// RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
// https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
return ProcessCPUUsage();
#elif defined(CLOCK_THREAD_CPUTIME_ID)
struct timespec ts;
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
#else
#error Per-thread timing is not available on your system.
#endif
}
namespace {
std::string DateTimeString(bool local) {
typedef std::chrono::system_clock Clock;
std::time_t now = Clock::to_time_t(Clock::now());
const std::size_t kStorageSize = 128;
char storage[kStorageSize];
std::size_t written;
if (local) {
#if defined(BENCHMARK_OS_WINDOWS)
written =
std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
#else
std::tm timeinfo;
std::memset(&timeinfo, 0, sizeof(std::tm));
::localtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
} else {
#if defined(BENCHMARK_OS_WINDOWS)
written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
#else
std::tm timeinfo;
std::memset(&timeinfo, 0, sizeof(std::tm));
::gmtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
}
CHECK(written < kStorageSize);
((void)written); // prevent unused variable in optimized mode.
return std::string(storage);
}
} // end namespace
std::string LocalDateTimeString() { return DateTimeString(true); }
} // end namespace benchmark
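
ProcessCPUUsage() and ThreadCPUUsage() above report cumulative CPU seconds charged to the process or thread, not wall-clock time, so a region is measured by differencing two samples. A hedged sketch, again assuming the internal header is usable from the calling code:

// Illustrative sketch only; timers.h is internal to the library.
#include <cstdio>
#include "timers.h"

int main() {
  const double cpu_start = benchmark::ThreadCPUUsage();
  volatile double sink = 0;
  for (int i = 0; i < 1000000; ++i) sink = sink + i; // work being measured
  const double cpu_used = benchmark::ThreadCPUUsage() - cpu_start;
  std::printf("thread CPU seconds: %f\n", cpu_used);
  return 0;
}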

48
vendor/github.com/google/benchmark/src/timers.h generated vendored Normal file
View File

@ -0,0 +1,48 @@
#ifndef BENCHMARK_TIMERS_H
#define BENCHMARK_TIMERS_H
#include <chrono>
#include <string>
namespace benchmark {
// Return the CPU usage of the current process
double ProcessCPUUsage();
// Return the CPU usage of the children of the current process
double ChildrenCPUUsage();
// Return the CPU usage of the current thread
double ThreadCPUUsage();
#if defined(HAVE_STEADY_CLOCK)
template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady>
struct ChooseSteadyClock {
typedef std::chrono::high_resolution_clock type;
};
template <>
struct ChooseSteadyClock<false> {
typedef std::chrono::steady_clock type;
};
#endif
struct ChooseClockType {
#if defined(HAVE_STEADY_CLOCK)
typedef ChooseSteadyClock<>::type type;
#else
typedef std::chrono::high_resolution_clock type;
#endif
};
inline double ChronoClockNow() {
typedef ChooseClockType::type ClockType;
using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
return FpSeconds(ClockType::now().time_since_epoch()).count();
}
std::string LocalDateTimeString();
} // end namespace benchmark
#endif // BENCHMARK_TIMERS_H
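
The ChooseClockType machinery above resolves at compile time: when HAVE_STEADY_CLOCK is defined, high_resolution_clock is used only if it is steady, falling back to steady_clock otherwise, so measured intervals never run backwards on wall-clock adjustments. A short sketch of observing that choice (illustrative only, same internal-header caveat as above):

// Sketch: inspecting the clock selected by the header above.
#include <cstdio>
#include "timers.h"

int main() {
  using Clock = benchmark::ChooseClockType::type;
  std::printf("chosen clock is steady: %d\n", int(Clock::is_steady));
  const double start = benchmark::ChronoClockNow(); // seconds as a double
  volatile long sink = 0;
  for (long i = 0; i < 10000000; ++i) sink += i;    // work being timed
  std::printf("elapsed: %f s\n", benchmark::ChronoClockNow() - start);
  return 0;
}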

170
vendor/github.com/google/benchmark/test/CMakeLists.txt generated vendored Normal file

@ -0,0 +1,170 @@
# Enable the tests
find_package(Threads REQUIRED)
include(CheckCXXCompilerFlag)
# NOTE: Some tests use `<cassert>` to perform the test. Therefore we must
# strip -DNDEBUG from the default CMake flags when not building in DEBUG mode.
string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
add_definitions( -UNDEBUG )
add_definitions(-DTEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS)
# Also remove /D NDEBUG to avoid MSVC warnings about conflicting defines.
foreach (flags_var_to_scrub
CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_C_FLAGS_MINSIZEREL)
string (REGEX REPLACE "(^| )[/-]D *NDEBUG($| )" " "
"${flags_var_to_scrub}" "${${flags_var_to_scrub}}")
endforeach()
endif()
# NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise
# they will break the configuration check.
if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
list(APPEND CMAKE_EXE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
endif()
add_library(output_test_helper STATIC output_test_helper.cc output_test.h)
macro(compile_benchmark_test name)
add_executable(${name} "${name}.cc")
target_link_libraries(${name} benchmark ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_benchmark_test)
macro(compile_output_test name)
add_executable(${name} "${name}.cc" output_test.h)
target_link_libraries(${name} output_test_helper benchmark
${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_output_test)
# Demonstration executable
compile_benchmark_test(benchmark_test)
add_test(benchmark benchmark_test --benchmark_min_time=0.01)
compile_benchmark_test(filter_test)
macro(add_filter_test name filter expect)
add_test(${name} filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
add_test(${name}_list_only filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
endmacro(add_filter_test)
add_filter_test(filter_simple "Foo" 3)
add_filter_test(filter_suffix "BM_.*" 4)
add_filter_test(filter_regex_all ".*" 5)
add_filter_test(filter_regex_blank "" 5)
add_filter_test(filter_regex_none "monkey" 0)
add_filter_test(filter_regex_wildcard ".*Foo.*" 3)
add_filter_test(filter_regex_begin "^BM_.*" 4)
add_filter_test(filter_regex_begin2 "^N" 1)
add_filter_test(filter_regex_end ".*Ba$" 1)
compile_benchmark_test(options_test)
add_test(options_benchmarks options_test --benchmark_min_time=0.01)
compile_benchmark_test(basic_test)
add_test(basic_benchmark basic_test --benchmark_min_time=0.01)
compile_benchmark_test(diagnostics_test)
add_test(diagnostics_test diagnostics_test --benchmark_min_time=0.01)
compile_benchmark_test(skip_with_error_test)
add_test(skip_with_error_test skip_with_error_test --benchmark_min_time=0.01)
compile_benchmark_test(donotoptimize_test)
# Some of the issues with DoNotOptimize only occur when optimization is enabled
check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
if (BENCHMARK_HAS_O3_FLAG)
set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3")
endif()
add_test(donotoptimize_test donotoptimize_test --benchmark_min_time=0.01)
compile_benchmark_test(fixture_test)
add_test(fixture_test fixture_test --benchmark_min_time=0.01)
compile_benchmark_test(register_benchmark_test)
add_test(register_benchmark_test register_benchmark_test --benchmark_min_time=0.01)
compile_benchmark_test(map_test)
add_test(map_test map_test --benchmark_min_time=0.01)
compile_benchmark_test(multiple_ranges_test)
add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01)
compile_output_test(reporter_output_test)
add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01)
compile_output_test(user_counters_test)
add_test(user_counters_test user_counters_test --benchmark_min_time=0.01)
compile_output_test(user_counters_tabular_test)
add_test(user_counters_tabular_test user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)
check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
if (BENCHMARK_HAS_CXX03_FLAG)
set(CXX03_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE "-std=c++11" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
string(REPLACE "-std=c++0x" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
compile_benchmark_test(cxx03_test)
set_target_properties(cxx03_test
PROPERTIES COMPILE_FLAGS "${CXX03_FLAGS}")
add_test(cxx03 cxx03_test --benchmark_min_time=0.01)
endif()
# Attempt to work around flaky test failures when running on Appveyor servers.
if (DEFINED ENV{APPVEYOR})
set(COMPLEXITY_MIN_TIME "0.5")
else()
set(COMPLEXITY_MIN_TIME "0.01")
endif()
compile_output_test(complexity_test)
add_test(complexity_benchmark complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
# Add the coverage command(s)
if(CMAKE_BUILD_TYPE)
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
endif()
if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
find_program(GCOV gcov)
find_program(LCOV lcov)
find_program(GENHTML genhtml)
find_program(CTEST ctest)
if (GCOV AND LCOV AND GENHTML AND CTEST AND HAVE_CXX_FLAG_COVERAGE)
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/lcov/index.html
COMMAND ${LCOV} -q -z -d .
COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o before.lcov -i
COMMAND ${CTEST} --force-new-ctest-process
COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o after.lcov
COMMAND ${LCOV} -q -a before.lcov -a after.lcov --output-file final.lcov
COMMAND ${LCOV} -q -r final.lcov "'${CMAKE_SOURCE_DIR}/test/*'" -o final.lcov
COMMAND ${GENHTML} final.lcov -o lcov --demangle-cpp --sort -p "${CMAKE_BINARY_DIR}" -t benchmark
DEPENDS filter_test benchmark_test options_test basic_test fixture_test cxx03_test complexity_test
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
COMMENT "Running LCOV"
)
add_custom_target(coverage
DEPENDS ${CMAKE_BINARY_DIR}/lcov/index.html
COMMENT "LCOV report at lcov/index.html"
)
message(STATUS "Coverage command added")
else()
if (HAVE_CXX_FLAG_COVERAGE)
set(CXX_FLAG_COVERAGE_MESSAGE supported)
else()
set(CXX_FLAG_COVERAGE_MESSAGE unavailable)
endif()
message(WARNING
"Coverage not available:\n"
" gcov: ${GCOV}\n"
" lcov: ${LCOV}\n"
" genhtml: ${GENHTML}\n"
" ctest: ${CTEST}\n"
" --coverage flag: ${CXX_FLAG_COVERAGE_MESSAGE}")
endif()
endif()
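
The NDEBUG scrubbing at the top of this file exists because assert() expands to nothing when NDEBUG is defined, so an assertion-based test would silently pass. A minimal sketch of the failure mode being avoided:

// Sketch: why -DNDEBUG must be stripped for assertion-based tests.
#include <cassert>

int main() {
  // Under -DNDEBUG this check disappears at compile time, so a test
  // that relies on it would report success without testing anything.
  assert(1 + 1 == 2);
  return 0;
}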

99
vendor/github.com/google/benchmark/test/basic_test.cc generated vendored Normal file

@ -0,0 +1,99 @@
#include "benchmark/benchmark.h"
#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
void BM_empty(benchmark::State& state) {
while (state.KeepRunning()) {
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();
void BM_spin_empty(benchmark::State& state) {
while (state.KeepRunning()) {
for (int x = 0; x < state.range(0); ++x) {
benchmark::DoNotOptimize(x);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
void BM_spin_pause_before(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
while (state.KeepRunning()) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
void BM_spin_pause_during(benchmark::State& state) {
while (state.KeepRunning()) {
state.PauseTiming();
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
state.ResumeTiming();
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
void BM_pause_during(benchmark::State& state) {
while (state.KeepRunning()) {
state.PauseTiming();
state.ResumeTiming();
}
}
BENCHMARK(BM_pause_during);
BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
void BM_spin_pause_after(benchmark::State& state) {
while (state.KeepRunning()) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
void BM_spin_pause_before_and_after(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
while (state.KeepRunning()) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
void BM_empty_stop_start(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
BENCHMARK_MAIN()


@ -0,0 +1,240 @@
#include "benchmark/benchmark.h"
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#if defined(__GNUC__)
#define BENCHMARK_NOINLINE __attribute__((noinline))
#else
#define BENCHMARK_NOINLINE
#endif
namespace {
int BENCHMARK_NOINLINE Factorial(uint32_t n) {
return (n == 1) ? 1 : n * Factorial(n - 1);
}
double CalculatePi(int depth) {
double pi = 0.0;
for (int i = 0; i < depth; ++i) {
double numerator = static_cast<double>(((i % 2) * 2) - 1);
double denominator = static_cast<double>((2 * i) - 1);
pi += numerator / denominator;
}
return (pi - 1.0) * 4;
}
std::set<int> ConstructRandomSet(int size) {
std::set<int> s;
for (int i = 0; i < size; ++i) s.insert(i);
return s;
}
std::mutex test_vector_mu;
std::vector<int>* test_vector = nullptr;
} // end namespace
static void BM_Factorial(benchmark::State& state) {
int fac_42 = 0;
while (state.KeepRunning()) fac_42 = Factorial(8);
// Prevent compiler optimizations
std::stringstream ss;
ss << fac_42;
state.SetLabel(ss.str());
}
BENCHMARK(BM_Factorial);
BENCHMARK(BM_Factorial)->UseRealTime();
static void BM_CalculatePiRange(benchmark::State& state) {
double pi = 0.0;
while (state.KeepRunning()) pi = CalculatePi(state.range(0));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
}
BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
static void BM_CalculatePi(benchmark::State& state) {
static const int depth = 1024;
while (state.KeepRunning()) {
benchmark::DoNotOptimize(CalculatePi(depth));
}
}
BENCHMARK(BM_CalculatePi)->Threads(8);
BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
static void BM_SetInsert(benchmark::State& state) {
while (state.KeepRunning()) {
state.PauseTiming();
std::set<int> data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j) data.insert(rand());
}
state.SetItemsProcessed(state.iterations() * state.range(1));
state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
}
BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {1, 10}});
template <typename Container,
typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State& state) {
ValueType v = 42;
while (state.KeepRunning()) {
Container c;
for (int i = state.range(0); --i;) c.push_back(v);
}
const size_t items_processed = state.iterations() * state.range(0);
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(v));
}
BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)
->Range(1 << 0, 1 << 10);
BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
#if __cplusplus >= 201103L
BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
#endif
static void BM_StringCompare(benchmark::State& state) {
std::string s1(state.range(0), '-');
std::string s2(state.range(0), '-');
while (state.KeepRunning()) benchmark::DoNotOptimize(s1.compare(s2));
}
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
static void BM_SetupTeardown(benchmark::State& state) {
if (state.thread_index == 0) {
// No need to lock test_vector_mu here as this is running single-threaded.
test_vector = new std::vector<int>();
}
int i = 0;
while (state.KeepRunning()) {
std::lock_guard<std::mutex> l(test_vector_mu);
if (i % 2 == 0)
test_vector->push_back(i);
else
test_vector->pop_back();
++i;
}
if (state.thread_index == 0) {
delete test_vector;
}
}
BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
static void BM_LongTest(benchmark::State& state) {
double tracker = 0.0;
while (state.KeepRunning()) {
for (int i = 0; i < state.range(0); ++i)
benchmark::DoNotOptimize(tracker += i);
}
}
BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
static void BM_ParallelMemset(benchmark::State& state) {
int size = state.range(0) / static_cast<int>(sizeof(int));
int thread_size = size / state.threads;
int from = thread_size * state.thread_index;
int to = from + thread_size;
if (state.thread_index == 0) {
test_vector = new std::vector<int>(size);
}
while (state.KeepRunning()) {
for (int i = from; i < to; i++) {
// No need to lock test_vector_mu as ranges
// do not overlap between threads.
benchmark::DoNotOptimize(test_vector->at(i) = 1);
}
}
if (state.thread_index == 0) {
delete test_vector;
}
}
BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
static void BM_ManualTiming(benchmark::State& state) {
size_t slept_for = 0;
int microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration{
static_cast<double>(microseconds)};
while (state.KeepRunning()) {
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
auto end = std::chrono::high_resolution_clock::now();
auto elapsed =
std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
state.SetIterationTime(elapsed.count());
slept_for += microseconds;
}
state.SetItemsProcessed(slept_for);
}
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
#if __cplusplus >= 201103L
template <class... Args>
void BM_with_args(benchmark::State& state, Args&&...) {
while (state.KeepRunning()) {
}
}
BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"),
std::pair<int, double>(42, 3.8));
void BM_non_template_args(benchmark::State& state, int, double) {
while(state.KeepRunning()) {}
}
BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
#endif // __cplusplus >= 201103L
static void BM_DenseThreadRanges(benchmark::State& st) {
switch (st.range(0)) {
case 1:
assert(st.threads == 1 || st.threads == 2 || st.threads == 3);
break;
case 2:
assert(st.threads == 1 || st.threads == 3 || st.threads == 4);
break;
case 3:
assert(st.threads == 5 || st.threads == 8 || st.threads == 11 ||
st.threads == 14);
break;
default:
assert(false && "Invalid test case number");
}
while (st.KeepRunning()) {
}
}
BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3);
BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2);
BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3);
BENCHMARK_MAIN()


@ -0,0 +1,167 @@
#undef NDEBUG
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>
#include "benchmark/benchmark.h"
#include "output_test.h"
namespace {
#define ADD_COMPLEXITY_CASES(...) \
int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
std::string big_o) {
SetSubstitutions({{"%bigo_name", big_o_test_name},
{"%rms_name", rms_test_name},
{"%bigo_str", "[ ]* %float " + big_o},
{"%bigo", big_o},
{"%rms", "[ ]*[0-9]+ %"}});
AddCases(
TC_ConsoleOut,
{{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
{"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name.
{"^%rms_name %rms %rms[ ]*$", MR_Next}});
AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
{"\"cpu_coefficient\": [0-9]+,$", MR_Next},
{"\"real_coefficient\": [0-9]{1,5},$", MR_Next},
{"\"big_o\": \"%bigo\",$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next},
{"\"name\": \"%rms_name\",$"},
{"\"rms\": %float$", MR_Next},
{"}", MR_Next}});
AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
{"^\"%bigo_name\"", MR_Not},
{"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}});
return 0;
}
} // end namespace
// ========================================================================= //
// --------------------------- Testing BigO O(1) --------------------------- //
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
while (state.KeepRunning()) {
for (int i = 0; i < 1024; ++i) {
benchmark::DoNotOptimize(&i);
}
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int) {
return 1.0;
});
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
const char *rms_o_1_test_name = "BM_Complexity_O1_RMS";
const char *enum_big_o_1 = "\\([0-9]+\\)";
// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto
// deduced.
// See https://github.com/google/benchmark/issues/272
const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
const char *lambda_big_o_1 = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, enum_big_o_1);
// Add auto enum tests
ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, auto_big_o_1);
// Add lambda tests
ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //
std::vector<int> ConstructRandomVector(int size) {
std::vector<int> v;
v.reserve(size);
for (int i = 0; i < size; ++i) {
v.push_back(std::rand() % size);
}
return v;
}
void BM_Complexity_O_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
const int item_not_in_vector =
state.range(0) * 2; // Test worst case scenario (item not in vector)
while (state.KeepRunning()) {
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](int n) -> double { return n; });
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS";
const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n);
// Add lambda tests
ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
// ========================================================================= //
static void BM_Complexity_O_N_log_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
while (state.KeepRunning()) {
std::sort(v.begin(), v.end());
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](int n) { return n * log2(n); });
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name,
enum_auto_big_o_n_lg_n);
// Add lambda tests
ADD_COMPLEXITY_CASES(big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name,
lambda_big_o_n_lg_n);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char *argv[]) { RunOutputTests(argc, argv); }

48
vendor/github.com/google/benchmark/test/cxx03_test.cc generated vendored Normal file

@ -0,0 +1,48 @@
#undef NDEBUG
#include <cassert>
#include <cstddef>
#include "benchmark/benchmark.h"
#if __cplusplus >= 201103L
#error C++11 or greater detected. Should be C++03.
#endif
void BM_empty(benchmark::State& state) {
while (state.KeepRunning()) {
volatile std::size_t x = state.iterations();
((void)x);
}
}
BENCHMARK(BM_empty);
// The new C++11 interface for args/ranges requires initializer list support.
// Therefore we provide the old interface to support C++03.
void BM_old_arg_range_interface(benchmark::State& state) {
assert((state.range(0) == 1 && state.range(1) == 2) ||
(state.range(0) == 5 && state.range(1) == 6));
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6);
template <class T, class U>
void BM_template2(benchmark::State& state) {
BM_empty(state);
}
BENCHMARK_TEMPLATE2(BM_template2, int, long);
template <class T>
void BM_template1(benchmark::State& state) {
BM_empty(state);
}
BENCHMARK_TEMPLATE(BM_template1, long);
BENCHMARK_TEMPLATE1(BM_template1, int);
void BM_counters(benchmark::State& state) {
BM_empty(state);
state.counters["Foo"] = 2;
}
BENCHMARK(BM_counters);
BENCHMARK_MAIN()


@ -0,0 +1,64 @@
// Testing:
// State::PauseTiming()
// State::ResumeTiming()
// Test that the CHECKs within these functions diagnose when they are called
// outside of the KeepRunning() loop.
//
// NOTE: Users should NOT include or use src/check.h. This is only done in
// order to test library internals.
#include <cstdlib>
#include <stdexcept>
#include "../src/check.h"
#include "benchmark/benchmark.h"
#if defined(__GNUC__) && !defined(__EXCEPTIONS)
#define TEST_HAS_NO_EXCEPTIONS
#endif
void TestHandler() {
#ifndef TEST_HAS_NO_EXCEPTIONS
throw std::logic_error("");
#else
std::abort();
#endif
}
void try_invalid_pause_resume(benchmark::State& state) {
#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
try {
state.PauseTiming();
std::abort();
} catch (std::logic_error const&) {
}
try {
state.ResumeTiming();
std::abort();
} catch (std::logic_error const&) {
}
#else
(void)state; // avoid unused warning
#endif
}
void BM_diagnostic_test(benchmark::State& state) {
static bool called_once = false;
if (called_once == false) try_invalid_pause_resume(state);
while (state.KeepRunning()) {
benchmark::DoNotOptimize(state.iterations());
}
if (called_once == false) try_invalid_pause_resume(state);
called_once = true;
}
BENCHMARK(BM_diagnostic_test);
int main(int argc, char* argv[]) {
benchmark::internal::GetAbortHandler() = &TestHandler;
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
}


@ -0,0 +1,52 @@
#include "benchmark/benchmark.h"
#include <cstdint>
namespace {
#if defined(__GNUC__)
std::uint64_t double_up(const std::uint64_t x) __attribute__((const));
#endif
std::uint64_t double_up(const std::uint64_t x) { return x * 2; }
}
// Using DoNotOptimize on types like BitRef seems to cause a lot of problems
// with the inline assembly on both GCC and Clang.
struct BitRef {
int index;
unsigned char &byte;
public:
static BitRef Make() {
static unsigned char arr[2] = {};
BitRef b(1, arr[0]);
return b;
}
private:
BitRef(int i, unsigned char& b) : index(i), byte(b) {}
};
int main(int, char*[]) {
// this test verifies compilation of DoNotOptimize() for some types
char buffer8[8];
benchmark::DoNotOptimize(buffer8);
char buffer20[20];
benchmark::DoNotOptimize(buffer20);
char buffer1024[1024];
benchmark::DoNotOptimize(buffer1024);
benchmark::DoNotOptimize(&buffer1024[0]);
int x = 123;
benchmark::DoNotOptimize(x);
benchmark::DoNotOptimize(&x);
benchmark::DoNotOptimize(x += 42);
benchmark::DoNotOptimize(double_up(x));
// These tests are to ensure that DoNotOptimize compiles with awkward
// reference-like types such as BitRef.
benchmark::DoNotOptimize(BitRef::Make());
BitRef lval = BitRef::Make();
benchmark::DoNotOptimize(lval);
}

104
vendor/github.com/google/benchmark/test/filter_test.cc generated vendored Normal file

@ -0,0 +1,104 @@
#include "benchmark/benchmark.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
namespace {
class TestReporter : public benchmark::ConsoleReporter {
public:
virtual bool ReportContext(const Context& context) {
return ConsoleReporter::ReportContext(context);
};
virtual void ReportRuns(const std::vector<Run>& report) {
++count_;
ConsoleReporter::ReportRuns(report);
};
TestReporter() : count_(0) {}
virtual ~TestReporter() {}
size_t GetCount() const { return count_; }
private:
mutable size_t count_;
};
} // end namespace
static void NoPrefix(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(NoPrefix);
static void BM_Foo(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_Foo);
static void BM_Bar(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_Bar);
static void BM_FooBar(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_FooBar);
static void BM_FooBa(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_FooBa);
int main(int argc, char **argv) {
bool list_only = false;
for (int i = 0; i < argc; ++i)
list_only |= std::string(argv[i]).find("--benchmark_list_tests") !=
std::string::npos;
benchmark::Initialize(&argc, argv);
TestReporter test_reporter;
const size_t returned_count =
benchmark::RunSpecifiedBenchmarks(&test_reporter);
if (argc == 2) {
// Make sure we ran all of the tests
std::stringstream ss(argv[1]);
size_t expected_return;
ss >> expected_return;
if (returned_count != expected_return) {
std::cerr << "ERROR: Expected " << expected_return
<< " tests to match the filter but returned_count = "
<< returned_count << std::endl;
return -1;
}
const size_t expected_reports = list_only ? 0 : expected_return;
const size_t reports_count = test_reporter.GetCount();
if (reports_count != expected_reports) {
std::cerr << "ERROR: Expected " << expected_reports
<< " tests to be run but reported_count = " << reports_count
<< std::endl;
return -1;
}
}
return 0;
}


@ -0,0 +1,49 @@
#include "benchmark/benchmark.h"
#include <cassert>
#include <memory>
class MyFixture : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& state) {
if (state.thread_index == 0) {
assert(data.get() == nullptr);
data.reset(new int(42));
}
}
void TearDown(const ::benchmark::State& state) {
if (state.thread_index == 0) {
assert(data.get() != nullptr);
data.reset();
}
}
~MyFixture() { assert(data == nullptr); }
std::unique_ptr<int> data;
};
BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) {
assert(data.get() != nullptr);
assert(*data == 42);
while (st.KeepRunning()) {
}
}
BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
if (st.thread_index == 0) {
assert(data.get() != nullptr);
assert(*data == 42);
}
while (st.KeepRunning()) {
assert(data.get() != nullptr);
assert(*data == 42);
}
st.SetItemsProcessed(st.range(0));
}
BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42);
BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu();
BENCHMARK_MAIN()

56
vendor/github.com/google/benchmark/test/map_test.cc generated vendored Normal file

@ -0,0 +1,56 @@
#include "benchmark/benchmark.h"
#include <cstdlib>
#include <map>
namespace {
std::map<int, int> ConstructRandomMap(int size) {
std::map<int, int> m;
for (int i = 0; i < size; ++i) {
m.insert(std::make_pair(rand() % size, rand() % size));
}
return m;
}
} // namespace
// Basic version.
static void BM_MapLookup(benchmark::State& state) {
const int size = state.range(0);
while (state.KeepRunning()) {
state.PauseTiming();
std::map<int, int> m = ConstructRandomMap(size);
state.ResumeTiming();
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(rand() % size));
}
}
state.SetItemsProcessed(state.iterations() * size);
}
BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
// Using fixtures.
class MapFixture : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& st) {
m = ConstructRandomMap(st.range(0));
}
void TearDown(const ::benchmark::State&) { m.clear(); }
std::map<int, int> m;
};
BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
const int size = state.range(0);
while (state.KeepRunning()) {
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(rand() % size));
}
}
state.SetItemsProcessed(state.iterations() * size);
}
BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12);
BENCHMARK_MAIN()


@ -0,0 +1,74 @@
#include "benchmark/benchmark.h"
#include <cassert>
#include <set>
class MultipleRangesFixture : public ::benchmark::Fixture {
public:
MultipleRangesFixture()
: expectedValues({{1, 3, 5},
{1, 3, 8},
{1, 3, 15},
{2, 3, 5},
{2, 3, 8},
{2, 3, 15},
{1, 4, 5},
{1, 4, 8},
{1, 4, 15},
{2, 4, 5},
{2, 4, 8},
{2, 4, 15},
{1, 7, 5},
{1, 7, 8},
{1, 7, 15},
{2, 7, 5},
{2, 7, 8},
{2, 7, 15},
{7, 6, 3}}) {}
void SetUp(const ::benchmark::State& state) {
std::vector<int> ranges = {state.range(0), state.range(1), state.range(2)};
assert(expectedValues.find(ranges) != expectedValues.end());
actualValues.insert(ranges);
}
virtual ~MultipleRangesFixture() {
assert(actualValues.size() == expectedValues.size());
}
std::set<std::vector<int>> expectedValues;
std::set<std::vector<int>> actualValues;
};
BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
while (state.KeepRunning()) {
int product = state.range(0) * state.range(1) * state.range(2);
for (int x = 0; x < product; x++) {
benchmark::DoNotOptimize(x);
}
}
}
BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
->RangeMultiplier(2)
->Ranges({{1, 2}, {3, 7}, {5, 15}})
->Args({7, 6, 3});
void BM_CheckDefaultArgument(benchmark::State& state) {
// Test that the 'range()' without an argument is the same as 'range(0)'.
assert(state.range() == state.range(0));
assert(state.range() != state.range(1));
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
static void BM_MultipleRanges(benchmark::State& st) {
while (st.KeepRunning()) {
}
}
BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});
BENCHMARK_MAIN()


@ -0,0 +1,65 @@
#include "benchmark/benchmark.h"
#include <chrono>
#include <thread>
#if defined(NDEBUG)
#undef NDEBUG
#endif
#include <cassert>
void BM_basic(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
void BM_basic_slow(benchmark::State& state) {
std::chrono::milliseconds sleep_duration(state.range(0));
while (state.KeepRunning()) {
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
}
}
BENCHMARK(BM_basic);
BENCHMARK(BM_basic)->Arg(42);
BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond);
BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond);
BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond);
BENCHMARK(BM_basic)->Range(1, 8);
BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8);
BENCHMARK(BM_basic)->DenseRange(10, 15);
BENCHMARK(BM_basic)->Args({42, 42});
BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}});
BENCHMARK(BM_basic)->MinTime(0.7);
BENCHMARK(BM_basic)->UseRealTime();
BENCHMARK(BM_basic)->ThreadRange(2, 4);
BENCHMARK(BM_basic)->ThreadPerCpu();
BENCHMARK(BM_basic)->Repetitions(3);
void CustomArgs(benchmark::internal::Benchmark* b) {
for (int i = 0; i < 10; ++i) {
b->Arg(i);
}
}
BENCHMARK(BM_basic)->Apply(CustomArgs);
void BM_explicit_iteration_count(benchmark::State& st) {
// Test that benchmarks specified with an explicit iteration count are
// only run once.
static bool invoked_before = false;
assert(!invoked_before);
invoked_before = true;
// Test that the requested iteration count is respected.
assert(st.max_iterations == 42);
size_t actual_iterations = 0;
while (st.KeepRunning())
++actual_iterations;
assert(st.iterations() == st.max_iterations);
assert(st.iterations() == 42);
}
BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
BENCHMARK_MAIN()

201
vendor/github.com/google/benchmark/test/output_test.h generated vendored Normal file
View File

@ -0,0 +1,201 @@
#ifndef TEST_OUTPUT_TEST_H
#define TEST_OUTPUT_TEST_H
#undef NDEBUG
#include <initializer_list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <functional>
#include <sstream>
#include "../src/re.h"
#include "benchmark/benchmark.h"
#define CONCAT2(x, y) x##y
#define CONCAT(x, y) CONCAT2(x, y)
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__)
#define SET_SUBSTITUTIONS(...) \
int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)
enum MatchRules {
MR_Default, // Skip non-matching lines until a match is found.
MR_Next, // Match must occur on the next line.
MR_Not // No line between the current position and the next match matches
// the regex
};
struct TestCase {
TestCase(std::string re, int rule = MR_Default);
std::string regex_str;
int match_rule;
std::string substituted_regex;
std::shared_ptr<benchmark::Regex> regex;
};
enum TestCaseID {
TC_ConsoleOut,
TC_ConsoleErr,
TC_JSONOut,
TC_JSONErr,
TC_CSVOut,
TC_CSVErr,
TC_NumID // PRIVATE
};
// Add a list of test cases to be run against the output specified by
// 'ID'
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il);
// Add or set a list of substitutions to be performed on constructed regexes
// See 'output_test_helper.cc' for a list of default substitutions.
int SetSubstitutions(
std::initializer_list<std::pair<std::string, std::string>> il);
// Run all output tests.
void RunOutputTests(int argc, char* argv[]);
// ========================================================================= //
// ------------------------- Results checking ------------------------------ //
// ========================================================================= //
// Call this macro to register a benchmark for checking its results. This
// should be all that's needed. It subscribes a function to check the (CSV)
// results of a benchmark. This is done only after verifying that the output
// strings are really as expected.
// bm_name_pattern: a name or a regex pattern which will be matched against
// all the benchmark names. Matching benchmarks
// will be the subject of a call to checker_function
// checker_function: should be of type ResultsCheckFn (see below)
#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \
size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)
struct Results;
typedef std::function< void(Results const&) > ResultsCheckFn;
size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn);
// Class holding the results of a benchmark.
// It is passed in calls to checker functions.
struct Results {
// the benchmark name
std::string name;
// the benchmark fields
std::map< std::string, std::string > values;
Results(const std::string& n) : name(n) {}
int NumThreads() const;
typedef enum { kCpuTime, kRealTime } BenchmarkTime;
// get cpu_time or real_time in seconds
double GetTime(BenchmarkTime which) const;
// get the real_time duration of the benchmark in seconds.
// it is better to use fuzzy float checks for this, as the float
// ASCII formatting is lossy.
double DurationRealTime() const {
return GetAs< double >("iterations") * GetTime(kRealTime);
}
// get the cpu_time duration of the benchmark in seconds
double DurationCPUTime() const {
return GetAs< double >("iterations") * GetTime(kCpuTime);
}
// get the string for a result by name, or nullptr if the name
// is not found
const std::string* Get(const char* entry_name) const {
auto it = values.find(entry_name);
if(it == values.end()) return nullptr;
return &it->second;
}
// get a result by name, parsed as a specific type.
// NOTE: for counters, use GetCounterAs instead.
template <class T>
T GetAs(const char* entry_name) const;
// counters are written as doubles, so they have to be read first
// as a double, and only then converted to the asked type.
template <class T>
T GetCounterAs(const char* entry_name) const {
double dval = GetAs< double >(entry_name);
T tval = static_cast< T >(dval);
return tval;
}
};
template <class T>
T Results::GetAs(const char* entry_name) const {
auto *sv = Get(entry_name);
CHECK(sv != nullptr && !sv->empty());
std::stringstream ss;
ss << *sv;
T out;
ss >> out;
CHECK(!ss.fail());
return out;
}
//----------------------------------
// Macros to help in result checking. Do not use them with arguments causing
// side-effects.
#define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \
CONCAT(CHECK_, relationship) \
(entry.getfn< var_type >(var_name), (value)) << "\n" \
<< __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \
<< __FILE__ << ":" << __LINE__ << ": " \
<< "expected (" << #var_type << ")" << (var_name) \
<< "=" << (entry).getfn< var_type >(var_name) \
<< " to be " #relationship " to " << (value) << "\n"
// check with tolerance. eps_factor is the tolerance window, which is
// interpreted relative to value (eg, 0.1 means 10% of value).
#define _CHECK_FLOAT_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value, eps_factor) \
CONCAT(CHECK_FLOAT_, relationship) \
(entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \
<< __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \
<< __FILE__ << ":" << __LINE__ << ": " \
<< "expected (" << #var_type << ")" << (var_name) \
<< "=" << (entry).getfn< var_type >(var_name) \
<< " to be " #relationship " to " << (value) << "\n" \
<< __FILE__ << ":" << __LINE__ << ": " \
<< "with tolerance of " << (eps_factor) * (value) \
<< " (" << (eps_factor)*100. << "%), " \
<< "but delta was " << ((entry).getfn< var_type >(var_name) - (value)) \
<< " (" << (((entry).getfn< var_type >(var_name) - (value)) \
/ \
((value) > 1.e-5 || value < -1.e-5 ? value : 1.e-5)*100.) \
<< "%)"
#define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \
_CHECK_RESULT_VALUE(entry, GetAs, var_type, var_name, relationship, value)
#define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \
_CHECK_RESULT_VALUE(entry, GetCounterAs, var_type, var_name, relationship, value)
#define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \
_CHECK_FLOAT_RESULT_VALUE(entry, GetAs, double, var_name, relationship, value, eps_factor)
#define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \
_CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor)
// ========================================================================= //
// --------------------------- Misc Utilities ------------------------------ //
// ========================================================================= //
namespace {
const char* const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
} // end namespace
#endif // TEST_OUTPUT_TEST_H
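
A hedged sketch of the results-checking flow this header documents. The benchmark BM_Example, its "Foo" counter, and the checker are hypothetical names, and CHECK_GE/CHECK_EQ come from the internal src/check.h, which the sketch includes directly as the real tests do:

// Hypothetical usage sketch built from the helpers declared above.
#include "../src/check.h" // internal; provides CHECK_GE / CHECK_EQ
#include "output_test.h"

static void BM_Example(benchmark::State& state) {
  while (state.KeepRunning()) {
  }
  state.counters["Foo"] = 2; // user counter checked below
}
BENCHMARK(BM_Example);

static void CheckExample(Results const& r) {
  CHECK_RESULT_VALUE(r, int, "iterations", GE, 1); // parsed from CSV output
  CHECK_COUNTER_VALUE(r, int, "Foo", EQ, 2); // counters are stored as doubles
}
// Subscribes the checker to every benchmark matching the name pattern.
CHECK_BENCHMARK_RESULTS("BM_Example", &CheckExample);

int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }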


@ -0,0 +1,423 @@
#include <iostream>
#include <map>
#include <memory>
#include <sstream>
#include <cstring>
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "../src/re.h" // NOTE: re.h is for internal use only
#include "output_test.h"
#include "../src/benchmark_api_internal.h"
// ========================================================================= //
// ------------------------------ Internals -------------------------------- //
// ========================================================================= //
namespace internal {
namespace {
using TestCaseList = std::vector<TestCase>;
// Use a vector because the order in which elements are added matters during
// iteration. std::map/unordered_map don't guarantee that.
// For example:
// SetSubstitutions({{"%HelloWorld", "Hello"}, {"%Hello", "Hi"}});
// Substitute("%HelloWorld") // Always expands to Hello.
using SubMap = std::vector<std::pair<std::string, std::string>>;
TestCaseList& GetTestCaseList(TestCaseID ID) {
// Uses function-local statics to ensure initialization occurs
// before first use.
static TestCaseList lists[TC_NumID];
return lists[ID];
}
SubMap& GetSubstitutions() {
// Don't use 'dec_re' from header because it may not yet be initialized.
static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
static SubMap map = {
{"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"},
// human-readable float
{"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"},
{"%int", "[ ]*[0-9]+"},
{" %s ", "[ ]+"},
{"%time", "[ ]*[0-9]{1,5} ns"},
{"%console_report", "[ ]*[0-9]{1,5} ns [ ]*[0-9]{1,5} ns [ ]*[0-9]+"},
{"%console_us_report", "[ ]*[0-9] us [ ]*[0-9] us [ ]*[0-9]+"},
{"%csv_header",
"name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
"items_per_second,label,error_occurred,error_message"},
{"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"},
{"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"},
{"%csv_bytes_report",
"[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"},
{"%csv_items_report",
"[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,," + safe_dec_re + ",,,"},
{"%csv_bytes_items_report",
"[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re +
"," + safe_dec_re + ",,,"},
{"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"},
{"%csv_label_report_end", ",,"}};
return map;
}
std::string PerformSubstitutions(std::string source) {
SubMap const& subs = GetSubstitutions();
using SizeT = std::string::size_type;
for (auto const& KV : subs) {
SizeT pos;
SizeT next_start = 0;
while ((pos = source.find(KV.first, next_start)) != std::string::npos) {
next_start = pos + KV.second.size();
source.replace(pos, KV.first.size(), KV.second);
}
}
return source;
}
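// For example, with the default substitution table above:
//   PerformSubstitutions("%int ns") yields "[ ]*[0-9]+ ns"
// (the %int key is replaced, the literal " ns" is left untouched).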
void CheckCase(std::stringstream& remaining_output, TestCase const& TC,
TestCaseList const& not_checks) {
std::string first_line;
bool on_first = true;
std::string line;
while (remaining_output.eof() == false) {
CHECK(remaining_output.good());
std::getline(remaining_output, line);
if (on_first) {
first_line = line;
on_first = false;
}
for (const auto& NC : not_checks) {
CHECK(!NC.regex->Match(line))
<< "Unexpected match for line \"" << line << "\" for MR_Not regex \""
<< NC.regex_str << "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
if (TC.regex->Match(line)) return;
CHECK(TC.match_rule != MR_Next)
<< "Expected line \"" << line << "\" to match regex \"" << TC.regex_str
<< "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
CHECK(remaining_output.eof() == false)
<< "End of output reached before match for regex \"" << TC.regex_str
<< "\" was found"
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
void CheckCases(TestCaseList const& checks, std::stringstream& output) {
std::vector<TestCase> not_checks;
for (size_t i = 0; i < checks.size(); ++i) {
const auto& TC = checks[i];
if (TC.match_rule == MR_Not) {
not_checks.push_back(TC);
continue;
}
CheckCase(output, TC, not_checks);
not_checks.clear();
}
}
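// Illustrative registration of match rules (the patterns are hypothetical;
// ADD_CASES is the registration macro declared in output_test.h):
//
//   ADD_CASES(TC_ConsoleOut,
//             {{"^BM_first %console_report$"},
//              {"^BM_second %console_report$", MR_Next}, // very next line
//              {"error", MR_Not}}); // must not appear before a match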
class TestReporter : public benchmark::BenchmarkReporter {
public:
TestReporter(std::vector<benchmark::BenchmarkReporter*> reps)
: reporters_(reps) {}
virtual bool ReportContext(const Context& context) {
bool last_ret = false;
bool first = true;
for (auto rep : reporters_) {
bool new_ret = rep->ReportContext(context);
CHECK(first || new_ret == last_ret)
<< "Reports return different values for ReportContext";
first = false;
last_ret = new_ret;
}
(void)first;
return last_ret;
}
void ReportRuns(const std::vector<Run>& report) {
for (auto rep : reporters_) rep->ReportRuns(report);
}
void Finalize() {
for (auto rep : reporters_) rep->Finalize();
}
private:
std::vector<benchmark::BenchmarkReporter *> reporters_;
};
}  // end anonymous namespace
} // end namespace internal
// ========================================================================= //
// -------------------------- Results checking ----------------------------- //
// ========================================================================= //
namespace internal {
// Utility class to manage subscribers for checking benchmark results.
// It works by parsing the CSV output to read the results.
class ResultsChecker {
public:
struct PatternAndFn : public TestCase { // reusing TestCase for its regexes
PatternAndFn(const std::string& rx, ResultsCheckFn fn_)
: TestCase(rx), fn(fn_) {}
ResultsCheckFn fn;
};
std::vector< PatternAndFn > check_patterns;
std::vector< Results > results;
std::vector< std::string > field_names;
void Add(const std::string& entry_pattern, ResultsCheckFn fn);
void CheckResults(std::stringstream& output);
private:
void SetHeader_(const std::string& csv_header);
void SetValues_(const std::string& entry_csv_line);
std::vector< std::string > SplitCsv_(const std::string& line);
};
// store the static ResultsChecker in a function to prevent initialization
// order problems
ResultsChecker& GetResultsChecker() {
static ResultsChecker rc;
return rc;
}
// add a results checker for a benchmark
void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) {
check_patterns.emplace_back(entry_pattern, fn);
}
// check the results of all subscribed benchmarks
void ResultsChecker::CheckResults(std::stringstream& output) {
// first reset the stream to the start
{
auto start = std::ios::streampos(0);
// clear before calling tellg()
output.clear();
// seek to zero only when needed
if(output.tellg() > start) output.seekg(start);
// and just in case
output.clear();
}
// now go over every line and publish it to the ResultsChecker
std::string line;
bool on_first = true;
while (output.eof() == false) {
CHECK(output.good());
std::getline(output, line);
if (on_first) {
SetHeader_(line); // the first line is the CSV header
on_first = false;
continue;
}
SetValues_(line);
}
// finally we can call the subscribed check functions
for(const auto& p : check_patterns) {
VLOG(2) << "--------------------------------\n";
VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n";
for(const auto& r : results) {
if(!p.regex->Match(r.name)) {
VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n";
continue;
} else {
VLOG(2) << p.regex_str << " is matched by " << r.name << "\n";
}
VLOG(1) << "Checking results of " << r.name << ": ... \n";
p.fn(r);
VLOG(1) << "Checking results of " << r.name << ": OK.\n";
}
}
}
// record the field names given in the CSV header
void ResultsChecker::SetHeader_(const std::string& csv_header) {
field_names = SplitCsv_(csv_header);
}
// set the values for a benchmark
void ResultsChecker::SetValues_(const std::string& entry_csv_line) {
if(entry_csv_line.empty()) return; // some lines are empty
CHECK(!field_names.empty());
auto vals = SplitCsv_(entry_csv_line);
CHECK_EQ(vals.size(), field_names.size());
results.emplace_back(vals[0]); // vals[0] is the benchmark name
auto &entry = results.back();
for (size_t i = 1, e = vals.size(); i < e; ++i) {
entry.values[field_names[i]] = vals[i];
}
}
// a quick'n'dirty CSV splitter (strips surrounding quotes from each field)
std::vector< std::string > ResultsChecker::SplitCsv_(const std::string& line) {
std::vector< std::string > out;
if(line.empty()) return out;
if(!field_names.empty()) out.reserve(field_names.size());
size_t prev = 0, pos = line.find_first_of(','), curr = pos;
while(pos != line.npos) {
CHECK(curr > 0);
if(line[prev] == '"') ++prev;
if(line[curr-1] == '"') --curr;
out.push_back(line.substr(prev, curr-prev));
prev = pos + 1;
pos = line.find_first_of(',', pos + 1);
curr = pos;
}
curr = line.size();
if(line[prev] == '"') ++prev;
if(line[curr-1] == '"') --curr;
out.push_back(line.substr(prev, curr-prev));
return out;
}
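// For example, SplitCsv_("\"BM_foo\",10,1.5") yields {"BM_foo", "10", "1.5"}:
// the surrounding quotes are stripped from the first field.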
} // end namespace internal
size_t AddChecker(const char* bm_name, ResultsCheckFn fn)
{
auto &rc = internal::GetResultsChecker();
rc.Add(bm_name, fn);
return rc.results.size();
}
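// Typical subscription (illustrative; the benchmark pattern and the check
// body are hypothetical):
//
//   AddChecker("BM_Example/.*", [](Results const& e) {
//     CHECK_RESULT_VALUE(e, int, "iterations", GT, 0);
//   });
//
// NumThreads() below parses names such as "BM_test/16/threads:4" (yielding
// 4); names without a "/threads:" segment report 1 thread.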
int Results::NumThreads() const {
auto pos = name.find("/threads:");
if(pos == name.npos) return 1;
auto end = name.find('/', pos + 9);
std::stringstream ss;
ss << name.substr(pos + 9, end - (pos + 9)); // substr takes a count, not an end position
int num = 1;
ss >> num;
CHECK(!ss.fail());
return num;
}
double Results::GetTime(BenchmarkTime which) const {
CHECK(which == kCpuTime || which == kRealTime);
const char *which_str = which == kCpuTime ? "cpu_time" : "real_time";
double val = GetAs< double >(which_str);
auto unit = Get("time_unit");
CHECK(unit);
if(*unit == "ns") {
return val * 1.e-9;
} else if(*unit == "us") {
return val * 1.e-6;
} else if(*unit == "ms") {
return val * 1.e-3;
} else if(*unit == "s") {
return val;
} else {
CHECK(1 == 0) << "unknown time unit: " << *unit;
return 0;
}
}
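// For example, an entry with real_time == 250 and time_unit == "us" makes
// GetTime(kRealTime) return 2.5e-4 (seconds).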
// ========================================================================= //
// -------------------------- Public API Definitions------------------------ //
// ========================================================================= //
TestCase::TestCase(std::string re, int rule)
: regex_str(std::move(re)),
match_rule(rule),
substituted_regex(internal::PerformSubstitutions(regex_str)),
regex(std::make_shared<benchmark::Regex>()) {
std::string err_str;
regex->Init(substituted_regex, &err_str);
CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex
<< "\""
<< "\n originally \"" << regex_str << "\""
<< "\n got error: " << err_str;
}
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il) {
auto& L = internal::GetTestCaseList(ID);
L.insert(L.end(), il);
return 0;
}
int SetSubstitutions(
std::initializer_list<std::pair<std::string, std::string>> il) {
auto& subs = internal::GetSubstitutions();
for (auto KV : il) {
bool exists = false;
KV.second = internal::PerformSubstitutions(KV.second);
for (auto& EKV : subs) {
if (EKV.first == KV.first) {
EKV.second = std::move(KV.second);
exists = true;
break;
}
}
if (!exists) subs.push_back(std::move(KV));
}
return 0;
}
void RunOutputTests(int argc, char* argv[]) {
using internal::GetTestCaseList;
benchmark::Initialize(&argc, argv);
auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/true);
benchmark::ConsoleReporter CR(options);
benchmark::JSONReporter JR;
benchmark::CSVReporter CSVR;
struct ReporterTest {
const char* name;
std::vector<TestCase>& output_cases;
std::vector<TestCase>& error_cases;
benchmark::BenchmarkReporter& reporter;
std::stringstream out_stream;
std::stringstream err_stream;
ReporterTest(const char* n, std::vector<TestCase>& out_tc,
std::vector<TestCase>& err_tc,
benchmark::BenchmarkReporter& br)
: name(n), output_cases(out_tc), error_cases(err_tc), reporter(br) {
reporter.SetOutputStream(&out_stream);
reporter.SetErrorStream(&err_stream);
}
} TestCases[] = {
{"ConsoleReporter", GetTestCaseList(TC_ConsoleOut),
GetTestCaseList(TC_ConsoleErr), CR},
{"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr),
JR},
{"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr),
CSVR},
};
// Create the test reporter and run the benchmarks.
std::cout << "Running benchmarks...\n";
internal::TestReporter test_rep({&CR, &JR, &CSVR});
benchmark::RunSpecifiedBenchmarks(&test_rep);
for (auto& rep_test : TestCases) {
std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n";
std::string banner(msg.size() - 1, '-');
std::cout << banner << msg << banner << "\n";
std::cerr << rep_test.err_stream.str();
std::cout << rep_test.out_stream.str();
internal::CheckCases(rep_test.error_cases, rep_test.err_stream);
internal::CheckCases(rep_test.output_cases, rep_test.out_stream);
std::cout << "\n";
}
// now that we know the output is as expected, we can dispatch
// the checks to the subscribers.
auto &csv = TestCases[2];
// would use == here, but gcc warns about comparing a string literal to a pointer
CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
internal::GetResultsChecker().CheckResults(csv.out_stream);
}
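// A test binary built on this helper typically reduces to the following
// (as in the benchmark library's own output tests):
//
//   int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }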

Some files were not shown because too many files have changed in this diff