author    Patrick Wildt <patrick@cvs.openbsd.org>  2021-01-02 20:29:15 +0000
committer Patrick Wildt <patrick@cvs.openbsd.org>  2021-01-02 20:29:15 +0000
commit    5905fe1caa31a70abed3aa58485dfc77ac2ffde1 (patch)
tree      bd2a69f71b52b223af5cd92d391ae49b43e83ac9 /gnu/llvm
parent    e8f072a51297013f4aa2c74a625b27819150133c (diff)
Import libc++ 10.0.1 release.
Diffstat (limited to 'gnu/llvm')
-rw-r--r--  gnu/llvm/libcxx/benchmarks/algorithms.bench.cpp | 163
-rw-r--r--  gnu/llvm/libcxx/cmake/Modules/DefineLinkerScript.cmake | 16
-rw-r--r--  gnu/llvm/libcxx/cmake/Modules/HandleOutOfTreeLLVM.cmake | 196
-rw-r--r--  gnu/llvm/libcxx/include/__functional_base | 642
-rw-r--r--  gnu/llvm/libcxx/include/__libcpp_version | 2
-rw-r--r--  gnu/llvm/libcxx/include/__nullptr | 4
-rw-r--r--  gnu/llvm/libcxx/include/__string | 273
-rw-r--r--  gnu/llvm/libcxx/include/__tuple | 4
-rw-r--r--  gnu/llvm/libcxx/lib/libc++abi.v1.exp | 10
-rw-r--r--  gnu/llvm/libcxx/lib/libc++abi.v2.exp | 10
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/.clang-format | 1
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/.gitignore | 8
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/.travis.yml | 93
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/AUTHORS | 12
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/CMakeLists.txt | 68
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/CONTRIBUTORS | 19
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/README.md | 1350
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/WORKSPACE | 50
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/appveyor.yml | 2
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake | 12
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake | 9
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake | 22
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/cmake/benchmark.pc.in | 5
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/cmake/gnu_posix_regex.cpp | 1
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/cmake/posix_regex.cpp | 1
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/cmake/std_regex.cpp | 1
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/docs/AssemblyTests.md | 1
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/docs/tools.md | 6
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/include/benchmark/benchmark.h | 291
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/CMakeLists.txt | 40
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/benchmark.cc | 342
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_api_internal.cc | 91
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_api_internal.h | 74
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_register.cc | 135
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_register.h | 93
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_runner.cc | 409
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_runner.h | 61
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/commandlineflags.cc | 132
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/commandlineflags.h | 79
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/complexity.cc | 34
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/console_reporter.cc | 25
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/counter.cc | 9
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/counter.h | 8
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/csv_reporter.cc | 29
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/cycleclock.h | 67
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/internal_macros.h | 12
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/json_reporter.cc | 89
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/mutex.h | 44
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/reporter.cc | 17
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/sleep.cc | 16
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/statistics.cc | 9
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/string_util.cc | 19
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/string_util.h | 7
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/sysinfo.cc | 108
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/thread_manager.h | 4
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/thread_timer.h | 29
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/src/timers.cc | 80
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/AssemblyTests.cmake | 1
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/CMakeLists.txt | 83
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/basic_test.cc | 27
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/benchmark_gtest.cc | 134
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/complexity_test.cc | 95
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/cxx03_test.cc | 2
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/filter_test.cc | 26
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/fixture_test.cc | 18
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/map_test.cc | 4
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/memory_manager_test.cc | 12
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc | 5
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/options_test.cc | 11
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/output_test.h | 12
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/output_test_helper.cc | 21
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/register_benchmark_test.cc | 2
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/reporter_output_test.cc | 372
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/skip_with_error_test.cc | 10
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/state_assembly_test.cc | 2
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/statistics_gtest.cc | 4
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/string_util_gtest.cc | 15
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc | 268
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_test.cc | 147
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_thousands_test.cc | 22
-rwxr-xr-x  gnu/llvm/libcxx/utils/google-benchmark/tools/compare.py | 37
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run1.json | 19
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run2.json | 19
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/report.py | 753
-rw-r--r--  gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/util.py | 23
-rw-r--r--  gnu/llvm/libcxx/utils/libcxx/test/config.py | 908
-rw-r--r--  gnu/llvm/libcxx/utils/libcxx/test/target_info.py | 198
-rw-r--r--  gnu/llvm/libcxx/utils/libcxx/util.py | 13
-rwxr-xr-x  gnu/llvm/libcxx/utils/merge_archives.py | 2
89 files changed, 3534 insertions, 5065 deletions
diff --git a/gnu/llvm/libcxx/benchmarks/algorithms.bench.cpp b/gnu/llvm/libcxx/benchmarks/algorithms.bench.cpp
index 93383e2b9cd..b259d476457 100644
--- a/gnu/llvm/libcxx/benchmarks/algorithms.bench.cpp
+++ b/gnu/llvm/libcxx/benchmarks/algorithms.bench.cpp
@@ -14,23 +14,14 @@
namespace {
-enum class ValueType { Uint32, Uint64, Pair, Tuple, String };
-struct AllValueTypes : EnumValuesAsTuple<AllValueTypes, ValueType, 5> {
- static constexpr const char* Names[] = {
- "uint32", "uint64", "pair<uint32, uint32>",
- "tuple<uint32, uint64, uint32>", "string"};
+enum class ValueType { Uint32, String };
+struct AllValueTypes : EnumValuesAsTuple<AllValueTypes, ValueType, 2> {
+ static constexpr const char* Names[] = {"uint32", "string"};
};
template <class V>
-using Value = std::conditional_t<
- V() == ValueType::Uint32, uint32_t,
- std::conditional_t<
- V() == ValueType::Uint64, uint64_t,
- std::conditional_t<
- V() == ValueType::Pair, std::pair<uint32_t, uint32_t>,
- std::conditional_t<V() == ValueType::Tuple,
- std::tuple<uint32_t, uint64_t, uint32_t>,
- std::string> > > >;
+using Value =
+ std::conditional_t<V() == ValueType::Uint32, uint32_t, std::string>;
enum class Order {
Random,
@@ -46,8 +37,7 @@ struct AllOrders : EnumValuesAsTuple<AllOrders, Order, 6> {
"PipeOrgan", "Heap"};
};
-template <typename T>
-void fillValues(std::vector<T>& V, size_t N, Order O) {
+void fillValues(std::vector<uint32_t>& V, size_t N, Order O) {
if (O == Order::SingleElement) {
V.resize(N, 0);
} else {
@@ -56,49 +46,13 @@ void fillValues(std::vector<T>& V, size_t N, Order O) {
}
}
-template <typename T>
-void fillValues(std::vector<std::pair<T, T> >& V, size_t N, Order O) {
- if (O == Order::SingleElement) {
- V.resize(N, std::make_pair(0, 0));
- } else {
- while (V.size() < N)
- // Half of array will have the same first element.
- if (V.size() % 2) {
- V.push_back(std::make_pair(V.size(), V.size()));
- } else {
- V.push_back(std::make_pair(0, V.size()));
- }
- }
-}
-
-template <typename T1, typename T2, typename T3>
-void fillValues(std::vector<std::tuple<T1, T2, T3> >& V, size_t N, Order O) {
- if (O == Order::SingleElement) {
- V.resize(N, std::make_tuple(0, 0, 0));
- } else {
- while (V.size() < N)
- // One third of array will have the same first element.
- // One third of array will have the same first element and the same second element.
- switch (V.size() % 3) {
- case 0:
- V.push_back(std::make_tuple(V.size(), V.size(), V.size()));
- break;
- case 1:
- V.push_back(std::make_tuple(0, V.size(), V.size()));
- break;
- case 2:
- V.push_back(std::make_tuple(0, 0, V.size()));
- break;
- }
- }
-}
-
void fillValues(std::vector<std::string>& V, size_t N, Order O) {
+
if (O == Order::SingleElement) {
- V.resize(N, getRandomString(64));
+ V.resize(N, getRandomString(1024));
} else {
while (V.size() < N)
- V.push_back(getRandomString(64));
+ V.push_back(getRandomString(1024));
}
}
@@ -131,24 +85,21 @@ void sortValues(T& V, Order O) {
}
}
-constexpr size_t TestSetElements =
-#if !TEST_HAS_FEATURE(memory_sanitizer)
- 1 << 18;
-#else
- 1 << 14;
-#endif
-
template <class ValueType>
std::vector<std::vector<Value<ValueType> > > makeOrderedValues(size_t N,
Order O) {
- std::vector<std::vector<Value<ValueType> > > Ret;
- const size_t NumCopies = std::max(size_t{1}, TestSetElements / N);
- Ret.resize(NumCopies);
- for (auto& V : Ret) {
- fillValues(V, N, O);
- sortValues(V, O);
- }
- return Ret;
+ // Let's make sure that all random sequences of the same size are the same.
+ // That way we can compare the different algorithms with the same input.
+ static std::map<std::pair<size_t, Order>, std::vector<Value<ValueType> > >
+ Cached;
+
+ auto& Values = Cached[{N, O}];
+ if (Values.empty()) {
+ fillValues(Values, N, O);
+ sortValues(Values, O);
+ };
+ const size_t NumCopies = std::max(size_t{1}, 1000 / N);
+ return { NumCopies, Values };
}
template <class T, class U>
@@ -160,28 +111,19 @@ TEST_ALWAYS_INLINE void resetCopies(benchmark::State& state, T& Copies,
state.ResumeTiming();
}
-enum class BatchSize {
- CountElements,
- CountBatch,
-};
-
template <class ValueType, class F>
void runOpOnCopies(benchmark::State& state, size_t Quantity, Order O,
- BatchSize Count, F Body) {
+ bool CountElements, F f) {
auto Copies = makeOrderedValues<ValueType>(Quantity, O);
- auto Orig = Copies;
+ const auto Orig = Copies[0];
- const size_t Batch = Count == BatchSize::CountElements
- ? Copies.size() * Quantity
- : Copies.size();
+ const size_t Batch = CountElements ? Copies.size() * Quantity : Copies.size();
while (state.KeepRunningBatch(Batch)) {
for (auto& Copy : Copies) {
- Body(Copy);
+ f(Copy);
benchmark::DoNotOptimize(Copy);
}
- state.PauseTiming();
- Copies = Orig;
- state.ResumeTiming();
+ resetCopies(state, Copies, Orig);
}
}
@@ -190,9 +132,9 @@ struct Sort {
size_t Quantity;
void run(benchmark::State& state) const {
- runOpOnCopies<ValueType>(
- state, Quantity, Order(), BatchSize::CountElements,
- [](auto& Copy) { std::sort(Copy.begin(), Copy.end()); });
+ runOpOnCopies<ValueType>(state, Quantity, Order(), false, [](auto& Copy) {
+ std::sort(Copy.begin(), Copy.end());
+ });
}
bool skip() const { return Order() == ::Order::Heap; }
@@ -208,9 +150,9 @@ struct StableSort {
size_t Quantity;
void run(benchmark::State& state) const {
- runOpOnCopies<ValueType>(
- state, Quantity, Order(), BatchSize::CountElements,
- [](auto& Copy) { std::stable_sort(Copy.begin(), Copy.end()); });
+ runOpOnCopies<ValueType>(state, Quantity, Order(), false, [](auto& Copy) {
+ std::stable_sort(Copy.begin(), Copy.end());
+ });
}
bool skip() const { return Order() == ::Order::Heap; }
@@ -226,9 +168,9 @@ struct MakeHeap {
size_t Quantity;
void run(benchmark::State& state) const {
- runOpOnCopies<ValueType>(
- state, Quantity, Order(), BatchSize::CountElements,
- [](auto& Copy) { std::make_heap(Copy.begin(), Copy.end()); });
+ runOpOnCopies<ValueType>(state, Quantity, Order(), false, [](auto& Copy) {
+ std::make_heap(Copy.begin(), Copy.end());
+ });
}
std::string name() const {
@@ -243,7 +185,7 @@ struct SortHeap {
void run(benchmark::State& state) const {
runOpOnCopies<ValueType>(
- state, Quantity, Order::Heap, BatchSize::CountElements,
+ state, Quantity, Order::Heap, false,
[](auto& Copy) { std::sort_heap(Copy.begin(), Copy.end()); });
}
@@ -257,11 +199,10 @@ struct MakeThenSortHeap {
size_t Quantity;
void run(benchmark::State& state) const {
- runOpOnCopies<ValueType>(state, Quantity, Order(), BatchSize::CountElements,
- [](auto& Copy) {
- std::make_heap(Copy.begin(), Copy.end());
- std::sort_heap(Copy.begin(), Copy.end());
- });
+ runOpOnCopies<ValueType>(state, Quantity, Order(), false, [](auto& Copy) {
+ std::make_heap(Copy.begin(), Copy.end());
+ std::sort_heap(Copy.begin(), Copy.end());
+ });
}
std::string name() const {
@@ -275,12 +216,11 @@ struct PushHeap {
size_t Quantity;
void run(benchmark::State& state) const {
- runOpOnCopies<ValueType>(
- state, Quantity, Order(), BatchSize::CountElements, [](auto& Copy) {
- for (auto I = Copy.begin(), E = Copy.end(); I != E; ++I) {
- std::push_heap(Copy.begin(), I + 1);
- }
- });
+ runOpOnCopies<ValueType>(state, Quantity, Order(), true, [](auto& Copy) {
+ for (auto I = Copy.begin(), E = Copy.end(); I != E; ++I) {
+ std::push_heap(Copy.begin(), I + 1);
+ }
+ });
}
bool skip() const { return Order() == ::Order::Heap; }
@@ -296,12 +236,11 @@ struct PopHeap {
size_t Quantity;
void run(benchmark::State& state) const {
- runOpOnCopies<ValueType>(
- state, Quantity, Order(), BatchSize::CountElements, [](auto& Copy) {
- for (auto B = Copy.begin(), I = Copy.end(); I != B; --I) {
- std::pop_heap(B, I);
- }
- });
+ runOpOnCopies<ValueType>(state, Quantity, Order(), true, [](auto& Copy) {
+ for (auto B = Copy.begin(), I = Copy.end(); I != B; --I) {
+ std::pop_heap(B, I);
+ }
+ });
}
std::string name() const {
@@ -334,4 +273,4 @@ int main(int argc, char** argv) {
makeCartesianProductBenchmark<PushHeap, AllValueTypes, AllOrders>(Quantities);
makeCartesianProductBenchmark<PopHeap, AllValueTypes>(Quantities);
benchmark::RunSpecifiedBenchmarks();
-} \ No newline at end of file
+}
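The algorithms.bench.cpp hunk above replaces the per-run regeneration of benchmark inputs (the removed TestSetElements logic) with a single cached input per (size, order) key, so every algorithm and every repetition of makeOrderedValues operates on identical data. The following standalone C++ sketch illustrates that caching pattern only; the type names and the makeInput/makeOrderedCopies helpers are simplified stand-ins, not the benchmark's actual code.

#include <algorithm>
#include <cstddef>
#include <map>
#include <utility>
#include <vector>

enum class Order { Random, Ascending };

// Stand-in for fillValues()/sortValues(): build one deterministic input
// vector of length n in the requested order (n > 0 assumed).
std::vector<int> makeInput(std::size_t n, Order o) {
  std::vector<int> v(n);
  for (std::size_t i = 0; i < n; ++i)
    v[i] = (o == Order::Ascending) ? static_cast<int>(i)
                                   : static_cast<int>((i * 2654435761u) % n);
  return v;
}

// Cache one canonical input per (size, order) key, then hand out copies.
// Because the cache is static, repeated calls with the same key return
// identical data, which keeps comparisons between algorithms fair.
std::vector<std::vector<int>> makeOrderedCopies(std::size_t n, Order o) {
  static std::map<std::pair<std::size_t, Order>, std::vector<int>> cache;
  auto& values = cache[{n, o}];
  if (values.empty())
    values = makeInput(n, o);
  const std::size_t copies = std::max<std::size_t>(1, 1000 / n);
  return std::vector<std::vector<int>>(copies, values);
}

A benchmark body would request the copies once per measured batch, run the algorithm over each copy, and restore the copies from the cached original before the next batch, as resetCopies() does in the hunk above.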
diff --git a/gnu/llvm/libcxx/cmake/Modules/DefineLinkerScript.cmake b/gnu/llvm/libcxx/cmake/Modules/DefineLinkerScript.cmake
index be7f026af7e..2e68121f618 100644
--- a/gnu/llvm/libcxx/cmake/Modules/DefineLinkerScript.cmake
+++ b/gnu/llvm/libcxx/cmake/Modules/DefineLinkerScript.cmake
@@ -31,17 +31,13 @@ function(define_linker_script target)
set(link_libraries)
if (interface_libs)
foreach(lib IN LISTS interface_libs)
- if ("${lib}" STREQUAL "cxx-headers")
- continue()
+ if (TARGET "${lib}" OR
+ (${lib} MATCHES "cxxabi(_static|_shared)?" AND HAVE_LIBCXXABI) OR
+ (${lib} MATCHES "unwind(_static|_shared)?" AND HAVE_LIBUNWIND))
+ list(APPEND link_libraries "${CMAKE_LINK_LIBRARY_FLAG}$<TARGET_PROPERTY:${lib},OUTPUT_NAME>")
+ else()
+ list(APPEND link_libraries "${CMAKE_LINK_LIBRARY_FLAG}${lib}")
endif()
- # If ${lib} is not a target, we use a dummy target which we know will
- # have an OUTPUT_NAME property so that CMake doesn't fail when evaluating
- # the non-selected branch of the `IF`. It doesn't matter what it evaluates
- # to because it's not selected, but it must not cause an error.
- # See https://gitlab.kitware.com/cmake/cmake/-/issues/21045.
- set(output_name_tgt "$<IF:$<TARGET_EXISTS:${lib}>,${lib},${target}>")
- set(libname "$<IF:$<TARGET_EXISTS:${lib}>,$<TARGET_PROPERTY:${output_name_tgt},OUTPUT_NAME>,${lib}>")
- list(APPEND link_libraries "${CMAKE_LINK_LIBRARY_FLAG}${libname}")
endforeach()
endif()
string(REPLACE ";" " " link_libraries "${link_libraries}")
diff --git a/gnu/llvm/libcxx/cmake/Modules/HandleOutOfTreeLLVM.cmake b/gnu/llvm/libcxx/cmake/Modules/HandleOutOfTreeLLVM.cmake
index ad2820b324d..5746afb5eb3 100644
--- a/gnu/llvm/libcxx/cmake/Modules/HandleOutOfTreeLLVM.cmake
+++ b/gnu/llvm/libcxx/cmake/Modules/HandleOutOfTreeLLVM.cmake
@@ -1,78 +1,142 @@
-if (NOT DEFINED LLVM_PATH)
- set(LLVM_PATH ${CMAKE_CURRENT_LIST_DIR}/../../../llvm CACHE PATH "" FORCE)
-endif()
+macro(find_llvm_parts)
+# Rely on llvm-config.
+ set(CONFIG_OUTPUT)
+ if(NOT LLVM_CONFIG_PATH)
+ find_program(LLVM_CONFIG_PATH "llvm-config")
+ endif()
+ if(DEFINED LLVM_PATH)
+ set(LLVM_INCLUDE_DIR ${LLVM_INCLUDE_DIR} CACHE PATH "Path to llvm/include")
+ set(LLVM_PATH ${LLVM_PATH} CACHE PATH "Path to LLVM source tree")
+ set(LLVM_MAIN_SRC_DIR ${LLVM_PATH})
+ set(LLVM_CMAKE_PATH "${LLVM_PATH}/cmake/modules")
+ if (NOT IS_DIRECTORY "${LLVM_PATH}")
+ message(FATAL_ERROR "The provided LLVM_PATH (${LLVM_PATH}) is not a valid directory")
+ endif()
+ elseif(LLVM_CONFIG_PATH)
+ message(STATUS "Found LLVM_CONFIG_PATH as ${LLVM_CONFIG_PATH}")
+ set(LIBCXX_USING_INSTALLED_LLVM 1)
+ set(CONFIG_COMMAND ${LLVM_CONFIG_PATH}
+ "--includedir"
+ "--prefix"
+ "--src-root")
+ execute_process(
+ COMMAND ${CONFIG_COMMAND}
+ RESULT_VARIABLE HAD_ERROR
+ OUTPUT_VARIABLE CONFIG_OUTPUT
+ )
+ if(NOT HAD_ERROR)
+ string(REGEX REPLACE
+ "[ \t]*[\r\n]+[ \t]*" ";"
+ CONFIG_OUTPUT ${CONFIG_OUTPUT})
+ else()
+ string(REPLACE ";" " " CONFIG_COMMAND_STR "${CONFIG_COMMAND}")
+ message(STATUS "${CONFIG_COMMAND_STR}")
+ message(FATAL_ERROR "llvm-config failed with status ${HAD_ERROR}")
+ endif()
-if(NOT IS_DIRECTORY ${LLVM_PATH})
- message(FATAL_ERROR
- "The provided LLVM_PATH (${LLVM_PATH}) is not a valid directory. Note that "
- "building libc++ outside of the monorepo is not supported anymore. Please "
- "use a Standalone build against the monorepo, a Runtimes build or a classic "
- "monorepo build.")
-endif()
+ list(GET CONFIG_OUTPUT 0 INCLUDE_DIR)
+ list(GET CONFIG_OUTPUT 1 LLVM_OBJ_ROOT)
+ list(GET CONFIG_OUTPUT 2 MAIN_SRC_DIR)
-set(LLVM_INCLUDE_DIR ${LLVM_PATH}/include CACHE PATH "Path to llvm/include")
-set(LLVM_PATH ${LLVM_PATH} CACHE PATH "Path to LLVM source tree")
-set(LLVM_MAIN_SRC_DIR ${LLVM_PATH})
-set(LLVM_CMAKE_PATH "${LLVM_PATH}/cmake/modules")
+ set(LLVM_INCLUDE_DIR ${INCLUDE_DIR} CACHE PATH "Path to llvm/include")
+ set(LLVM_BINARY_DIR ${LLVM_OBJ_ROOT} CACHE PATH "Path to LLVM build tree")
+ set(LLVM_MAIN_SRC_DIR ${MAIN_SRC_DIR} CACHE PATH "Path to LLVM source tree")
-if (EXISTS "${LLVM_CMAKE_PATH}")
- list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_PATH}")
-elseif (EXISTS "${LLVM_MAIN_SRC_DIR}/cmake/modules")
- list(APPEND CMAKE_MODULE_PATH "${LLVM_MAIN_SRC_DIR}/cmake/modules")
-else()
- message(FATAL_ERROR "Neither ${LLVM_CMAKE_PATH} nor ${LLVM_MAIN_SRC_DIR}/cmake/modules found. "
- "This is not a supported configuration.")
-endif()
+ # --cmakedir is supported since llvm r291218 (4.0 release)
+ execute_process(
+ COMMAND ${LLVM_CONFIG_PATH} --cmakedir
+ RESULT_VARIABLE HAD_ERROR
+ OUTPUT_VARIABLE CONFIG_OUTPUT
+ ERROR_QUIET)
+ if(NOT HAD_ERROR)
+ string(STRIP "${CONFIG_OUTPUT}" LLVM_CMAKE_PATH_FROM_LLVM_CONFIG)
+ file(TO_CMAKE_PATH "${LLVM_CMAKE_PATH_FROM_LLVM_CONFIG}" LLVM_CMAKE_PATH)
+ else()
+ file(TO_CMAKE_PATH "${LLVM_BINARY_DIR}" LLVM_BINARY_DIR_CMAKE_STYLE)
+ set(LLVM_CMAKE_PATH "${LLVM_BINARY_DIR_CMAKE_STYLE}/lib${LLVM_LIBDIR_SUFFIX}/cmake/llvm")
+ endif()
+ else()
+ set(LLVM_FOUND OFF)
+ message(WARNING "UNSUPPORTED LIBCXX CONFIGURATION DETECTED: "
+ "llvm-config not found and LLVM_PATH not defined.\n"
+ "Reconfigure with -DLLVM_CONFIG_PATH=path/to/llvm-config "
+ "or -DLLVM_PATH=path/to/llvm-source-root.")
+ return()
+ endif()
-message(STATUS "Configuring for standalone build.")
+ if (EXISTS "${LLVM_CMAKE_PATH}")
+ list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_PATH}")
+ elseif (EXISTS "${LLVM_MAIN_SRC_DIR}/cmake/modules")
+ list(APPEND CMAKE_MODULE_PATH "${LLVM_MAIN_SRC_DIR}/cmake/modules")
+ else()
+ set(LLVM_FOUND OFF)
+ message(WARNING "Neither ${LLVM_CMAKE_PATH} nor ${LLVM_MAIN_SRC_DIR}/cmake/modules found")
+ return()
+ endif()
-# By default, we target the host, but this can be overridden at CMake invocation time.
-include(GetHostTriple)
-get_host_triple(LLVM_INFERRED_HOST_TRIPLE)
-set(LLVM_HOST_TRIPLE "${LLVM_INFERRED_HOST_TRIPLE}" CACHE STRING "Host on which LLVM binaries will run")
-set(LLVM_DEFAULT_TARGET_TRIPLE "${LLVM_HOST_TRIPLE}" CACHE STRING "Target triple used by default.")
+ set(LLVM_FOUND ON)
+endmacro(find_llvm_parts)
-# Add LLVM Functions --------------------------------------------------------
-if (WIN32)
- set(LLVM_ON_UNIX 0)
- set(LLVM_ON_WIN32 1)
-else()
- set(LLVM_ON_UNIX 1)
- set(LLVM_ON_WIN32 0)
-endif()
+macro(configure_out_of_tree_llvm)
+ message(STATUS "Configuring for standalone build.")
+ set(LIBCXX_STANDALONE_BUILD 1)
-include(AddLLVM OPTIONAL)
+ find_llvm_parts()
-# LLVM Options --------------------------------------------------------------
-if (NOT DEFINED LLVM_INCLUDE_TESTS)
- set(LLVM_INCLUDE_TESTS ON)
-endif()
-if (NOT DEFINED LLVM_INCLUDE_DOCS)
- set(LLVM_INCLUDE_DOCS ON)
-endif()
-if (NOT DEFINED LLVM_ENABLE_SPHINX)
- set(LLVM_ENABLE_SPHINX OFF)
-endif()
+ # Add LLVM Functions --------------------------------------------------------
+ if (LLVM_FOUND AND LIBCXX_USING_INSTALLED_LLVM)
+ include(LLVMConfig) # For TARGET_TRIPLE
+ else()
+ if (WIN32)
+ set(LLVM_ON_UNIX 0)
+ set(LLVM_ON_WIN32 1)
+ else()
+ set(LLVM_ON_UNIX 1)
+ set(LLVM_ON_WIN32 0)
+ endif()
+ endif()
+ if (LLVM_FOUND)
+ include(AddLLVM OPTIONAL)
+ endif()
-if (LLVM_INCLUDE_TESTS)
- # Required LIT Configuration ------------------------------------------------
- # Define the default arguments to use with 'lit', and an option for the user
- # to override.
- set(LLVM_DEFAULT_EXTERNAL_LIT "${LLVM_MAIN_SRC_DIR}/utils/lit/lit.py")
- set(LIT_ARGS_DEFAULT "-sv --show-xfail --show-unsupported")
- if (MSVC OR XCODE)
- set(LIT_ARGS_DEFAULT "${LIT_ARGS_DEFAULT} --no-progress-bar")
+ # LLVM Options --------------------------------------------------------------
+ if (NOT DEFINED LLVM_INCLUDE_TESTS)
+ set(LLVM_INCLUDE_TESTS ${LLVM_FOUND})
+ endif()
+ if (NOT DEFINED LLVM_INCLUDE_DOCS)
+ set(LLVM_INCLUDE_DOCS ${LLVM_FOUND})
+ endif()
+ if (NOT DEFINED LLVM_ENABLE_SPHINX)
+ set(LLVM_ENABLE_SPHINX OFF)
endif()
- set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
-endif()
-# Required doc configuration
-if (LLVM_ENABLE_SPHINX)
- find_package(Sphinx REQUIRED)
-endif()
+ # In a standalone build, we don't have llvm to automatically generate the
+ # llvm-lit script for us. So we need to provide an explicit directory that
+ # the configurator should write the script into.
+ set(LLVM_LIT_OUTPUT_DIR "${libcxx_BINARY_DIR}/bin")
+
+ if (LLVM_INCLUDE_TESTS)
+ # Required LIT Configuration ------------------------------------------------
+ # Define the default arguments to use with 'lit', and an option for the user
+ # to override.
+ set(LLVM_DEFAULT_EXTERNAL_LIT "${LLVM_MAIN_SRC_DIR}/utils/lit/lit.py")
+ set(LIT_ARGS_DEFAULT "-sv --show-xfail --show-unsupported")
+ if (MSVC OR XCODE)
+ set(LIT_ARGS_DEFAULT "${LIT_ARGS_DEFAULT} --no-progress-bar")
+ endif()
+ set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
+ endif()
+
+ # Required doc configuration
+ if (LLVM_ENABLE_SPHINX)
+ find_package(Sphinx REQUIRED)
+ endif()
+
+ if (LLVM_ON_UNIX AND NOT APPLE)
+ set(LLVM_HAVE_LINK_VERSION_SCRIPT 1)
+ else()
+ set(LLVM_HAVE_LINK_VERSION_SCRIPT 0)
+ endif()
+endmacro(configure_out_of_tree_llvm)
-if (LLVM_ON_UNIX AND NOT APPLE)
- set(LLVM_HAVE_LINK_VERSION_SCRIPT 1)
-else()
- set(LLVM_HAVE_LINK_VERSION_SCRIPT 0)
-endif()
+configure_out_of_tree_llvm()
diff --git a/gnu/llvm/libcxx/include/__functional_base b/gnu/llvm/libcxx/include/__functional_base
index ccc3f3a58ca..ca761c409b6 100644
--- a/gnu/llvm/libcxx/include/__functional_base
+++ b/gnu/llvm/libcxx/include/__functional_base
@@ -11,22 +11,642 @@
#define _LIBCPP_FUNCTIONAL_BASE
#include <__config>
-#include <__functional/binary_function.h>
-#include <__functional/invoke.h>
-#include <__functional/operations.h>
-#include <__functional/reference_wrapper.h>
-#include <__functional/unary_function.h>
-#include <__functional/weak_result_type.h>
-#include <__memory/allocator_arg_t.h>
-#include <__memory/uses_allocator.h>
-#include <exception>
-#include <new>
#include <type_traits>
#include <typeinfo>
+#include <exception>
+#include <new>
#include <utility>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif
-#endif // _LIBCPP_FUNCTIONAL_BASE
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Arg1, class _Arg2, class _Result>
+struct _LIBCPP_TEMPLATE_VIS binary_function
+{
+ typedef _Arg1 first_argument_type;
+ typedef _Arg2 second_argument_type;
+ typedef _Result result_type;
+};
+
+template <class _Tp>
+struct __has_result_type
+{
+private:
+ struct __two {char __lx; char __lxx;};
+ template <class _Up> static __two __test(...);
+ template <class _Up> static char __test(typename _Up::result_type* = 0);
+public:
+ static const bool value = sizeof(__test<_Tp>(0)) == 1;
+};
+
+#if _LIBCPP_STD_VER > 11
+template <class _Tp = void>
+#else
+template <class _Tp>
+#endif
+struct _LIBCPP_TEMPLATE_VIS less : binary_function<_Tp, _Tp, bool>
+{
+ _LIBCPP_CONSTEXPR_AFTER_CXX11 _LIBCPP_INLINE_VISIBILITY
+ bool operator()(const _Tp& __x, const _Tp& __y) const
+ {return __x < __y;}
+};
+
+#if _LIBCPP_STD_VER > 11
+template <>
+struct _LIBCPP_TEMPLATE_VIS less<void>
+{
+ template <class _T1, class _T2>
+ _LIBCPP_CONSTEXPR_AFTER_CXX11 _LIBCPP_INLINE_VISIBILITY
+ auto operator()(_T1&& __t, _T2&& __u) const
+ _NOEXCEPT_(noexcept(_VSTD::forward<_T1>(__t) < _VSTD::forward<_T2>(__u)))
+ -> decltype (_VSTD::forward<_T1>(__t) < _VSTD::forward<_T2>(__u))
+ { return _VSTD::forward<_T1>(__t) < _VSTD::forward<_T2>(__u); }
+ typedef void is_transparent;
+};
+#endif
+
+// __weak_result_type
+
+template <class _Tp>
+struct __derives_from_unary_function
+{
+private:
+ struct __two {char __lx; char __lxx;};
+ static __two __test(...);
+ template <class _Ap, class _Rp>
+ static unary_function<_Ap, _Rp>
+ __test(const volatile unary_function<_Ap, _Rp>*);
+public:
+ static const bool value = !is_same<decltype(__test((_Tp*)0)), __two>::value;
+ typedef decltype(__test((_Tp*)0)) type;
+};
+
+template <class _Tp>
+struct __derives_from_binary_function
+{
+private:
+ struct __two {char __lx; char __lxx;};
+ static __two __test(...);
+ template <class _A1, class _A2, class _Rp>
+ static binary_function<_A1, _A2, _Rp>
+ __test(const volatile binary_function<_A1, _A2, _Rp>*);
+public:
+ static const bool value = !is_same<decltype(__test((_Tp*)0)), __two>::value;
+ typedef decltype(__test((_Tp*)0)) type;
+};
+
+template <class _Tp, bool = __derives_from_unary_function<_Tp>::value>
+struct __maybe_derive_from_unary_function // bool is true
+ : public __derives_from_unary_function<_Tp>::type
+{
+};
+
+template <class _Tp>
+struct __maybe_derive_from_unary_function<_Tp, false>
+{
+};
+
+template <class _Tp, bool = __derives_from_binary_function<_Tp>::value>
+struct __maybe_derive_from_binary_function // bool is true
+ : public __derives_from_binary_function<_Tp>::type
+{
+};
+
+template <class _Tp>
+struct __maybe_derive_from_binary_function<_Tp, false>
+{
+};
+
+template <class _Tp, bool = __has_result_type<_Tp>::value>
+struct __weak_result_type_imp // bool is true
+ : public __maybe_derive_from_unary_function<_Tp>,
+ public __maybe_derive_from_binary_function<_Tp>
+{
+ typedef _LIBCPP_NODEBUG_TYPE typename _Tp::result_type result_type;
+};
+
+template <class _Tp>
+struct __weak_result_type_imp<_Tp, false>
+ : public __maybe_derive_from_unary_function<_Tp>,
+ public __maybe_derive_from_binary_function<_Tp>
+{
+};
+
+template <class _Tp>
+struct __weak_result_type
+ : public __weak_result_type_imp<_Tp>
+{
+};
+
+// 0 argument case
+
+template <class _Rp>
+struct __weak_result_type<_Rp ()>
+{
+ typedef _LIBCPP_NODEBUG_TYPE _Rp result_type;
+};
+
+template <class _Rp>
+struct __weak_result_type<_Rp (&)()>
+{
+ typedef _LIBCPP_NODEBUG_TYPE _Rp result_type;
+};
+
+template <class _Rp>
+struct __weak_result_type<_Rp (*)()>
+{
+ typedef _LIBCPP_NODEBUG_TYPE _Rp result_type;
+};
+
+// 1 argument case
+
+template <class _Rp, class _A1>
+struct __weak_result_type<_Rp (_A1)>
+ : public unary_function<_A1, _Rp>
+{
+};
+
+template <class _Rp, class _A1>
+struct __weak_result_type<_Rp (&)(_A1)>
+ : public unary_function<_A1, _Rp>
+{
+};
+
+template <class _Rp, class _A1>
+struct __weak_result_type<_Rp (*)(_A1)>
+ : public unary_function<_A1, _Rp>
+{
+};
+
+template <class _Rp, class _Cp>
+struct __weak_result_type<_Rp (_Cp::*)()>
+ : public unary_function<_Cp*, _Rp>
+{
+};
+
+template <class _Rp, class _Cp>
+struct __weak_result_type<_Rp (_Cp::*)() const>
+ : public unary_function<const _Cp*, _Rp>
+{
+};
+
+template <class _Rp, class _Cp>
+struct __weak_result_type<_Rp (_Cp::*)() volatile>
+ : public unary_function<volatile _Cp*, _Rp>
+{
+};
+
+template <class _Rp, class _Cp>
+struct __weak_result_type<_Rp (_Cp::*)() const volatile>
+ : public unary_function<const volatile _Cp*, _Rp>
+{
+};
+
+// 2 argument case
+
+template <class _Rp, class _A1, class _A2>
+struct __weak_result_type<_Rp (_A1, _A2)>
+ : public binary_function<_A1, _A2, _Rp>
+{
+};
+
+template <class _Rp, class _A1, class _A2>
+struct __weak_result_type<_Rp (*)(_A1, _A2)>
+ : public binary_function<_A1, _A2, _Rp>
+{
+};
+
+template <class _Rp, class _A1, class _A2>
+struct __weak_result_type<_Rp (&)(_A1, _A2)>
+ : public binary_function<_A1, _A2, _Rp>
+{
+};
+
+template <class _Rp, class _Cp, class _A1>
+struct __weak_result_type<_Rp (_Cp::*)(_A1)>
+ : public binary_function<_Cp*, _A1, _Rp>
+{
+};
+
+template <class _Rp, class _Cp, class _A1>
+struct __weak_result_type<_Rp (_Cp::*)(_A1) const>
+ : public binary_function<const _Cp*, _A1, _Rp>
+{
+};
+
+template <class _Rp, class _Cp, class _A1>
+struct __weak_result_type<_Rp (_Cp::*)(_A1) volatile>
+ : public binary_function<volatile _Cp*, _A1, _Rp>
+{
+};
+
+template <class _Rp, class _Cp, class _A1>
+struct __weak_result_type<_Rp (_Cp::*)(_A1) const volatile>
+ : public binary_function<const volatile _Cp*, _A1, _Rp>
+{
+};
+
+
+#ifndef _LIBCPP_CXX03_LANG
+// 3 or more arguments
+
+template <class _Rp, class _A1, class _A2, class _A3, class ..._A4>
+struct __weak_result_type<_Rp (_A1, _A2, _A3, _A4...)>
+{
+ typedef _Rp result_type;
+};
+
+template <class _Rp, class _A1, class _A2, class _A3, class ..._A4>
+struct __weak_result_type<_Rp (&)(_A1, _A2, _A3, _A4...)>
+{
+ typedef _Rp result_type;
+};
+
+template <class _Rp, class _A1, class _A2, class _A3, class ..._A4>
+struct __weak_result_type<_Rp (*)(_A1, _A2, _A3, _A4...)>
+{
+ typedef _Rp result_type;
+};
+
+template <class _Rp, class _Cp, class _A1, class _A2, class ..._A3>
+struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...)>
+{
+ typedef _Rp result_type;
+};
+
+template <class _Rp, class _Cp, class _A1, class _A2, class ..._A3>
+struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...) const>
+{
+ typedef _Rp result_type;
+};
+
+template <class _Rp, class _Cp, class _A1, class _A2, class ..._A3>
+struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...) volatile>
+{
+ typedef _Rp result_type;
+};
+
+template <class _Rp, class _Cp, class _A1, class _A2, class ..._A3>
+struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...) const volatile>
+{
+ typedef _Rp result_type;
+};
+
+template <class _Tp, class ..._Args>
+struct __invoke_return
+{
+ typedef decltype(__invoke(_VSTD::declval<_Tp>(), _VSTD::declval<_Args>()...)) type;
+};
+
+#else // defined(_LIBCPP_CXX03_LANG)
+
+#include <__functional_base_03>
+
+#endif // !defined(_LIBCPP_CXX03_LANG)
+
+
+template <class _Ret>
+struct __invoke_void_return_wrapper
+{
+#ifndef _LIBCPP_CXX03_LANG
+ template <class ..._Args>
+ static _Ret __call(_Args&&... __args) {
+ return __invoke(_VSTD::forward<_Args>(__args)...);
+ }
+#else
+ template <class _Fn>
+ static _Ret __call(_Fn __f) {
+ return __invoke(__f);
+ }
+
+ template <class _Fn, class _A0>
+ static _Ret __call(_Fn __f, _A0& __a0) {
+ return __invoke(__f, __a0);
+ }
+
+ template <class _Fn, class _A0, class _A1>
+ static _Ret __call(_Fn __f, _A0& __a0, _A1& __a1) {
+ return __invoke(__f, __a0, __a1);
+ }
+
+ template <class _Fn, class _A0, class _A1, class _A2>
+ static _Ret __call(_Fn __f, _A0& __a0, _A1& __a1, _A2& __a2){
+ return __invoke(__f, __a0, __a1, __a2);
+ }
+#endif
+};
+
+template <>
+struct __invoke_void_return_wrapper<void>
+{
+#ifndef _LIBCPP_CXX03_LANG
+ template <class ..._Args>
+ static void __call(_Args&&... __args) {
+ __invoke(_VSTD::forward<_Args>(__args)...);
+ }
+#else
+ template <class _Fn>
+ static void __call(_Fn __f) {
+ __invoke(__f);
+ }
+
+ template <class _Fn, class _A0>
+ static void __call(_Fn __f, _A0& __a0) {
+ __invoke(__f, __a0);
+ }
+
+ template <class _Fn, class _A0, class _A1>
+ static void __call(_Fn __f, _A0& __a0, _A1& __a1) {
+ __invoke(__f, __a0, __a1);
+ }
+
+ template <class _Fn, class _A0, class _A1, class _A2>
+ static void __call(_Fn __f, _A0& __a0, _A1& __a1, _A2& __a2) {
+ __invoke(__f, __a0, __a1, __a2);
+ }
+#endif
+};
+
+template <class _Tp>
+class _LIBCPP_TEMPLATE_VIS reference_wrapper
+ : public __weak_result_type<_Tp>
+{
+public:
+ // types
+ typedef _Tp type;
+private:
+ type* __f_;
+
+public:
+ // construct/copy/destroy
+ _LIBCPP_INLINE_VISIBILITY reference_wrapper(type& __f) _NOEXCEPT
+ : __f_(_VSTD::addressof(__f)) {}
+#ifndef _LIBCPP_CXX03_LANG
+ private: reference_wrapper(type&&); public: // = delete; // do not bind to temps
+#endif
+
+ // access
+ _LIBCPP_INLINE_VISIBILITY operator type& () const _NOEXCEPT {return *__f_;}
+ _LIBCPP_INLINE_VISIBILITY type& get() const _NOEXCEPT {return *__f_;}
+
+#ifndef _LIBCPP_CXX03_LANG
+ // invoke
+ template <class... _ArgTypes>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_of<type&, _ArgTypes...>::type
+ operator() (_ArgTypes&&... __args) const {
+ return __invoke(get(), _VSTD::forward<_ArgTypes>(__args)...);
+ }
+#else
+
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return<type>::type
+ operator() () const {
+ return __invoke(get());
+ }
+
+ template <class _A0>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return0<type, _A0>::type
+ operator() (_A0& __a0) const {
+ return __invoke(get(), __a0);
+ }
+
+ template <class _A0>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return0<type, _A0 const>::type
+ operator() (_A0 const& __a0) const {
+ return __invoke(get(), __a0);
+ }
+
+ template <class _A0, class _A1>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return1<type, _A0, _A1>::type
+ operator() (_A0& __a0, _A1& __a1) const {
+ return __invoke(get(), __a0, __a1);
+ }
+
+ template <class _A0, class _A1>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return1<type, _A0 const, _A1>::type
+ operator() (_A0 const& __a0, _A1& __a1) const {
+ return __invoke(get(), __a0, __a1);
+ }
+
+ template <class _A0, class _A1>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return1<type, _A0, _A1 const>::type
+ operator() (_A0& __a0, _A1 const& __a1) const {
+ return __invoke(get(), __a0, __a1);
+ }
+
+ template <class _A0, class _A1>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return1<type, _A0 const, _A1 const>::type
+ operator() (_A0 const& __a0, _A1 const& __a1) const {
+ return __invoke(get(), __a0, __a1);
+ }
+
+ template <class _A0, class _A1, class _A2>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return2<type, _A0, _A1, _A2>::type
+ operator() (_A0& __a0, _A1& __a1, _A2& __a2) const {
+ return __invoke(get(), __a0, __a1, __a2);
+ }
+
+ template <class _A0, class _A1, class _A2>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return2<type, _A0 const, _A1, _A2>::type
+ operator() (_A0 const& __a0, _A1& __a1, _A2& __a2) const {
+ return __invoke(get(), __a0, __a1, __a2);
+ }
+
+ template <class _A0, class _A1, class _A2>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return2<type, _A0, _A1 const, _A2>::type
+ operator() (_A0& __a0, _A1 const& __a1, _A2& __a2) const {
+ return __invoke(get(), __a0, __a1, __a2);
+ }
+
+ template <class _A0, class _A1, class _A2>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return2<type, _A0, _A1, _A2 const>::type
+ operator() (_A0& __a0, _A1& __a1, _A2 const& __a2) const {
+ return __invoke(get(), __a0, __a1, __a2);
+ }
+
+ template <class _A0, class _A1, class _A2>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return2<type, _A0 const, _A1 const, _A2>::type
+ operator() (_A0 const& __a0, _A1 const& __a1, _A2& __a2) const {
+ return __invoke(get(), __a0, __a1, __a2);
+ }
+
+ template <class _A0, class _A1, class _A2>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return2<type, _A0 const, _A1, _A2 const>::type
+ operator() (_A0 const& __a0, _A1& __a1, _A2 const& __a2) const {
+ return __invoke(get(), __a0, __a1, __a2);
+ }
+
+ template <class _A0, class _A1, class _A2>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return2<type, _A0, _A1 const, _A2 const>::type
+ operator() (_A0& __a0, _A1 const& __a1, _A2 const& __a2) const {
+ return __invoke(get(), __a0, __a1, __a2);
+ }
+
+ template <class _A0, class _A1, class _A2>
+ _LIBCPP_INLINE_VISIBILITY
+ typename __invoke_return2<type, _A0 const, _A1 const, _A2 const>::type
+ operator() (_A0 const& __a0, _A1 const& __a1, _A2 const& __a2) const {
+ return __invoke(get(), __a0, __a1, __a2);
+ }
+#endif // _LIBCPP_CXX03_LANG
+};
+
+
+template <class _Tp>
+inline _LIBCPP_INLINE_VISIBILITY
+reference_wrapper<_Tp>
+ref(_Tp& __t) _NOEXCEPT
+{
+ return reference_wrapper<_Tp>(__t);
+}
+
+template <class _Tp>
+inline _LIBCPP_INLINE_VISIBILITY
+reference_wrapper<_Tp>
+ref(reference_wrapper<_Tp> __t) _NOEXCEPT
+{
+ return ref(__t.get());
+}
+
+template <class _Tp>
+inline _LIBCPP_INLINE_VISIBILITY
+reference_wrapper<const _Tp>
+cref(const _Tp& __t) _NOEXCEPT
+{
+ return reference_wrapper<const _Tp>(__t);
+}
+
+template <class _Tp>
+inline _LIBCPP_INLINE_VISIBILITY
+reference_wrapper<const _Tp>
+cref(reference_wrapper<_Tp> __t) _NOEXCEPT
+{
+ return cref(__t.get());
+}
+
+#ifndef _LIBCPP_CXX03_LANG
+template <class _Tp> void ref(const _Tp&&) = delete;
+template <class _Tp> void cref(const _Tp&&) = delete;
+#endif
+
+#if _LIBCPP_STD_VER > 11
+template <class _Tp, class, class = void>
+struct __is_transparent : false_type {};
+
+template <class _Tp, class _Up>
+struct __is_transparent<_Tp, _Up,
+ typename __void_t<typename _Tp::is_transparent>::type>
+ : true_type {};
+#endif
+
+// allocator_arg_t
+
+struct _LIBCPP_TEMPLATE_VIS allocator_arg_t { explicit allocator_arg_t() = default; };
+
+#if defined(_LIBCPP_CXX03_LANG) || defined(_LIBCPP_BUILDING_LIBRARY)
+extern _LIBCPP_EXPORTED_FROM_ABI const allocator_arg_t allocator_arg;
+#else
+/* _LIBCPP_INLINE_VAR */ constexpr allocator_arg_t allocator_arg = allocator_arg_t();
+#endif
+
+// uses_allocator
+
+template <class _Tp>
+struct __has_allocator_type
+{
+private:
+ struct __two {char __lx; char __lxx;};
+ template <class _Up> static __two __test(...);
+ template <class _Up> static char __test(typename _Up::allocator_type* = 0);
+public:
+ static const bool value = sizeof(__test<_Tp>(0)) == 1;
+};
+
+template <class _Tp, class _Alloc, bool = __has_allocator_type<_Tp>::value>
+struct __uses_allocator
+ : public integral_constant<bool,
+ is_convertible<_Alloc, typename _Tp::allocator_type>::value>
+{
+};
+
+template <class _Tp, class _Alloc>
+struct __uses_allocator<_Tp, _Alloc, false>
+ : public false_type
+{
+};
+
+template <class _Tp, class _Alloc>
+struct _LIBCPP_TEMPLATE_VIS uses_allocator
+ : public __uses_allocator<_Tp, _Alloc>
+{
+};
+
+#if _LIBCPP_STD_VER > 14
+template <class _Tp, class _Alloc>
+_LIBCPP_INLINE_VAR constexpr size_t uses_allocator_v = uses_allocator<_Tp, _Alloc>::value;
+#endif
+
+#ifndef _LIBCPP_CXX03_LANG
+
+// allocator construction
+
+template <class _Tp, class _Alloc, class ..._Args>
+struct __uses_alloc_ctor_imp
+{
+ typedef _LIBCPP_NODEBUG_TYPE typename __uncvref<_Alloc>::type _RawAlloc;
+ static const bool __ua = uses_allocator<_Tp, _RawAlloc>::value;
+ static const bool __ic =
+ is_constructible<_Tp, allocator_arg_t, _Alloc, _Args...>::value;
+ static const int value = __ua ? 2 - __ic : 0;
+};
+
+template <class _Tp, class _Alloc, class ..._Args>
+struct __uses_alloc_ctor
+ : integral_constant<int, __uses_alloc_ctor_imp<_Tp, _Alloc, _Args...>::value>
+ {};
+
+template <class _Tp, class _Allocator, class... _Args>
+inline _LIBCPP_INLINE_VISIBILITY
+void __user_alloc_construct_impl (integral_constant<int, 0>, _Tp *__storage, const _Allocator &, _Args &&... __args )
+{
+ new (__storage) _Tp (_VSTD::forward<_Args>(__args)...);
+}
+
+// FIXME: This should have a version which takes a non-const alloc.
+template <class _Tp, class _Allocator, class... _Args>
+inline _LIBCPP_INLINE_VISIBILITY
+void __user_alloc_construct_impl (integral_constant<int, 1>, _Tp *__storage, const _Allocator &__a, _Args &&... __args )
+{
+ new (__storage) _Tp (allocator_arg, __a, _VSTD::forward<_Args>(__args)...);
+}
+
+// FIXME: This should have a version which takes a non-const alloc.
+template <class _Tp, class _Allocator, class... _Args>
+inline _LIBCPP_INLINE_VISIBILITY
+void __user_alloc_construct_impl (integral_constant<int, 2>, _Tp *__storage, const _Allocator &__a, _Args &&... __args )
+{
+ new (__storage) _Tp (_VSTD::forward<_Args>(__args)..., __a);
+}
+
+#endif // _LIBCPP_CXX03_LANG
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP_FUNCTIONAL_BASE
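The __functional_base hunk above restores the 10.0.1 monolithic implementations of binary_function, less, the __weak_result_type machinery, reference_wrapper with ref/cref, and the uses_allocator traits. For orientation only, here is a small self-contained usage sketch of the public behavior that code implements, written against the standard <functional> names rather than the internal _VSTD ones.

#include <cassert>
#include <functional>  // std::reference_wrapper, std::ref, std::cref
#include <vector>

static int twice(int x) { return 2 * x; }

int main() {
  int n = 1;
  std::reference_wrapper<int> r = std::ref(n);  // binds to an lvalue; rvalues are rejected
  r.get() = 5;                                  // writes through to n
  assert(n == 5);

  // A reference_wrapper around a callable is itself callable: operator()
  // forwards the arguments to the wrapped target.
  std::reference_wrapper<int(int)> f = std::ref(twice);
  assert(f(21) == 42);

  // cref() yields reference_wrapper<const T>; containers can hold wrappers
  // even though they cannot hold raw references.
  std::vector<std::reference_wrapper<const int>> view{std::cref(n)};
  assert(view[0].get() == 5);
  return 0;
}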
diff --git a/gnu/llvm/libcxx/include/__libcpp_version b/gnu/llvm/libcxx/include/__libcpp_version
index 09514aa4db9..5caff40c4a0 100644
--- a/gnu/llvm/libcxx/include/__libcpp_version
+++ b/gnu/llvm/libcxx/include/__libcpp_version
@@ -1 +1 @@
-13000
+10000
diff --git a/gnu/llvm/libcxx/include/__nullptr b/gnu/llvm/libcxx/include/__nullptr
index e1475116423..45529a710b6 100644
--- a/gnu/llvm/libcxx/include/__nullptr
+++ b/gnu/llvm/libcxx/include/__nullptr
@@ -56,6 +56,6 @@ namespace std
typedef decltype(nullptr) nullptr_t;
}
-#endif // _LIBCPP_HAS_NO_NULLPTR
+#endif // _LIBCPP_HAS_NO_NULLPTR
-#endif // _LIBCPP_NULLPTR
+#endif // _LIBCPP_NULLPTR
diff --git a/gnu/llvm/libcxx/include/__string b/gnu/llvm/libcxx/include/__string
index b77a7fb4f8d..056b9b80ea5 100644
--- a/gnu/llvm/libcxx/include/__string
+++ b/gnu/llvm/libcxx/include/__string
@@ -10,21 +10,53 @@
#ifndef _LIBCPP___STRING
#define _LIBCPP___STRING
+/*
+ string synopsis
+
+namespace std
+{
+
+template <class charT>
+struct char_traits
+{
+ typedef charT char_type;
+ typedef ... int_type;
+ typedef streamoff off_type;
+ typedef streampos pos_type;
+ typedef mbstate_t state_type;
+
+ static constexpr void assign(char_type& c1, const char_type& c2) noexcept;
+ static constexpr bool eq(char_type c1, char_type c2) noexcept;
+ static constexpr bool lt(char_type c1, char_type c2) noexcept;
+
+ static constexpr int compare(const char_type* s1, const char_type* s2, size_t n);
+ static constexpr size_t length(const char_type* s);
+ static constexpr const char_type*
+ find(const char_type* s, size_t n, const char_type& a);
+
+ static constexpr char_type* move(char_type* s1, const char_type* s2, size_t n); // constexpr in C++20
+ static constexpr char_type* copy(char_type* s1, const char_type* s2, size_t n); // constexpr in C++20
+ static constexpr char_type* assign(char_type* s, size_t n, char_type a); // constexpr in C++20
+
+ static constexpr int_type not_eof(int_type c) noexcept;
+ static constexpr char_type to_char_type(int_type c) noexcept;
+ static constexpr int_type to_int_type(char_type c) noexcept;
+ static constexpr bool eq_int_type(int_type c1, int_type c2) noexcept;
+ static constexpr int_type eof() noexcept;
+};
+
+template <> struct char_traits<char>;
+template <> struct char_traits<wchar_t>;
+template <> struct char_traits<char8_t>; // c++20
+
+} // std
+
+*/
+
#include <__config>
-#include <__algorithm/copy.h>
-#include <__algorithm/copy_backward.h>
-#include <__algorithm/copy_n.h>
-#include <__algorithm/fill_n.h>
-#include <__algorithm/find_first_of.h>
-#include <__algorithm/find_end.h>
-#include <__algorithm/min.h>
-#include <__functional/hash.h> // for __murmur2_or_cityhash
-#include <__iterator/iterator_traits.h>
-#include <cstdio> // for EOF
-#include <cstdint> // for uint_least16_t
-#include <cstring> // for memcpy
-#include <cwchar> // for wmemcpy
-#include <type_traits> // for __libcpp_is_constant_evaluated
+#include <algorithm> // for search and min
+#include <cstdio> // For EOF.
+#include <memory> // for __murmur2_or_cityhash
#include <__debug>
@@ -38,123 +70,6 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
-// The the extern template ABI lists are kept outside of <string> to improve the
-// readability of that header.
-
-// The extern template ABI lists are kept outside of <string> to improve the
-// readability of that header. We maintain 2 ABI lists:
-// - _LIBCPP_STRING_V1_EXTERN_TEMPLATE_LIST
-// - _LIBCPP_STRING_UNSTABLE_EXTERN_TEMPLATE_LIST
-// As the name implies, the ABI lists define the V1 (Stable) and unstable ABI.
-//
-// For unstable, we may explicitly remove function that are external in V1,
-// and add (new) external functions to better control inlining and compiler
-// optimization opportunities.
-//
-// For stable, the ABI list should rarely change, except for adding new
-// functions supporting new c++ version / API changes. Typically entries
-// must never be removed from the stable list.
-#define _LIBCPP_STRING_V1_EXTERN_TEMPLATE_LIST(_Func, _CharType) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::replace(size_type, size_type, value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::rfind(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__init(value_type const*, size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::basic_string(basic_string const&)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::replace(size_type, size_type, value_type const*)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::basic_string(basic_string const&, allocator<_CharType> const&)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find_last_not_of(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::~basic_string()) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find_first_not_of(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::insert(size_type, size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::operator=(value_type)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__init(value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS const _CharType& basic_string<_CharType>::at(size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::insert(size_type, value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find_first_of(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::replace(size_type, size_type, size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::assign(value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::reserve(size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::append(value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::assign(basic_string const&, size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::copy(value_type*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::basic_string(basic_string const&, size_type, size_type, allocator<_CharType> const&)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find(value_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__init(size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::insert(size_type, value_type const*)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find_last_of(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__grow_by(size_type, size_type, size_type, size_type, size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__grow_by_and_replace(size_type, size_type, size_type, size_type, size_type, size_type, value_type const*)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::push_back(value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::append(size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::rfind(value_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS const basic_string<_CharType>::size_type basic_string<_CharType>::npos) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::assign(size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::erase(size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::append(basic_string const&, size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS int basic_string<_CharType>::compare(value_type const*) const) \
- _Func(_LIBCPP_FUNC_VIS int basic_string<_CharType>::compare(size_type, size_type, value_type const*) const) \
- _Func(_LIBCPP_FUNC_VIS _CharType& basic_string<_CharType>::at(size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::assign(value_type const*)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS int basic_string<_CharType>::compare(size_type, size_type, basic_string const&, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS int basic_string<_CharType>::compare(size_type, size_type, value_type const*, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::operator=(basic_string const&)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::append(value_type const*)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::replace(size_type, size_type, basic_string const&, size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::iterator basic_string<_CharType>::insert(basic_string::const_iterator, value_type)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::resize(size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::insert(size_type, basic_string const&, size_type, size_type))
-
-#define _LIBCPP_STRING_UNSTABLE_EXTERN_TEMPLATE_LIST(_Func, _CharType) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::replace(size_type, size_type, value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::rfind(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__init(value_type const*, size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::replace(size_type, size_type, value_type const*)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find_last_not_of(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::~basic_string()) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find_first_not_of(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::insert(size_type, size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::operator=(value_type)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__init(value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__init_copy_ctor_external(value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS const _CharType& basic_string<_CharType>::at(size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::insert(size_type, value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find_first_of(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::replace(size_type, size_type, size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::__assign_external(value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::__assign_external(value_type const*)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::reserve(size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::append(value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::assign(basic_string const&, size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::copy(value_type*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::basic_string(basic_string const&, size_type, size_type, allocator<_CharType> const&)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find(value_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__init(size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::insert(size_type, value_type const*)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find_last_of(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__grow_by(size_type, size_type, size_type, size_type, size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__grow_by_and_replace(size_type, size_type, size_type, size_type, size_type, size_type, value_type const*)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::__assign_no_alias<false>(value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::__assign_no_alias<true>(value_type const*, size_type)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::push_back(value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::append(size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::rfind(value_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS const basic_string<_CharType>::size_type basic_string<_CharType>::npos) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::assign(size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::__erase_external_with_move(size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::append(basic_string const&, size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS int basic_string<_CharType>::compare(value_type const*) const) \
- _Func(_LIBCPP_FUNC_VIS int basic_string<_CharType>::compare(size_type, size_type, value_type const*) const) \
- _Func(_LIBCPP_FUNC_VIS _CharType& basic_string<_CharType>::at(size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::size_type basic_string<_CharType>::find(value_type const*, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS int basic_string<_CharType>::compare(size_type, size_type, basic_string const&, size_type, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS int basic_string<_CharType>::compare(size_type, size_type, value_type const*, size_type) const) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::append(value_type const*)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::replace(size_type, size_type, basic_string const&, size_type, size_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>::iterator basic_string<_CharType>::insert(basic_string::const_iterator, value_type)) \
- _Func(_LIBCPP_FUNC_VIS void basic_string<_CharType>::resize(size_type, value_type)) \
- _Func(_LIBCPP_FUNC_VIS basic_string<_CharType>& basic_string<_CharType>::insert(size_type, basic_string const&, size_type, size_type))
-
-
// char_traits
template <class _CharT>
@@ -236,7 +151,7 @@ char_traits<_CharT>::find(const char_type* __s, size_t __n, const char_type& __a
return __s;
++__s;
}
- return nullptr;
+ return 0;
}
template <class _CharT>
@@ -286,7 +201,7 @@ char_traits<_CharT>::assign(char_type* __s, size_t __n, char_type __a)
// constexpr versions of move/copy/assign.
template <class _CharT>
-static inline _LIBCPP_CONSTEXPR_AFTER_CXX17
+static inline _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
_CharT* __move_constexpr(_CharT* __s1, const _CharT* __s2, size_t __n) _NOEXCEPT
{
if (__n == 0) return __s1;
@@ -299,7 +214,7 @@ _CharT* __move_constexpr(_CharT* __s1, const _CharT* __s2, size_t __n) _NOEXCEPT
}
template <class _CharT>
-static inline _LIBCPP_CONSTEXPR_AFTER_CXX17
+static inline _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
_CharT* __copy_constexpr(_CharT* __s1, const _CharT* __s2, size_t __n) _NOEXCEPT
{
_VSTD::copy_n(__s2, __n, __s1);
@@ -307,7 +222,7 @@ _CharT* __copy_constexpr(_CharT* __s1, const _CharT* __s2, size_t __n) _NOEXCEPT
}
template <class _CharT>
-static inline _LIBCPP_CONSTEXPR_AFTER_CXX17
+static inline _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
_CharT* __assign_constexpr(_CharT* __s, size_t __n, _CharT __a) _NOEXCEPT
{
_VSTD::fill_n(__s, __n, __a);
@@ -338,27 +253,27 @@ struct _LIBCPP_TEMPLATE_VIS char_traits<char>
length(const char_type* __s) _NOEXCEPT {return __builtin_strlen(__s);}
static _LIBCPP_CONSTEXPR_AFTER_CXX14
const char_type* find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT;
- static inline _LIBCPP_CONSTEXPR_AFTER_CXX17
+ static inline _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
char_type* move(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT
{
return __libcpp_is_constant_evaluated()
- ? _VSTD::__move_constexpr(__s1, __s2, __n)
- : __n == 0 ? __s1 : (char_type*)_VSTD::memmove(__s1, __s2, __n);
+ ? __move_constexpr(__s1, __s2, __n)
+ : __n == 0 ? __s1 : (char_type*)memmove(__s1, __s2, __n);
}
- static inline _LIBCPP_CONSTEXPR_AFTER_CXX17
+ static inline _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
char_type* copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT
{
_LIBCPP_ASSERT(__s2 < __s1 || __s2 >= __s1+__n, "char_traits::copy overlapped range");
return __libcpp_is_constant_evaluated()
- ? _VSTD::__copy_constexpr(__s1, __s2, __n)
- : __n == 0 ? __s1 : (char_type*)_VSTD::memcpy(__s1, __s2, __n);
+ ? __copy_constexpr(__s1, __s2, __n)
+ : __n == 0 ? __s1 : (char_type*)memcpy(__s1, __s2, __n);
}
- static inline _LIBCPP_CONSTEXPR_AFTER_CXX17
+ static inline _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
char_type* assign(char_type* __s, size_t __n, char_type __a) _NOEXCEPT
{
return __libcpp_is_constant_evaluated()
- ? _VSTD::__assign_constexpr(__s, __n, __a)
- : __n == 0 ? __s : (char_type*)_VSTD::memset(__s, to_int_type(__a), __n);
+ ? __assign_constexpr(__s, __n, __a)
+ : __n == 0 ? __s : (char_type*)memset(__s, to_int_type(__a), __n);
}
static inline _LIBCPP_CONSTEXPR int_type not_eof(int_type __c) _NOEXCEPT
@@ -382,7 +297,7 @@ char_traits<char>::compare(const char_type* __s1, const char_type* __s2, size_t
#if __has_feature(cxx_constexpr_string_builtins)
return __builtin_memcmp(__s1, __s2, __n);
#elif _LIBCPP_STD_VER <= 14
- return _VSTD::memcmp(__s1, __s2, __n);
+ return memcmp(__s1, __s2, __n);
#else
for (; __n; --__n, ++__s1, ++__s2)
{
@@ -404,7 +319,7 @@ char_traits<char>::find(const char_type* __s, size_t __n, const char_type& __a)
#if __has_feature(cxx_constexpr_string_builtins)
return __builtin_char_memchr(__s, to_int_type(__a), __n);
#elif _LIBCPP_STD_VER <= 14
- return (const char_type*) _VSTD::memchr(__s, to_int_type(__a), __n);
+ return (const char_type*) memchr(__s, to_int_type(__a), __n);
#else
for (; __n; --__n)
{
@@ -441,27 +356,27 @@ struct _LIBCPP_TEMPLATE_VIS char_traits<wchar_t>
size_t length(const char_type* __s) _NOEXCEPT;
static _LIBCPP_CONSTEXPR_AFTER_CXX14
const char_type* find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT;
- static inline _LIBCPP_CONSTEXPR_AFTER_CXX17
+ static inline _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
char_type* move(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT
{
return __libcpp_is_constant_evaluated()
- ? _VSTD::__move_constexpr(__s1, __s2, __n)
- : __n == 0 ? __s1 : _VSTD::wmemmove(__s1, __s2, __n);
+ ? __move_constexpr(__s1, __s2, __n)
+ : __n == 0 ? __s1 : wmemmove(__s1, __s2, __n);
}
- static inline _LIBCPP_CONSTEXPR_AFTER_CXX17
+ static inline _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
char_type* copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT
{
_LIBCPP_ASSERT(__s2 < __s1 || __s2 >= __s1+__n, "char_traits::copy overlapped range");
return __libcpp_is_constant_evaluated()
- ? _VSTD::__copy_constexpr(__s1, __s2, __n)
- : __n == 0 ? __s1 : _VSTD::wmemcpy(__s1, __s2, __n);
+ ? __copy_constexpr(__s1, __s2, __n)
+ : __n == 0 ? __s1 : wmemcpy(__s1, __s2, __n);
}
- static inline _LIBCPP_CONSTEXPR_AFTER_CXX17
+ static inline _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
char_type* assign(char_type* __s, size_t __n, char_type __a) _NOEXCEPT
{
return __libcpp_is_constant_evaluated()
- ? _VSTD::__assign_constexpr(__s, __n, __a)
- : __n == 0 ? __s : _VSTD::wmemset(__s, __a, __n);
+ ? __assign_constexpr(__s, __n, __a)
+ : __n == 0 ? __s : wmemset(__s, __a, __n);
}
static inline _LIBCPP_CONSTEXPR int_type not_eof(int_type __c) _NOEXCEPT
{return eq_int_type(__c, eof()) ? ~eof() : __c;}
@@ -484,7 +399,7 @@ char_traits<wchar_t>::compare(const char_type* __s1, const char_type* __s2, size
#if __has_feature(cxx_constexpr_string_builtins)
return __builtin_wmemcmp(__s1, __s2, __n);
#elif _LIBCPP_STD_VER <= 14
- return _VSTD::wmemcmp(__s1, __s2, __n);
+ return wmemcmp(__s1, __s2, __n);
#else
for (; __n; --__n, ++__s1, ++__s2)
{
@@ -516,7 +431,7 @@ char_traits<wchar_t>::length(const char_type* __s) _NOEXCEPT
#if __has_feature(cxx_constexpr_string_builtins)
return __builtin_wcslen(__s);
#elif _LIBCPP_STD_VER <= 14
- return _VSTD::wcslen(__s);
+ return wcslen(__s);
#else
size_t __len = 0;
for (; !eq(*__s, char_type(0)); ++__s)
@@ -534,7 +449,7 @@ char_traits<wchar_t>::find(const char_type* __s, size_t __n, const char_type& __
#if __has_feature(cxx_constexpr_string_builtins)
return __builtin_wmemchr(__s, __a, __n);
#elif _LIBCPP_STD_VER <= 14
- return _VSTD::wmemchr(__s, __a, __n);
+ return wmemchr(__s, __a, __n);
#else
for (; __n; --__n)
{
@@ -547,7 +462,7 @@ char_traits<wchar_t>::find(const char_type* __s, size_t __n, const char_type& __
}
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef _LIBCPP_NO_HAS_CHAR8_T
template <>
struct _LIBCPP_TEMPLATE_VIS char_traits<char8_t>
@@ -574,29 +489,29 @@ struct _LIBCPP_TEMPLATE_VIS char_traits<char8_t>
_LIBCPP_INLINE_VISIBILITY static constexpr
const char_type* find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT;
- static _LIBCPP_CONSTEXPR_AFTER_CXX17
+ static _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
char_type* move(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT
{
return __libcpp_is_constant_evaluated()
- ? _VSTD::__move_constexpr(__s1, __s2, __n)
- : __n == 0 ? __s1 : (char_type*)_VSTD::memmove(__s1, __s2, __n);
+ ? __move_constexpr(__s1, __s2, __n)
+ : __n == 0 ? __s1 : (char_type*)memmove(__s1, __s2, __n);
}
- static _LIBCPP_CONSTEXPR_AFTER_CXX17
+ static _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
char_type* copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT
{
_LIBCPP_ASSERT(__s2 < __s1 || __s2 >= __s1+__n, "char_traits::copy overlapped range");
return __libcpp_is_constant_evaluated()
- ? _VSTD::__copy_constexpr(__s1, __s2, __n)
- : __n == 0 ? __s1 : (char_type*)_VSTD::memcpy(__s1, __s2, __n);
+ ? __copy_constexpr(__s1, __s2, __n)
+ : __n == 0 ? __s1 : (char_type*)memcpy(__s1, __s2, __n);
}
- static _LIBCPP_CONSTEXPR_AFTER_CXX17
+ static _LIBCPP_CONSTEXPR_AFTER_CXX17_WITH_IS_CONSTANT_EVALUATED
char_type* assign(char_type* __s, size_t __n, char_type __a) _NOEXCEPT
{
return __libcpp_is_constant_evaluated()
- ? _VSTD::__assign_constexpr(__s, __n, __a)
- : __n == 0 ? __s : (char_type*)_VSTD::memset(__s, to_int_type(__a), __n);
+ ? __assign_constexpr(__s, __n, __a)
+ : __n == 0 ? __s : (char_type*)memset(__s, to_int_type(__a), __n);
}
static inline constexpr int_type not_eof(int_type __c) noexcept
@@ -651,10 +566,10 @@ char_traits<char8_t>::find(const char_type* __s, size_t __n, const char_type& __
return __s;
++__s;
}
- return nullptr;
+ return 0;
}
-#endif // #_LIBCPP_HAS_NO_CHAR8_T
+#endif // #_LIBCPP_NO_HAS_CHAR8_T
#ifndef _LIBCPP_HAS_NO_UNICODE_CHARS
@@ -733,7 +648,7 @@ char_traits<char16_t>::find(const char_type* __s, size_t __n, const char_type& _
return __s;
++__s;
}
- return nullptr;
+ return 0;
}
inline _LIBCPP_CONSTEXPR_AFTER_CXX17
@@ -853,7 +768,7 @@ char_traits<char32_t>::find(const char_type* __s, size_t __n, const char_type& _
return __s;
++__s;
}
- return nullptr;
+ return 0;
}
inline _LIBCPP_CONSTEXPR_AFTER_CXX17
@@ -898,7 +813,7 @@ char_traits<char32_t>::assign(char_type* __s, size_t __n, char_type __a) _NOEXCE
return __r;
}
-#endif // _LIBCPP_HAS_NO_UNICODE_CHARS
+#endif // _LIBCPP_HAS_NO_UNICODE_CHARS
// helper fns for basic_string and string_view
@@ -911,7 +826,7 @@ __str_find(const _CharT *__p, _SizeT __sz,
if (__pos >= __sz)
return __npos;
const _CharT* __r = _Traits::find(__p + __pos, __sz - __pos, __c);
- if (__r == nullptr)
+ if (__r == 0)
return __npos;
return static_cast<_SizeT>(__r - __p);
}
@@ -919,7 +834,7 @@ __str_find(const _CharT *__p, _SizeT __sz,
template <class _CharT, class _Traits>
inline _LIBCPP_CONSTEXPR_AFTER_CXX11 const _CharT *
__search_substring(const _CharT *__first1, const _CharT *__last1,
- const _CharT *__first2, const _CharT *__last2) _NOEXCEPT {
+ const _CharT *__first2, const _CharT *__last2) {
// Take advantage of knowing source and pattern lengths.
// Stop short when source is smaller than pattern.
const ptrdiff_t __len2 = __last2 - __first2;
@@ -940,7 +855,7 @@ __search_substring(const _CharT *__first1, const _CharT *__last1,
// Find __f2 the first byte matching in __first1.
__first1 = _Traits::find(__first1, __len1 - __len2 + 1, __f2);
- if (__first1 == nullptr)
+ if (__first1 == 0)
return __last1;
// It is faster to compare from the first byte of __first1 even if we
@@ -1063,7 +978,7 @@ __str_find_first_not_of(const _CharT *__p, _SizeT __sz,
{
const _CharT* __pe = __p + __sz;
for (const _CharT* __ps = __p + __pos; __ps != __pe; ++__ps)
- if (_Traits::find(__s, __n, *__ps) == nullptr)
+ if (_Traits::find(__s, __n, *__ps) == 0)
return static_cast<_SizeT>(__ps - __p);
}
return __npos;
@@ -1097,7 +1012,7 @@ __str_find_last_not_of(const _CharT *__p, _SizeT __sz,
else
__pos = __sz;
for (const _CharT* __ps = __p + __pos; __ps != __p;)
- if (_Traits::find(__s, __n, *--__ps) == nullptr)
+ if (_Traits::find(__s, __n, *--__ps) == 0)
return static_cast<_SizeT>(__ps - __p);
return __npos;
}
@@ -1143,4 +1058,4 @@ _LIBCPP_END_NAMESPACE_STD
_LIBCPP_POP_MACROS
-#endif // _LIBCPP___STRING
+#endif // _LIBCPP___STRING
diff --git a/gnu/llvm/libcxx/include/__tuple b/gnu/llvm/libcxx/include/__tuple
index 082ec869eec..4da9ec55f35 100644
--- a/gnu/llvm/libcxx/include/__tuple
+++ b/gnu/llvm/libcxx/include/__tuple
@@ -134,7 +134,7 @@ template<> struct __parity<7> { template<size_t _Np> struct __pmake : __repeat<t
} // namespace detail
-#endif // !__has_builtin(__make_integer_seq) || defined(_LIBCPP_TESTING_FALLBACK_MAKE_INTEGER_SEQUENCE)
+#endif // !__has_builtin(__make_integer_seq) || defined(_LIBCPP_TESTING_FALLBACK_MAKE_INTEGER_SEQUENCE)
#if __has_builtin(__make_integer_seq)
template <size_t _Ep, size_t _Sp>
@@ -548,4 +548,4 @@ struct __sfinae_assign_base<false, true> {
_LIBCPP_END_NAMESPACE_STD
-#endif // _LIBCPP___TUPLE
+#endif // _LIBCPP___TUPLE
diff --git a/gnu/llvm/libcxx/lib/libc++abi.v1.exp b/gnu/llvm/libcxx/lib/libc++abi.v1.exp
index c245b17a905..879b4dd141e 100644
--- a/gnu/llvm/libcxx/lib/libc++abi.v1.exp
+++ b/gnu/llvm/libcxx/lib/libc++abi.v1.exp
@@ -1,11 +1,20 @@
+___cxa_allocate_exception
+___cxa_end_catch
___cxa_demangle
+___cxa_current_exception_type
+___cxa_call_unexpected
+___cxa_free_exception
+___cxa_get_exception_ptr
___cxa_get_globals
___cxa_get_globals_fast
___cxa_guard_abort
___cxa_guard_acquire
___cxa_guard_release
+___cxa_rethrow
___cxa_pure_virtual
___cxa_deleted_virtual
+___cxa_begin_catch
+___cxa_throw
___cxa_vec_cctor
___cxa_vec_cleanup
___cxa_vec_ctor
@@ -17,6 +26,7 @@ ___cxa_vec_new
___cxa_vec_new2
___cxa_vec_new3
___dynamic_cast
+___gxx_personality_v0
__ZTIDi
__ZTIDn
__ZTIDs
diff --git a/gnu/llvm/libcxx/lib/libc++abi.v2.exp b/gnu/llvm/libcxx/lib/libc++abi.v2.exp
index 6a3e6b9f022..dfc308f8010 100644
--- a/gnu/llvm/libcxx/lib/libc++abi.v2.exp
+++ b/gnu/llvm/libcxx/lib/libc++abi.v2.exp
@@ -1,11 +1,20 @@
+___cxa_allocate_exception
+___cxa_end_catch
___cxa_demangle
+___cxa_current_exception_type
+___cxa_call_unexpected
+___cxa_free_exception
+___cxa_get_exception_ptr
___cxa_get_globals
___cxa_get_globals_fast
___cxa_guard_abort
___cxa_guard_acquire
___cxa_guard_release
+___cxa_rethrow
___cxa_pure_virtual
___cxa_deleted_virtual
+___cxa_begin_catch
+___cxa_throw
___cxa_throw_bad_array_new_length
___cxa_uncaught_exceptions
___cxa_vec_cctor
@@ -19,6 +28,7 @@ ___cxa_vec_new
___cxa_vec_new2
___cxa_vec_new3
___dynamic_cast
+___gxx_personality_v0
__ZTIDi
__ZTIDn
__ZTIDs
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/.clang-format b/gnu/llvm/libcxx/utils/google-benchmark/.clang-format
index e7d00feaa08..06ea346a106 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/.clang-format
+++ b/gnu/llvm/libcxx/utils/google-benchmark/.clang-format
@@ -1,5 +1,4 @@
---
Language: Cpp
BasedOnStyle: Google
-PointerAlignment: Left
...
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/.gitignore b/gnu/llvm/libcxx/utils/google-benchmark/.gitignore
index be55d774e21..8c30e28f53a 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/.gitignore
+++ b/gnu/llvm/libcxx/utils/google-benchmark/.gitignore
@@ -8,7 +8,6 @@
!/cmake/*.cmake
!/test/AssemblyTests.cmake
*~
-*.swp
*.pyc
__pycache__
@@ -57,10 +56,3 @@ build*/
# Visual Studio 2015/2017 cache/options directory
.vs/
CMakeSettings.json
-
-# Visual Studio Code cache/options directory
-.vscode/
-
-# Python build stuff
-dist/
-*.egg-info*
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/.travis.yml b/gnu/llvm/libcxx/utils/google-benchmark/.travis.yml
index 8cfed3d10da..4625dfb0878 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/.travis.yml
+++ b/gnu/llvm/libcxx/utils/google-benchmark/.travis.yml
@@ -2,6 +2,10 @@ sudo: required
dist: trusty
language: cpp
+env:
+ global:
+ - /usr/local/bin:$PATH
+
matrix:
include:
- compiler: gcc
@@ -11,145 +15,132 @@ matrix:
- lcov
env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage
- compiler: gcc
+ env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug
+ - compiler: gcc
+ env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release
+ - compiler: gcc
addons:
apt:
packages:
- g++-multilib
- - libc6:i386
- env:
- - COMPILER=g++
- - C_COMPILER=gcc
- - BUILD_TYPE=Debug
- - BUILD_32_BITS=ON
- - EXTRA_FLAGS="-m32"
+ env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug BUILD_32_BITS=ON
- compiler: gcc
addons:
apt:
packages:
- g++-multilib
- - libc6:i386
- env:
- - COMPILER=g++
- - C_COMPILER=gcc
- - BUILD_TYPE=Release
- - BUILD_32_BITS=ON
- - EXTRA_FLAGS="-m32"
+ env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release BUILD_32_BITS=ON
- compiler: gcc
env:
- INSTALL_GCC6_FROM_PPA=1
- COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug
- ENABLE_SANITIZER=1
- EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold"
+ - compiler: clang
+ env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug
+ - compiler: clang
+ env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Release
# Clang w/ libc++
- compiler: clang
- dist: xenial
addons:
apt:
packages:
clang-3.8
env:
- - INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1
- - EXTRA_CXX_FLAGS="-stdlib=libc++"
+ - EXTRA_FLAGS="-stdlib=libc++"
- compiler: clang
- dist: xenial
addons:
apt:
packages:
clang-3.8
env:
- - INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
- LIBCXX_BUILD=1
- - EXTRA_CXX_FLAGS="-stdlib=libc++"
+ - EXTRA_FLAGS="-stdlib=libc++"
# Clang w/ 32bit libc++
- compiler: clang
- dist: xenial
addons:
apt:
packages:
- clang-3.8
- g++-multilib
- - libc6:i386
env:
- - INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1
- BUILD_32_BITS=ON
- - EXTRA_FLAGS="-m32"
- - EXTRA_CXX_FLAGS="-stdlib=libc++"
+ - EXTRA_FLAGS="-stdlib=libc++ -m32"
# Clang w/ 32bit libc++
- compiler: clang
- dist: xenial
addons:
apt:
packages:
- clang-3.8
- g++-multilib
- - libc6:i386
env:
- - INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
- LIBCXX_BUILD=1
- BUILD_32_BITS=ON
- - EXTRA_FLAGS="-m32"
- - EXTRA_CXX_FLAGS="-stdlib=libc++"
+ - EXTRA_FLAGS="-stdlib=libc++ -m32"
# Clang w/ libc++, ASAN, UBSAN
- compiler: clang
- dist: xenial
addons:
apt:
packages:
clang-3.8
env:
- - INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address"
- ENABLE_SANITIZER=1
- - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all"
- - EXTRA_CXX_FLAGS="-stdlib=libc++"
+ - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all"
- UBSAN_OPTIONS=print_stacktrace=1
# Clang w/ libc++ and MSAN
- compiler: clang
- dist: xenial
addons:
apt:
packages:
clang-3.8
env:
- - INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins
- ENABLE_SANITIZER=1
- - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins"
- - EXTRA_CXX_FLAGS="-stdlib=libc++"
+ - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins"
# Clang w/ libc++ and MSAN
- compiler: clang
- dist: xenial
addons:
apt:
packages:
clang-3.8
env:
- - INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo
- LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread
- ENABLE_SANITIZER=1
- - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all"
- - EXTRA_CXX_FLAGS="-stdlib=libc++"
+ - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all"
- os: osx
osx_image: xcode8.3
compiler: clang
env:
- - COMPILER=clang++
- - BUILD_TYPE=Release
- - BUILD_32_BITS=ON
- - EXTRA_FLAGS="-m32"
+ - COMPILER=clang++ BUILD_TYPE=Debug
+ - os: osx
+ osx_image: xcode8.3
+ compiler: clang
+ env:
+ - COMPILER=clang++ BUILD_TYPE=Release
+ - os: osx
+ osx_image: xcode8.3
+ compiler: clang
+ env:
+ - COMPILER=clang++ BUILD_TYPE=Release BUILD_32_BITS=ON
+ - os: osx
+ osx_image: xcode8.3
+ compiler: gcc
+ env:
+ - COMPILER=g++-7 C_COMPILER=gcc-7 BUILD_TYPE=Debug
before_script:
- if [ -n "${LIBCXX_BUILD}" ]; then
- source .libcxx-setup.sh;
+ source .travis-libcxx-setup.sh;
fi
- if [ -n "${ENABLE_SANITIZER}" ]; then
export EXTRA_OPTIONS="-DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF";
@@ -187,17 +178,17 @@ install:
fi
- if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
sudo apt-get update -qq;
- sudo apt-get install -qq unzip cmake3;
- wget https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-linux-x86_64.sh --output-document bazel-installer.sh;
+ sudo apt-get install -qq unzip;
+ wget https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-linux-x86_64.sh --output-document bazel-installer.sh;
travis_wait sudo bash bazel-installer.sh;
fi
- if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
- curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-darwin-x86_64.sh;
+ curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-darwin-x86_64.sh;
travis_wait sudo bash bazel-installer.sh;
fi
script:
- - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_C_FLAGS="${EXTRA_FLAGS}" -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS} ${EXTRA_CXX_FLAGS}" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ${EXTRA_OPTIONS} ..
+ - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS}" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ${EXTRA_OPTIONS} ..
- make
- ctest -C ${BUILD_TYPE} --output-on-failure
- bazel test -c dbg --define google_benchmark.have_regex=posix --announce_rc --verbose_failures --test_output=errors --keep_going //test/...
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/AUTHORS b/gnu/llvm/libcxx/utils/google-benchmark/AUTHORS
index 838dd4f5bd5..09e2e0551ad 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/AUTHORS
+++ b/gnu/llvm/libcxx/utils/google-benchmark/AUTHORS
@@ -9,32 +9,24 @@
# Please keep the list sorted.
Albert Pretorius <pretoalb@gmail.com>
-Alex Steele <steeleal123@gmail.com>
-Andriy Berestovskyy <berestovskyy@gmail.com>
Arne Beer <arne@twobeer.de>
Carto
-Christian Wassermann <christian_wassermann@web.de>
Christopher Seymour <chris.j.seymour@hotmail.com>
-Colin Braley <braley.colin@gmail.com>
-Daniel Harvey <danielharvey458@gmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dirac Research
Dominik Czarnota <dominik.b.czarnota@gmail.com>
-Eric Backus <eric_backus@alum.mit.edu>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Federico Ficarelli <federico.ficarelli@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
-Gergő Szitár <szitar.gergo@gmail.com>
Google Inc.
International Business Machines Corporation
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
-Jordan Williams <jwillikers@protonmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
@@ -43,18 +35,14 @@ Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
MongoDB Inc.
Nick Hutchinson <nshutchinson@gmail.com>
-Norman Heino <norman.heino@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Ori Livneh <ori.livneh@gmail.com>
Paul Redmond <paul.redmond@gmail.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Roman Lebedev <lebedev.ri@gmail.com>
-Sayan Bhattacharjee <aero.sayan@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Steinar H. Gunderson <sgunderson@bigfoot.com>
Stripe, Inc.
-Tobias Schmidt <tobias.schmidt@in.tum.de>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>
-Min-Yih Hsu <yihshyng223@gmail.com>
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/CMakeLists.txt b/gnu/llvm/libcxx/utils/google-benchmark/CMakeLists.txt
index ef8dcdc68cf..310c7ee9f6b 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/CMakeLists.txt
+++ b/gnu/llvm/libcxx/utils/google-benchmark/CMakeLists.txt
@@ -1,20 +1,17 @@
-cmake_minimum_required (VERSION 3.5.1)
+cmake_minimum_required (VERSION 2.8.12)
+
+project (benchmark)
foreach(p
- CMP0048 # OK to clear PROJECT_VERSION on project()
CMP0054 # CMake 3.1
CMP0056 # export EXE_LINKER_FLAGS to try_run
CMP0057 # Support no if() IN_LIST operator
- CMP0063 # Honor visibility properties for all targets
- CMP0077 # Allow option() overrides in importing projects
)
if(POLICY ${p})
cmake_policy(SET ${p} NEW)
endif()
endforeach()
-project (benchmark VERSION 1.5.4 LANGUAGES CXX)
-
option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
@@ -34,20 +31,6 @@ option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree buildi
# in cases where it is not possible to build or find a valid version of gtest.
option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON)
-option(BENCHMARK_ENABLE_LIBPFM "Enable performance counters provided by libpfm" OFF)
-
-set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
-if(MSVC)
- # As of CMake 3.18, CMAKE_SYSTEM_PROCESSOR is not set properly for MSVC and
- # cross-compilation (e.g. Host=x86_64, target=aarch64) requires using the
- # undocumented, but working variable.
- # See https://gitlab.kitware.com/cmake/cmake/-/issues/15170
- set(CMAKE_SYSTEM_PROCESSOR ${MSVC_CXX_ARCHITECTURE_ID})
- if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "ARM")
- set(CMAKE_CROSSCOMPILING TRUE)
- endif()
-endif()
-
set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF)
function(should_enable_assembly_tests)
if(CMAKE_BUILD_TYPE)
@@ -94,14 +77,8 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
include(GetGitVersion)
get_git_version(GIT_VERSION)
-# If no git version can be determined, use the version
-# from the project() command
-if ("${GIT_VERSION}" STREQUAL "0.0.0")
- set(VERSION "${benchmark_VERSION}")
-else()
- set(VERSION "${GIT_VERSION}")
-endif()
# Tell the user what versions we are using
+string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" VERSION ${GIT_VERSION})
message(STATUS "Version: ${VERSION}")
# The version of the libraries
@@ -163,10 +140,6 @@ else()
add_cxx_compiler_flag(-Werror RELEASE)
add_cxx_compiler_flag(-Werror RELWITHDEBINFO)
add_cxx_compiler_flag(-Werror MINSIZEREL)
- if (NOT BENCHMARK_ENABLE_TESTING)
- # Disable warning when compiling tests as gtest does not use 'override'.
- add_cxx_compiler_flag(-Wsuggest-override)
- endif()
add_cxx_compiler_flag(-pedantic)
add_cxx_compiler_flag(-pedantic-errors)
add_cxx_compiler_flag(-Wshorten-64-to-32)
@@ -209,15 +182,10 @@ else()
add_definitions(-D_GNU_SOURCE=1)
endif()
- if (QNXNTO)
- add_definitions(-D_QNX_SOURCE)
- endif()
-
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
add_cxx_compiler_flag(-flto)
- add_cxx_compiler_flag(-Wno-lto-type-mismatch)
- if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ if ("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
find_program(GCC_AR gcc-ar)
if (GCC_AR)
set(CMAKE_AR ${GCC_AR})
@@ -226,7 +194,7 @@ else()
if (GCC_RANLIB)
set(CMAKE_RANLIB ${GCC_RANLIB})
endif()
- elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
+ elseif("${CMAKE_C_COMPILER_ID}" MATCHES "Clang")
include(llvm-toolchain)
endif()
endif()
@@ -268,17 +236,11 @@ if (BENCHMARK_USE_LIBCXX)
endif()
endif(BENCHMARK_USE_LIBCXX)
-set(EXTRA_CXX_FLAGS "")
-if (WIN32 AND "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
- # Clang on Windows fails to compile the regex feature check under C++11
- set(EXTRA_CXX_FLAGS "-DCMAKE_CXX_STANDARD=14")
-endif()
-
# C++ feature checks
# Determine the correct regular expression engine to use
-cxx_feature_check(STD_REGEX ${EXTRA_CXX_FLAGS})
-cxx_feature_check(GNU_POSIX_REGEX ${EXTRA_CXX_FLAGS})
-cxx_feature_check(POSIX_REGEX ${EXTRA_CXX_FLAGS})
+cxx_feature_check(STD_REGEX)
+cxx_feature_check(GNU_POSIX_REGEX)
+cxx_feature_check(POSIX_REGEX)
if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(FATAL_ERROR "Failed to determine the source files for the regular expression backend")
endif()
@@ -286,16 +248,10 @@ if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX
AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(WARNING "Using std::regex with exceptions disabled is not fully supported")
endif()
-
cxx_feature_check(STEADY_CLOCK)
# Ensure we have pthreads
-set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
-if (BENCHMARK_ENABLE_LIBPFM)
- find_package(PFM)
-endif()
-
# Set up directories
include_directories(${PROJECT_SOURCE_DIR}/include)
@@ -304,10 +260,8 @@ add_subdirectory(src)
if (BENCHMARK_ENABLE_TESTING)
enable_testing()
- if (BENCHMARK_ENABLE_GTEST_TESTS AND
- NOT (TARGET gtest AND TARGET gtest_main AND
- TARGET gmock AND TARGET gmock_main))
- include(GoogleTest)
+ if (BENCHMARK_ENABLE_GTEST_TESTS)
+ include(HandleGTest)
endif()
add_subdirectory(test)
endif()
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/CONTRIBUTORS b/gnu/llvm/libcxx/utils/google-benchmark/CONTRIBUTORS
index 7489731de5a..ee74ff886c0 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/CONTRIBUTORS
+++ b/gnu/llvm/libcxx/utils/google-benchmark/CONTRIBUTORS
@@ -22,47 +22,34 @@
#
# Please keep the list sorted.
-Abhina Sreeskantharajan <abhina.sreeskantharajan@ibm.com>
Albert Pretorius <pretoalb@gmail.com>
-Alex Steele <steelal123@gmail.com>
-Andriy Berestovskyy <berestovskyy@gmail.com>
Arne Beer <arne@twobeer.de>
Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
-Christian Wassermann <christian_wassermann@web.de>
Christopher Seymour <chris.j.seymour@hotmail.com>
-Colin Braley <braley.colin@gmail.com>
Cyrille Faucheux <cyrille.faucheux@gmail.com>
-Daniel Harvey <danielharvey458@gmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dominic Hamon <dma@stripysock.com> <dominic@google.com>
Dominik Czarnota <dominik.b.czarnota@gmail.com>
-Eric Backus <eric_backus@alum.mit.edu>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
-Fanbo Meng <fanbo.meng@ibm.com>
Federico Ficarelli <federico.ficarelli@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
-Geoffrey Martin-Noble <gcmn@google.com> <gmngeoffrey@gmail.com>
-Gergő Szitár <szitar.gergo@gmail.com>
-Hannes Hauswedell <h2@fsfe.org>
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
John Millikin <jmillikin@stripe.com>
-Jordan Williams <jwillikers@protonmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kai Wolf <kai.wolf@gmail.com>
-Kaito Udagawa <umireon@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
+Kaito Udagawa <umireon@gmail.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
Nick Hutchinson <nshutchinson@gmail.com>
-Norman Heino <norman.heino@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Ori Livneh <ori.livneh@gmail.com>
Pascal Leroy <phl@google.com>
@@ -73,13 +60,9 @@ Raul Marin <rmrodriguez@cartodb.com>
Ray Glover <ray.glover@uk.ibm.com>
Robert Guo <robert.guo@mongodb.com>
Roman Lebedev <lebedev.ri@gmail.com>
-Sayan Bhattacharjee <aero.sayan@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
-Steven Wan <wan.yu@ibm.com>
-Tobias Schmidt <tobias.schmidt@in.tum.de>
Tobias Ulvgård <tobias.ulvgard@dirac.se>
Tom Madams <tom.ej.madams@gmail.com> <tmadams@google.com>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>
-Min-Yih Hsu <yihshyng223@gmail.com>
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/README.md b/gnu/llvm/libcxx/utils/google-benchmark/README.md
index aa61cef1b16..858ea2334ef 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/README.md
+++ b/gnu/llvm/libcxx/utils/google-benchmark/README.md
@@ -1,111 +1,32 @@
-# Benchmark
-
-[![build-and-test](https://github.com/google/benchmark/workflows/build-and-test/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Abuild-and-test)
-[![bazel](https://github.com/google/benchmark/actions/workflows/bazel.yml/badge.svg)](https://github.com/google/benchmark/actions/workflows/bazel.yml)
-[![pylint](https://github.com/google/benchmark/workflows/pylint/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Apylint)
-[![test-bindings](https://github.com/google/benchmark/workflows/test-bindings/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Atest-bindings)
-
+# benchmark
[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
+[![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
+[![slackin](https://slackin-iqtfqnpzxd.now.sh/badge.svg)](https://slackin-iqtfqnpzxd.now.sh/)
-
-A library to benchmark code snippets, similar to unit tests. Example:
-
-```c++
-#include <benchmark/benchmark.h>
-
-static void BM_SomeFunction(benchmark::State& state) {
- // Perform setup here
- for (auto _ : state) {
- // This code gets timed
- SomeFunction();
- }
-}
-// Register the function as a benchmark
-BENCHMARK(BM_SomeFunction);
-// Run the benchmark
-BENCHMARK_MAIN();
-```
-
-To get started, see [Requirements](#requirements) and
-[Installation](#installation). See [Usage](#usage) for a full example and the
-[User Guide](#user-guide) for a more comprehensive feature overview.
-
-It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/docs/primer.md)
-as some of the structural aspects of the APIs are similar.
-
-### Resources
+A library to support the benchmarking of functions, similar to unit-tests.
[Discussion group](https://groups.google.com/d/forum/benchmark-discuss)
-IRC channels:
-* [libera](https://libera.chat) #benchmark
+IRC channel: [freenode](https://freenode.net) #googlebenchmark
[Additional Tooling Documentation](docs/tools.md)
[Assembly Testing Documentation](docs/AssemblyTests.md)
-## Requirements
-
-The library can be used with C++03. However, it requires C++11 to build,
-including compiler and standard library support.
-
-The following minimum versions are required to build the library:
-
-* GCC 4.8
-* Clang 3.4
-* Visual Studio 14 2015
-* Intel 2015 Update 1
-
-See [Platform-Specific Build Instructions](#platform-specific-build-instructions).
-## Installation
+## Building
-This describes the installation process using cmake. As pre-requisites, you'll
-need git and cmake installed.
-
-_See [dependencies.md](dependencies.md) for more details regarding supported
-versions of build tools._
+The basic steps for configuring and building the library look like this:
```bash
-# Check out the library.
$ git clone https://github.com/google/benchmark.git
# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory.
$ git clone https://github.com/google/googletest.git benchmark/googletest
-# Go to the library root directory
-$ cd benchmark
-# Make a build directory to place the build output.
-$ cmake -E make_directory "build"
-# Generate build system files with cmake.
-$ cmake -E chdir "build" cmake -DCMAKE_BUILD_TYPE=Release ../
-# or, starting with CMake 3.13, use a simpler form:
-# cmake -DCMAKE_BUILD_TYPE=Release -S . -B "build"
-# Build the library.
-$ cmake --build "build" --config Release
-```
-This builds the `benchmark` and `benchmark_main` libraries and tests.
-On a unix system, the build directory should now look something like this:
-
-```
-/benchmark
- /build
- /src
- /libbenchmark.a
- /libbenchmark_main.a
- /test
- ...
-```
-
-Next, you can run the tests to check the build.
-
-```bash
-$ cmake -E chdir "build" ctest --build-config Release
-```
-
-If you want to install the library globally, also run:
-
-```
-sudo cmake --build "build" --config Release --target install
+$ mkdir build && cd build
+$ cmake -G <generator> [options] ../benchmark
+# Assuming a makefile generator was used
+$ make
```
Note that Google Benchmark requires Google Test to build and run the tests. This
@@ -119,25 +40,37 @@ dependency can be provided two ways:
If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
to `CMAKE_ARGS`.
-### Debug vs Release
-By default, benchmark builds as a debug library. You will see a warning in the
-output when this is the case. To build it as a release library instead, add
-`-DCMAKE_BUILD_TYPE=Release` when generating the build system files, as shown
-above. The use of `--config Release` in build commands is needed to properly
-support multi-configuration tools (like Visual Studio for example) and can be
-skipped for other build systems (like Makefile).
+## Installation Guide
-To enable link-time optimisation, also add `-DBENCHMARK_ENABLE_LTO=true` when
-generating the build system files.
+For Ubuntu and Debian Based System
-If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake
-cache variables, if autodetection fails.
+First make sure you have git and cmake installed (If not please install them)
-If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
-`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
+```
+sudo apt-get install git cmake
+```
-### Stable and Experimental Library Versions
+Now, let's clone the repository and build it
+
+```
+git clone https://github.com/google/benchmark.git
+cd benchmark
+# If you want to build tests and don't use BENCHMARK_DOWNLOAD_DEPENDENCIES, then
+# git clone https://github.com/google/googletest.git
+mkdir build
+cd build
+cmake .. -DCMAKE_BUILD_TYPE=RELEASE
+make
+```
+
+If you need to install the library globally
+
+```
+sudo make install
+```
+
+## Stable and Experimental Library Versions
The main branch contains the latest stable version of the benchmarking library;
the API of which can be considered largely stable, with source breaking changes
@@ -149,13 +82,16 @@ to use, test, and provide feedback on the new features are encouraged to try
this branch. However, this branch provides no stability guarantees and reserves
the right to change and break the API at any time.
-## Usage
+## Further knowledge
-### Basic usage
+It may help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/googletest/docs/primer.md)
+as some of the structural aspects of the APIs are similar.
-Define a function that executes the code to measure, register it as a benchmark
-function using the `BENCHMARK` macro, and ensure an appropriate `main` function
-is available:
+## Example usage
+### Basic usage
+Define a function that executes the code to be measured, register it as a
+benchmark function using the `BENCHMARK` macro, and ensure an appropriate `main`
+function is available:
```c++
#include <benchmark/benchmark.h>
@@ -178,47 +114,15 @@ BENCHMARK(BM_StringCopy);
BENCHMARK_MAIN();
```
-To run the benchmark, compile and link against the `benchmark` library
-(libbenchmark.a/.so). If you followed the build steps above, this library will
-be under the build directory you created.
-
-```bash
-# Example on linux after running the build steps above. Assumes the
-# `benchmark` and `build` directories are under the current directory.
-$ g++ mybenchmark.cc -std=c++11 -isystem benchmark/include \
- -Lbenchmark/build/src -lbenchmark -lpthread -o mybenchmark
-```
+Don't forget to tell your linker to link in the benchmark library, e.g. through
+the `-lbenchmark` compilation flag. Alternatively, you may leave out the
+`BENCHMARK_MAIN();` at the end of the source file and link against
+`-lbenchmark_main` to get the same default behavior.
-Alternatively, link against the `benchmark_main` library and remove
-`BENCHMARK_MAIN();` above to get the same behavior.
-
-The compiled executable will run all benchmarks by default. Pass the `--help`
-flag for option information or see the guide below.
-
-### Usage with CMake
-
-If using CMake, it is recommended to link against the project-provided
-`benchmark::benchmark` and `benchmark::benchmark_main` targets using
-`target_link_libraries`.
-It is possible to use ```find_package``` to import an installed version of the
-library.
-```cmake
-find_package(benchmark REQUIRED)
-```
-Alternatively, ```add_subdirectory``` will incorporate the library directly in
-to one's CMake project.
-```cmake
-add_subdirectory(benchmark)
-```
-Either way, link to the library as follows.
-```cmake
-target_link_libraries(MyTarget benchmark::benchmark)
-```
-
-## Platform Specific Build Instructions
-
-### Building with GCC
+The benchmark library will measure and report the timing for code within the
+`for(...)` loop.
+#### Platform-specific libraries
When the library is built using GCC it is necessary to link with the pthread
library due to how GCC implements `std::thread`. Failing to link to pthread will
lead to runtime exceptions (unless you're using libc++), not linker errors. See
@@ -227,282 +131,13 @@ can link to pthread by adding `-pthread` to your linker command. Note, you can
also use `-lpthread`, but there are potential issues with ordering of command
line parameters if you use that.
-### Building with Visual Studio 2015 or 2017
-
-The `shlwapi` library (`-lshlwapi`) is required to support a call to `CPUInfo` which reads the registry. Either add `shlwapi.lib` under `[ Configuration Properties > Linker > Input ]`, or use the following:
-
-```
-// Alternatively, can add libraries using linker options.
-#ifdef _WIN32
-#pragma comment ( lib, "Shlwapi.lib" )
-#ifdef _DEBUG
-#pragma comment ( lib, "benchmarkd.lib" )
-#else
-#pragma comment ( lib, "benchmark.lib" )
-#endif
-#endif
-```
-
-Can also use the graphical version of CMake:
-* Open `CMake GUI`.
-* Under `Where to build the binaries`, same path as source plus `build`.
-* Under `CMAKE_INSTALL_PREFIX`, same path as source plus `install`.
-* Click `Configure`, `Generate`, `Open Project`.
-* If build fails, try deleting entire directory and starting again, or unticking options to build less.
-
-### Building with Intel 2015 Update 1 or Intel System Studio Update 4
-
-See instructions for building with Visual Studio. Once built, right click on the solution and change the build to Intel.
-
-### Building on Solaris
+If you're running benchmarks on Windows, the shlwapi library (`-lshlwapi`) is
+also required.
If you're running benchmarks on solaris, you'll want the kstat library linked in
too (`-lkstat`).
-## User Guide
-
-### Command Line
-
-[Output Formats](#output-formats)
-
-[Output Files](#output-files)
-
-[Running Benchmarks](#running-benchmarks)
-
-[Running a Subset of Benchmarks](#running-a-subset-of-benchmarks)
-
-[Result Comparison](#result-comparison)
-
-[Extra Context](#extra-context)
-
-### Library
-
-[Runtime and Reporting Considerations](#runtime-and-reporting-considerations)
-
-[Passing Arguments](#passing-arguments)
-
-[Custom Benchmark Name](#custom-benchmark-name)
-
-[Calculating Asymptotic Complexity](#asymptotic-complexity)
-
-[Templated Benchmarks](#templated-benchmarks)
-
-[Fixtures](#fixtures)
-
-[Custom Counters](#custom-counters)
-
-[Multithreaded Benchmarks](#multithreaded-benchmarks)
-
-[CPU Timers](#cpu-timers)
-
-[Manual Timing](#manual-timing)
-
-[Setting the Time Unit](#setting-the-time-unit)
-
-[Random Interleaving](docs/random_interleaving.md)
-
-[User-Requested Performance Counters](docs/perf_counters.md)
-
-[Preventing Optimization](#preventing-optimization)
-
-[Reporting Statistics](#reporting-statistics)
-
-[Custom Statistics](#custom-statistics)
-
-[Using RegisterBenchmark](#using-register-benchmark)
-
-[Exiting with an Error](#exiting-with-an-error)
-
-[A Faster KeepRunning Loop](#a-faster-keep-running-loop)
-
-[Disabling CPU Frequency Scaling](#disabling-cpu-frequency-scaling)
-
-
-<a name="output-formats" />
-
-### Output Formats
-
-The library supports multiple output formats. Use the
-`--benchmark_format=<console|json|csv>` flag (or set the
-`BENCHMARK_FORMAT=<console|json|csv>` environment variable) to set
-the format type. `console` is the default format.
-
-The Console format is intended to be a human readable format. By default
-the format generates color output. Context is output on stderr and the
-tabular data on stdout. Example tabular output looks like:
-
-```
-Benchmark Time(ns) CPU(ns) Iterations
-----------------------------------------------------------------------
-BM_SetInsert/1024/1 28928 29349 23853 133.097kB/s 33.2742k items/s
-BM_SetInsert/1024/8 32065 32913 21375 949.487kB/s 237.372k items/s
-BM_SetInsert/1024/10 33157 33648 21431 1.13369MB/s 290.225k items/s
-```
-
-The JSON format outputs human readable json split into two top level attributes.
-The `context` attribute contains information about the run in general, including
-information about the CPU and the date.
-The `benchmarks` attribute contains a list of every benchmark run. Example json
-output looks like:
-
-```json
-{
- "context": {
- "date": "2015/03/17-18:40:25",
- "num_cpus": 40,
- "mhz_per_cpu": 2801,
- "cpu_scaling_enabled": false,
- "build_type": "debug"
- },
- "benchmarks": [
- {
- "name": "BM_SetInsert/1024/1",
- "iterations": 94877,
- "real_time": 29275,
- "cpu_time": 29836,
- "bytes_per_second": 134066,
- "items_per_second": 33516
- },
- {
- "name": "BM_SetInsert/1024/8",
- "iterations": 21609,
- "real_time": 32317,
- "cpu_time": 32429,
- "bytes_per_second": 986770,
- "items_per_second": 246693
- },
- {
- "name": "BM_SetInsert/1024/10",
- "iterations": 21393,
- "real_time": 32724,
- "cpu_time": 33355,
- "bytes_per_second": 1199226,
- "items_per_second": 299807
- }
- ]
-}
-```
-
-The CSV format outputs comma-separated values. The `context` is output on stderr
-and the CSV itself on stdout. Example CSV output looks like:
-
-```
-name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
-"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
-"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
-"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
-```
-
-<a name="output-files" />
-
-### Output Files
-
-Write benchmark results to a file with the `--benchmark_out=<filename>` option
-(or set `BENCHMARK_OUT`). Specify the output format with
-`--benchmark_out_format={json|console|csv}` (or set
-`BENCHMARK_OUT_FORMAT={json|console|csv}`). Note that the 'csv' reporter is
-deprecated and the saved `.csv` file
-[is not parsable](https://github.com/google/benchmark/issues/794) by csv
-parsers.
-
-Specifying `--benchmark_out` does not suppress the console output.
-
-<a name="running-benchmarks" />
-
-### Running Benchmarks
-
-Benchmarks are executed by running the produced binaries. Benchmarks binaries,
-by default, accept options that may be specified either through their command
-line interface or by setting environment variables before execution. For every
-`--option_flag=<value>` CLI switch, a corresponding environment variable
-`OPTION_FLAG=<value>` exist and is used as default if set (CLI switches always
- prevails). A complete list of CLI options is available running benchmarks
- with the `--help` switch.
-
-<a name="running-a-subset-of-benchmarks" />
-
-### Running a Subset of Benchmarks
-
-The `--benchmark_filter=<regex>` option (or `BENCHMARK_FILTER=<regex>`
-environment variable) can be used to only run the benchmarks that match
-the specified `<regex>`. For example:
-
-```bash
-$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
-Run on (1 X 2300 MHz CPU )
-2016-06-25 19:34:24
-Benchmark Time CPU Iterations
-----------------------------------------------------
-BM_memcpy/32 11 ns 11 ns 79545455
-BM_memcpy/32k 2181 ns 2185 ns 324074
-BM_memcpy/32 12 ns 12 ns 54687500
-BM_memcpy/32k 1834 ns 1837 ns 357143
-```
-
-<a name="result-comparison" />
-
-### Result comparison
-
-It is possible to compare the benchmarking results.
-See [Additional Tooling Documentation](docs/tools.md)
-
-<a name="extra-context" />
-
-### Extra Context
-
-Sometimes it's useful to add extra context to the content printed before the
-results. By default this section includes information about the CPU on which
-the benchmarks are running. If you do want to add more context, you can use
-the `benchmark_context` command line flag:
-
-```bash
-$ ./run_benchmarks --benchmark_context=pwd=`pwd`
-Run on (1 x 2300 MHz CPU)
-pwd: /home/user/benchmark/
-Benchmark Time CPU Iterations
-----------------------------------------------------
-BM_memcpy/32 11 ns 11 ns 79545455
-BM_memcpy/32k 2181 ns 2185 ns 324074
-```
-
-You can get the same effect with the API:
-
-```c++
- benchmark::AddCustomContext("foo", "bar");
-```
-
-Note that attempts to add a second value with the same key will fail with an
-error message.
-
-<a name="runtime-and-reporting-considerations" />
-
-### Runtime and Reporting Considerations
-
-When the benchmark binary is executed, each benchmark function is run serially.
-The number of iterations to run is determined dynamically by running the
-benchmark a few times and measuring the time taken and ensuring that the
-ultimate result will be statistically stable. As such, faster benchmark
-functions will be run for more iterations than slower benchmark functions, and
-the number of iterations is thus reported.
-
-In all cases, the number of iterations for which the benchmark is run is
-governed by the amount of time the benchmark takes. Concretely, the number of
-iterations is at least one, not more than 1e9, until CPU time is greater than
-the minimum time, or the wallclock time is 5x minimum time. The minimum time is
-set per benchmark by calling `MinTime` on the registered benchmark object.
-
-Average timings are then reported over the iterations run. If multiple
-repetitions are requested using the `--benchmark_repetitions` command-line
-option, or at registration time, the benchmark function will be run several
-times and statistical results across these repetitions will also be reported.
-
-As well as the per-benchmark entries, a preamble in the report will include
-information about the machine on which the benchmarks are run.
-
-<a name="passing-arguments" />
-
-### Passing Arguments
-
+### Passing arguments
Sometimes a family of benchmarks can be implemented with just one routine that
takes an extra argument to specify which one of the family of benchmarks to
run. For example, the following code defines a family of benchmarks for
@@ -538,26 +173,8 @@ range multiplier is changed to multiples of two.
```c++
BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10);
```
-
Now arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ].
-The preceding code shows a method of defining a sparse range. The following
-example shows a method of defining a dense range. It is then used to benchmark
-the performance of `std::vector` initialization for uniformly increasing sizes.
-
-```c++
-static void BM_DenseRange(benchmark::State& state) {
- for(auto _ : state) {
- std::vector<int> v(state.range(0), state.range(0));
- benchmark::DoNotOptimize(v.data());
- benchmark::ClobberMemory();
- }
-}
-BENCHMARK(BM_DenseRange)->DenseRange(0, 1024, 128);
-```
-
-Now arguments generated are [ 0, 128, 256, 384, 512, 640, 768, 896, 1024 ].
-
You might have a benchmark that depends on two or more inputs. For example, the
following code defines a family of benchmarks for measuring the speed of set
insertion.
@@ -593,29 +210,6 @@ pair.
BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
```
-Some benchmarks may require specific argument values that cannot be expressed
-with `Ranges`. In this case, `ArgsProduct` offers the ability to generate a
-benchmark input for each combination in the product of the supplied vectors.
-
-```c++
-BENCHMARK(BM_SetInsert)
- ->ArgsProduct({{1<<10, 3<<10, 8<<10}, {20, 40, 60, 80}})
-// would generate the same benchmark arguments as
-BENCHMARK(BM_SetInsert)
- ->Args({1<<10, 20})
- ->Args({3<<10, 20})
- ->Args({8<<10, 20})
- ->Args({3<<10, 40})
- ->Args({8<<10, 40})
- ->Args({1<<10, 40})
- ->Args({1<<10, 60})
- ->Args({3<<10, 60})
- ->Args({8<<10, 60})
- ->Args({1<<10, 80})
- ->Args({3<<10, 80})
- ->Args({8<<10, 80});
-```
-
For more complex patterns of inputs, passing a custom function to `Apply` allows
programmatic specification of an arbitrary set of arguments on which to run the
benchmark. The following example enumerates a dense range on one parameter,
@@ -630,32 +224,7 @@ static void CustomArguments(benchmark::internal::Benchmark* b) {
BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
```
-#### Passing Arbitrary Arguments to a Benchmark
-
-In C++11 it is possible to define a benchmark that takes an arbitrary number
-of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
-macro creates a benchmark that invokes `func` with the `benchmark::State` as
-the first argument followed by the specified `args...`.
-The `test_case_name` is appended to the name of the benchmark and
-should describe the values passed.
-
-```c++
-template <class ...ExtraArgs>
-void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
- [...]
-}
-// Registers a benchmark named "BM_takes_args/int_string_test" that passes
-// the specified values to `extra_args`.
-BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
-```
-
-Note that elements of `...args` may refer to global variables. Users should
-avoid modifying global state inside of a benchmark.
-
-<a name="asymptotic-complexity" />
-
-### Calculating Asymptotic Complexity (Big O)
-
+### Calculate asymptotic complexity (Big O)
Asymptotic complexity might be calculated for a family of benchmarks. The
following code will calculate the coefficient for the high-order term in the
running time and the normalized root-mean square error of string comparison.
@@ -686,28 +255,13 @@ that might be used to customize high-order term calculation.
```c++
BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
- ->Range(1<<10, 1<<18)->Complexity([](benchmark::IterationCount n)->double{return n; });
+ ->Range(1<<10, 1<<18)->Complexity([](int64_t n)->double{return n; });
```
-<a name="custom-benchmark-name" />
-
-### Custom Benchmark Name
-
-You can change the benchmark's name as follows:
-
-```c++
-BENCHMARK(BM_memcpy)->Name("memcpy")->RangeMultiplier(2)->Range(8, 8<<10);
-```
-
-The invocation will execute the benchmark as before using `BM_memcpy` but changes
-the prefix in the report to `memcpy`.
-
-<a name="templated-benchmarks" />
-
-### Templated Benchmarks
-
-This example produces and consumes messages of size `sizeof(v)` `range_x`
-times. It also outputs throughput in the absence of multiprogramming.
+### Templated benchmarks
+Templated benchmarks work the same way: This example produces and consumes
+messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the
+absence of multiprogramming.
```c++
template <class Q> void BM_Sequential(benchmark::State& state) {
@@ -738,218 +292,110 @@ Three macros are provided for adding benchmark templates.
#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
```
-<a name="fixtures" />
-
-### Fixtures
-
-Fixture tests are created by first defining a type that derives from
-`::benchmark::Fixture` and then creating/registering the tests using the
-following macros:
+### A Faster KeepRunning loop
-* `BENCHMARK_F(ClassName, Method)`
-* `BENCHMARK_DEFINE_F(ClassName, Method)`
-* `BENCHMARK_REGISTER_F(ClassName, Method)`
-
-For Example:
+In C++11 mode, a range-based for loop should be used in preference to
+the `KeepRunning` loop for running the benchmarks. For example:
```c++
-class MyFixture : public benchmark::Fixture {
-public:
- void SetUp(const ::benchmark::State& state) {
- }
-
- void TearDown(const ::benchmark::State& state) {
- }
-};
-
-BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
- for (auto _ : st) {
- ...
- }
-}
-
-BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
- for (auto _ : st) {
- ...
+static void BM_Fast(benchmark::State &state) {
+ for (auto _ : state) {
+ FastOperation();
}
}
-/* BarTest is NOT registered */
-BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
-/* BarTest is now registered */
+BENCHMARK(BM_Fast);
```
-#### Templated Fixtures
-
-Also you can create templated fixture by using the following macros:
-
-* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
-* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
-
-For example:
+The reason the range-based for loop is faster than using `KeepRunning` is
+that `KeepRunning` requires a memory load and store of the iteration count
+every iteration, whereas the range-based variant is able to keep the iteration count
+in a register.
-```c++
-template<typename T>
-class MyFixture : public benchmark::Fixture {};
+For example, an empty inner loop using the range-based for method looks like:
-BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
- for (auto _ : st) {
- ...
- }
-}
+```asm
+# Loop Init
+ mov rbx, qword ptr [r14 + 104]
+ call benchmark::State::StartKeepRunning()
+ test rbx, rbx
+ je .LoopEnd
+.LoopHeader: # =>This Inner Loop Header: Depth=1
+ add rbx, -1
+ jne .LoopHeader
+.LoopEnd:
+```
-BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
- for (auto _ : st) {
- ...
- }
-}
+Compared to an empty `KeepRunning` loop, which looks like:
-BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
+```asm
+.LoopHeader: # in Loop: Header=BB0_3 Depth=1
+ cmp byte ptr [rbx], 1
+ jne .LoopInit
+.LoopBody: # =>This Inner Loop Header: Depth=1
+ mov rax, qword ptr [rbx + 8]
+ lea rcx, [rax + 1]
+ mov qword ptr [rbx + 8], rcx
+ cmp rax, qword ptr [rbx + 104]
+ jb .LoopHeader
+ jmp .LoopEnd
+.LoopInit:
+ mov rdi, rbx
+ call benchmark::State::StartKeepRunning()
+ jmp .LoopBody
+.LoopEnd:
```
-<a name="custom-counters" />
-
-### Custom Counters
+Unless C++03 compatibility is required, the ranged-for variant of writing
+the benchmark loop should be preferred.
-You can add your own counters with user-defined names. The example below
-will add columns "Foo", "Bar" and "Baz" in its output:
+## Passing arbitrary arguments to a benchmark
+In C++11 it is possible to define a benchmark that takes an arbitrary number
+of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
+macro creates a benchmark that invokes `func` with the `benchmark::State` as
+the first argument followed by the specified `args...`.
+The `test_case_name` is appended to the name of the benchmark and
+should describe the values passed.
```c++
-static void UserCountersExample1(benchmark::State& state) {
- double numFoos = 0, numBars = 0, numBazs = 0;
- for (auto _ : state) {
- // ... count Foo,Bar,Baz events
- }
- state.counters["Foo"] = numFoos;
- state.counters["Bar"] = numBars;
- state.counters["Baz"] = numBazs;
+template <class ...ExtraArgs>
+void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
+ [...]
}
+// Registers a benchmark named "BM_takes_args/int_string_test" that passes
+// the specified values to `extra_args`.
+BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
```
+Note that elements of `...args` may refer to global variables. Users should
+avoid modifying global state inside of a benchmark.
-The `state.counters` object is a `std::map` with `std::string` keys
-and `Counter` values. The latter is a `double`-like class, via an implicit
-conversion to `double&`. Thus you can use all of the standard arithmetic
-assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
-
-In multithreaded benchmarks, each counter is set on the calling thread only.
-When the benchmark finishes, the counters from each thread will be summed;
-the resulting sum is the value which will be shown for the benchmark.
-
-The `Counter` constructor accepts three parameters: the value as a `double`
-; a bit flag which allows you to show counters as rates, and/or as per-thread
-iteration, and/or as per-thread averages, and/or iteration invariants,
-and/or finally inverting the result; and a flag specifying the 'unit' - i.e.
-is 1k a 1000 (default, `benchmark::Counter::OneK::kIs1000`), or 1024
-(`benchmark::Counter::OneK::kIs1024`)?
-
-```c++
- // sets a simple counter
- state.counters["Foo"] = numFoos;
-
- // Set the counter as a rate. It will be presented divided
- // by the duration of the benchmark.
- // Meaning: per one second, how many 'foo's are processed?
- state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate);
-
- // Set the counter as a rate. It will be presented divided
- // by the duration of the benchmark, and the result inverted.
- // Meaning: how many seconds it takes to process one 'foo'?
- state.counters["FooInvRate"] = Counter(numFoos, benchmark::Counter::kIsRate | benchmark::Counter::kInvert);
-
- // Set the counter as a thread-average quantity. It will
- // be presented divided by the number of threads.
- state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads);
+## Using RegisterBenchmark(name, fn, args...)
- // There's also a combined flag:
- state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate);
+The `RegisterBenchmark(name, func, args...)` function provides an alternative
+way to create and register benchmarks.
+`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
+pointer to a new benchmark with the specified `name` that invokes
+`func(st, args...)` where `st` is a `benchmark::State` object.
- // This says that we process with the rate of state.range(0) bytes every iteration:
- state.counters["BytesProcessed"] = Counter(state.range(0), benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1024);
-```
+Unlike the `BENCHMARK` registration macros, which can only be used at the global
+scope, `RegisterBenchmark` can be called anywhere. This allows for
+benchmark tests to be registered programmatically.
-When you're compiling in C++11 mode or later you can use `insert()` with
-`std::initializer_list`:
+Additionally, `RegisterBenchmark` allows any callable object to be registered
+as a benchmark, including capturing lambdas and function objects.
+For example:
```c++
- // With C++11, this can be done:
- state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}});
- // ... instead of:
- state.counters["Foo"] = numFoos;
- state.counters["Bar"] = numBars;
- state.counters["Baz"] = numBazs;
-```
-
-#### Counter Reporting
-
-When using the console reporter, by default, user counters are printed at
-the end after the table, the same way as ``bytes_processed`` and
-``items_processed``. This is best for cases in which there are few counters,
-or where there are only a couple of lines per benchmark. Here's an example of
-the default output:
-
-```
-------------------------------------------------------------------------------
-Benchmark Time CPU Iterations UserCounters...
-------------------------------------------------------------------------------
-BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8
-BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1024m
-BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2
-BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4
-BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8
-BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16
-BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32
-BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4
-BM_Factorial 26 ns 26 ns 26608979 40320
-BM_Factorial/real_time 26 ns 26 ns 26587936 40320
-BM_CalculatePiRange/1 16 ns 16 ns 45704255 0
-BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374
-BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746
-BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355
-```
-
-If this doesn't suit you, you can print each counter as a table column by
-passing the flag `--benchmark_counters_tabular=true` to the benchmark
-application. This is best for cases in which there are a lot of counters, or
-a lot of lines per individual benchmark. Note that this will trigger a
-reprinting of the table header any time the counter set changes between
-individual benchmarks. Here's an example of corresponding output when
-`--benchmark_counters_tabular=true` is passed:
+auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
-```
----------------------------------------------------------------------------------------
-Benchmark Time CPU Iterations Bar Bat Baz Foo
----------------------------------------------------------------------------------------
-BM_UserCounter/threads:8 2198 ns 9953 ns 70688 16 40 24 8
-BM_UserCounter/threads:1 9504 ns 9504 ns 73787 2 5 3 1
-BM_UserCounter/threads:2 4775 ns 9550 ns 72606 4 10 6 2
-BM_UserCounter/threads:4 2508 ns 9951 ns 70332 8 20 12 4
-BM_UserCounter/threads:8 2055 ns 9933 ns 70344 16 40 24 8
-BM_UserCounter/threads:16 1610 ns 9946 ns 70720 32 80 48 16
-BM_UserCounter/threads:32 1192 ns 9948 ns 70496 64 160 96 32
-BM_UserCounter/threads:4 2506 ns 9949 ns 70332 8 20 12 4
---------------------------------------------------------------
-Benchmark Time CPU Iterations
---------------------------------------------------------------
-BM_Factorial 26 ns 26 ns 26392245 40320
-BM_Factorial/real_time 26 ns 26 ns 26494107 40320
-BM_CalculatePiRange/1 15 ns 15 ns 45571597 0
-BM_CalculatePiRange/8 74 ns 74 ns 9450212 3.28374
-BM_CalculatePiRange/64 595 ns 595 ns 1173901 3.15746
-BM_CalculatePiRange/512 4752 ns 4752 ns 147380 3.14355
-BM_CalculatePiRange/4k 37970 ns 37972 ns 18453 3.14184
-BM_CalculatePiRange/32k 303733 ns 303744 ns 2305 3.14162
-BM_CalculatePiRange/256k 2434095 ns 2434186 ns 288 3.1416
-BM_CalculatePiRange/1024k 9721140 ns 9721413 ns 71 3.14159
-BM_CalculatePi/threads:8 2255 ns 9943 ns 70936
+int main(int argc, char** argv) {
+ for (auto& test_input : { /* ... */ })
+ benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
+ benchmark::Initialize(&argc, argv);
+ benchmark::RunSpecifiedBenchmarks();
+}
```
-Note above the additional header printed when the benchmark changes from
-``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does
-not have the same counter set as ``BM_UserCounter``.
-
-<a name="multithreaded-benchmarks"/>
-
-### Multithreaded Benchmarks
-
+### Multithreaded benchmarks
In a multithreaded test (benchmark invoked by multiple threads simultaneously),
it is guaranteed that none of the threads will start until all have reached
the start of the benchmark loop, and all will have finished before any thread
@@ -982,58 +428,11 @@ BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
Without `UseRealTime`, CPU time is used by default.
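+
+As a minimal sketch of the threading hooks (the `BM_MultiThreaded` name and
+body are illustrative placeholders, not taken from this README), a benchmark
+is made multithreaded by chaining `Threads()` onto its registration;
+`state.thread_index` can be used to restrict setup and teardown to one thread:
+
+```c++
+static void BM_MultiThreaded(benchmark::State& state) {
+  if (state.thread_index == 0) {
+    // Setup code, run by the first thread only.
+  }
+  for (auto _ : state) {
+    // Shared workload executed by every thread.
+  }
+  if (state.thread_index == 0) {
+    // Teardown code, run by the first thread only.
+  }
+}
+BENCHMARK(BM_MultiThreaded)->Threads(2);
+```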
-<a name="cpu-timers" />
-
-### CPU Timers
-
-By default, the CPU timer only measures the time spent by the main thread.
-If the benchmark itself uses threads internally, this measurement may not
-be what you are looking for. Instead, there is a way to measure the total
-CPU usage of the process, by all the threads.
-
-```c++
-void callee(int i);
-
-static void MyMain(int size) {
-#pragma omp parallel for
- for(int i = 0; i < size; i++)
- callee(i);
-}
-
-static void BM_OpenMP(benchmark::State& state) {
- for (auto _ : state)
- MyMain(state.range(0));
-}
-
-// Measure the time spent by the main thread, use it to decide for how long to
-// run the benchmark loop. Depending on the internal implementation detail may
-// measure to anywhere from near-zero (the overhead spent before/after work
-// handoff to worker thread[s]) to the whole single-thread time.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10);
-
-// Measure the user-visible time, the wall clock (literally, the time that
-// has passed on the clock on the wall), use it to decide for how long to
-// run the benchmark loop. This will always be meaningful, an will match the
-// time spent by the main thread in single-threaded case, in general decreasing
-// with the number of internal threads doing the work.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->UseRealTime();
-
-// Measure the total CPU consumption, use it to decide for how long to
-// run the benchmark loop. This will always measure to no less than the
-// time spent by the main thread in single-threaded case.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime();
-
-// A mixture of the last two. Measure the total CPU consumption, but use the
-// wall clock to decide for how long to run the benchmark loop.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime()->UseRealTime();
-```
-
-#### Controlling Timers
-
+## Controlling timers
Normally, the entire duration of the work loop (`for (auto _ : state) {}`)
-is measured. But sometimes, it is necessary to do some work inside of
+is measured. But sometimes, it is necessary to do some work inside of
that loop, every iteration, but without counting that time to the benchmark time.
-That is possible, although it is not recommended, since it has high overhead.
+That is possible, although it is not recommended, since it has high overhead.
```c++
static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
@@ -1050,10 +449,7 @@ static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}});
```
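+
+A minimal sketch of that pattern, assuming the untimed work is rebuilding the
+container each iteration (the `BM_SetInsert_TimerControl` name and workload
+are illustrative): `state.PauseTiming()` stops the timer and
+`state.ResumeTiming()` restarts it.
+
+```c++
+static void BM_SetInsert_TimerControl(benchmark::State& state) {
+  std::set<int> data;
+  for (auto _ : state) {
+    state.PauseTiming();   // untimed: reset the container between iterations
+    data.clear();
+    state.ResumeTiming();  // timed: the insertions we actually want to measure
+    for (int j = 0; j < state.range(0); ++j)
+      data.insert(j);
+  }
+}
+BENCHMARK(BM_SetInsert_TimerControl)->Range(1 << 10, 8 << 10);
+```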
-<a name="manual-timing" />
-
-### Manual Timing
-
+## Manual timing
For benchmarking something for which neither CPU time nor real-time are
correct or accurate enough, completely manual timing is supported using
the `UseManualTime` function.
@@ -1079,7 +475,7 @@ static void BM_ManualTiming(benchmark::State& state) {
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(sleep_duration);
- auto end = std::chrono::high_resolution_clock::now();
+ auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
@@ -1091,22 +487,7 @@ static void BM_ManualTiming(benchmark::State& state) {
BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
```
-<a name="setting-the-time-unit" />
-
-### Setting the Time Unit
-
-If a benchmark runs a few milliseconds it may be hard to visually compare the
-measured times, since the output data is given in nanoseconds per default. In
-order to manually set the time unit, you can specify it manually:
-
-```c++
-BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
-```
-
-<a name="preventing-optimization" />
-
-### Preventing Optimization
-
+### Preventing optimisation
To prevent a value or expression from being optimized away by the compiler
the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
functions can be used.
@@ -1164,10 +545,16 @@ static void BM_vector_push_back(benchmark::State& state) {
Note that `ClobberMemory()` is only available for GNU or MSVC based compilers.
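+
+As a minimal sketch (the `BM_Addition` name and body are illustrative, not an
+example from this library), wrapping each iteration's result in
+`benchmark::DoNotOptimize(...)` keeps the compiler from discarding the work:
+
+```c++
+static void BM_Addition(benchmark::State& state) {
+  int x = 0;
+  for (auto _ : state) {
+    // Without DoNotOptimize the compiler could fold the loop body away entirely.
+    benchmark::DoNotOptimize(x += 42);
+  }
+}
+BENCHMARK(BM_Addition);
+```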
-<a name="reporting-statistics" />
+### Set time unit manually
+If a benchmark runs for a few milliseconds it may be hard to visually compare
+the measured times, since the output data is given in nanoseconds by default.
+To set the time unit manually, you can specify it as follows:
-### Statistics: Reporting the Mean, Median and Standard Deviation of Repeated Benchmarks
+```c++
+BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
+```
+### Reporting the mean, median and standard deviation of repeated benchmarks
By default each benchmark is run once and that single result is reported.
However benchmarks are often noisy and a single result may not be representative
of the overall behavior. For this reason it's possible to repeatedly rerun the
@@ -1193,13 +580,10 @@ Calling `ReportAggregatesOnly(bool)` / `DisplayAggregatesOnly(bool)` on a
registered benchmark object overrides the value of the appropriate flag for that
benchmark.
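+
+For example (a sketch; `BM_test` stands in for any registered benchmark
+function), repetitions and aggregate-only reporting can be requested directly
+on the registration:
+
+```c++
+// Run the benchmark ten times and report only mean, median and standard deviation.
+BENCHMARK(BM_test)->Repetitions(10)->ReportAggregatesOnly(true);
+```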
-<a name="custom-statistics" />
-
-### Custom Statistics
-
+## User-defined statistics for repeated benchmarks
While having mean, median and standard deviation is nice, this may not be
-enough for everyone. For example you may want to know what the largest
-observation is, e.g. because you have some real-time constraints. This is easy.
+enough for everyone. For example, you may want to know what the largest
+observation is, e.g. because you have some real-time constraints. This is easy.
The following code will specify a custom statistic to be calculated, defined
by a lambda function.
@@ -1219,39 +603,194 @@ BENCHMARK(BM_spin_empty)
->Arg(512);
```
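+
+A minimal sketch of the same idea (assuming `<algorithm>` is available for
+`std::max_element`; `BM_spin_empty` stands in for any benchmark function)
+registers a "max" statistic computed over the per-repetition results:
+
+```c++
+BENCHMARK(BM_spin_empty)
+  ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
+    return *std::max_element(v.begin(), v.end());
+  })
+  ->Repetitions(10)
+  ->Arg(512);
+```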
-<a name="using-register-benchmark" />
+## Fixtures
+Fixture tests are created by
+first defining a type that derives from `::benchmark::Fixture` and then
+creating/registering the tests using the following macros:
-### Using RegisterBenchmark(name, fn, args...)
+* `BENCHMARK_F(ClassName, Method)`
+* `BENCHMARK_DEFINE_F(ClassName, Method)`
+* `BENCHMARK_REGISTER_F(ClassName, Method)`
-The `RegisterBenchmark(name, func, args...)` function provides an alternative
-way to create and register benchmarks.
-`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
-pointer to a new benchmark with the specified `name` that invokes
-`func(st, args...)` where `st` is a `benchmark::State` object.
+For example:
-Unlike the `BENCHMARK` registration macros, which can only be used at the global
-scope, the `RegisterBenchmark` can be called anywhere. This allows for
-benchmark tests to be registered programmatically.
+```c++
+class MyFixture : public benchmark::Fixture {};
-Additionally `RegisterBenchmark` allows any callable object to be registered
-as a benchmark. Including capturing lambdas and function objects.
+BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
-For Example:
+BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+/* BarTest is NOT registered */
+BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
+/* BarTest is now registered */
+```
+
+### Templated fixtures
+You can also create a templated fixture by using the following macros:
+
+* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
+* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
+
+For example:
```c++
-auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
+template<typename T>
+class MyFixture : public benchmark::Fixture {};
-int main(int argc, char** argv) {
- for (auto& test_input : { /* ... */ })
- benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
- benchmark::Initialize(&argc, argv);
- benchmark::RunSpecifiedBenchmarks();
- benchmark::Shutdown();
+BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+
+BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
+ for (auto _ : st) {
+ ...
+ }
+}
+
+BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
+```
+
+## User-defined counters
+
+You can add your own counters with user-defined names. The example below
+will add columns "Foo", "Bar" and "Baz" in its output:
+
+```c++
+static void UserCountersExample1(benchmark::State& state) {
+ double numFoos = 0, numBars = 0, numBazs = 0;
+ for (auto _ : state) {
+ // ... count Foo,Bar,Baz events
+ }
+ state.counters["Foo"] = numFoos;
+ state.counters["Bar"] = numBars;
+ state.counters["Baz"] = numBazs;
}
```
-<a name="exiting-with-an-error" />
+The `state.counters` object is a `std::map` with `std::string` keys
+and `Counter` values. The latter is a `double`-like class, via an implicit
+conversion to `double&`. Thus you can use all of the standard arithmetic
+assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
+
+In multithreaded benchmarks, each counter is set on the calling thread only.
+When the benchmark finishes, the counters from each thread will be summed;
+the resulting sum is the value which will be shown for the benchmark.
+
+The `Counter` constructor accepts three parameters: the value as a `double`;
+a bit flag which allows you to show counters as rates, and/or as per-thread
+iteration, and/or as per-thread averages, and/or iteration invariants;
+and a flag specifying the 'unit' - i.e. is 1k a 1000 (default,
+`benchmark::Counter::OneK::kIs1000`), or 1024
+(`benchmark::Counter::OneK::kIs1024`)?
+
+```c++
+ // sets a simple counter
+ state.counters["Foo"] = numFoos;
+
+ // Set the counter as a rate. It will be presented divided
+ // by the duration of the benchmark.
+ state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate);
+
+ // Set the counter as a thread-average quantity. It will
+ // be presented divided by the number of threads.
+ state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads);
+
+ // There's also a combined flag:
+ state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate);
+
+ // This says that we process with the rate of state.range(0) bytes every iteration:
+ state.counters["BytesProcessed"] = Counter(state.range(0), benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1024);
+```
+
+When you're compiling in C++11 mode or later you can use `insert()` with
+`std::initializer_list`:
+
+```c++
+ // With C++11, this can be done:
+ state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}});
+ // ... instead of:
+ state.counters["Foo"] = numFoos;
+ state.counters["Bar"] = numBars;
+ state.counters["Baz"] = numBazs;
+```
+
+### Counter reporting
+
+When using the console reporter, by default, user counters are printed at
+the end after the table, the same way as ``bytes_processed`` and
+``items_processed``. This is best for cases in which there are few counters,
+or where there are only a couple of lines per benchmark. Here's an example of
+the default output:
+
+```
+------------------------------------------------------------------------------
+Benchmark Time CPU Iterations UserCounters...
+------------------------------------------------------------------------------
+BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8
+BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1024m
+BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2
+BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4
+BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8
+BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16
+BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32
+BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4
+BM_Factorial 26 ns 26 ns 26608979 40320
+BM_Factorial/real_time 26 ns 26 ns 26587936 40320
+BM_CalculatePiRange/1 16 ns 16 ns 45704255 0
+BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374
+BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746
+BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355
+```
+
+If this doesn't suit you, you can print each counter as a table column by
+passing the flag `--benchmark_counters_tabular=true` to the benchmark
+application. This is best for cases in which there are a lot of counters, or
+a lot of lines per individual benchmark. Note that this will trigger a
+reprinting of the table header any time the counter set changes between
+individual benchmarks. Here's an example of corresponding output when
+`--benchmark_counters_tabular=true` is passed:
-### Exiting with an Error
+```
+---------------------------------------------------------------------------------------
+Benchmark Time CPU Iterations Bar Bat Baz Foo
+---------------------------------------------------------------------------------------
+BM_UserCounter/threads:8 2198 ns 9953 ns 70688 16 40 24 8
+BM_UserCounter/threads:1 9504 ns 9504 ns 73787 2 5 3 1
+BM_UserCounter/threads:2 4775 ns 9550 ns 72606 4 10 6 2
+BM_UserCounter/threads:4 2508 ns 9951 ns 70332 8 20 12 4
+BM_UserCounter/threads:8 2055 ns 9933 ns 70344 16 40 24 8
+BM_UserCounter/threads:16 1610 ns 9946 ns 70720 32 80 48 16
+BM_UserCounter/threads:32 1192 ns 9948 ns 70496 64 160 96 32
+BM_UserCounter/threads:4 2506 ns 9949 ns 70332 8 20 12 4
+--------------------------------------------------------------
+Benchmark Time CPU Iterations
+--------------------------------------------------------------
+BM_Factorial 26 ns 26 ns 26392245 40320
+BM_Factorial/real_time 26 ns 26 ns 26494107 40320
+BM_CalculatePiRange/1 15 ns 15 ns 45571597 0
+BM_CalculatePiRange/8 74 ns 74 ns 9450212 3.28374
+BM_CalculatePiRange/64 595 ns 595 ns 1173901 3.15746
+BM_CalculatePiRange/512 4752 ns 4752 ns 147380 3.14355
+BM_CalculatePiRange/4k 37970 ns 37972 ns 18453 3.14184
+BM_CalculatePiRange/32k 303733 ns 303744 ns 2305 3.14162
+BM_CalculatePiRange/256k 2434095 ns 2434186 ns 288 3.1416
+BM_CalculatePiRange/1024k 9721140 ns 9721413 ns 71 3.14159
+BM_CalculatePi/threads:8 2255 ns 9943 ns 70936
+```
+Note above the additional header printed when the benchmark changes from
+``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does
+not have the same counter set as ``BM_UserCounter``.
+
+## Exiting Benchmarks in Error
When errors caused by external influences, such as file I/O and network
communication, occur within a benchmark the
@@ -1262,9 +801,7 @@ Users must explicitly exit the loop, otherwise all iterations will be performed.
Users may explicitly return to exit the benchmark immediately.
The `SkipWithError(...)` function may be used at any point within the benchmark,
-including before and after the benchmark loop. Moreover, if `SkipWithError(...)`
-has been used, it is not required to reach the benchmark loop and one may return
-from the benchmark function early.
+including before and after the benchmark loop.
For example:
@@ -1272,105 +809,188 @@ For example:
static void BM_test(benchmark::State& state) {
auto resource = GetResource();
if (!resource.good()) {
- state.SkipWithError("Resource is not good!");
- // KeepRunning() loop will not be entered.
+ state.SkipWithError("Resource is not good!");
+ // KeepRunning() loop will not be entered.
}
- while (state.KeepRunning()) {
- auto data = resource.read_data();
- if (!resource.good()) {
- state.SkipWithError("Failed to read data!");
- break; // Needed to skip the rest of the iteration.
- }
- do_stuff(data);
+ while (state.KeepRunning()) {
+ auto data = resource.read_data();
+ if (!resource.good()) {
+ state.SkipWithError("Failed to read data!");
+ break; // Needed to skip the rest of the iteration.
+ }
+ do_stuff(data);
}
}
static void BM_test_ranged_fo(benchmark::State & state) {
- auto resource = GetResource();
- if (!resource.good()) {
- state.SkipWithError("Resource is not good!");
- return; // Early return is allowed when SkipWithError() has been used.
- }
+ state.SkipWithError("test will not be entered");
for (auto _ : state) {
- auto data = resource.read_data();
- if (!resource.good()) {
- state.SkipWithError("Failed to read data!");
- break; // REQUIRED to prevent all further iterations.
- }
- do_stuff(data);
+ state.SkipWithError("Failed!");
+ break; // REQUIRED to prevent all further iterations.
}
}
```
-<a name="a-faster-keep-running-loop" />
-### A Faster KeepRunning Loop
+## Running a subset of the benchmarks
-In C++11 mode, a ranged-based for loop should be used in preference to
-the `KeepRunning` loop for running the benchmarks. For example:
+The `--benchmark_filter=<regex>` option can be used to only run the benchmarks
+which match the specified `<regex>`. For example:
-```c++
-static void BM_Fast(benchmark::State &state) {
- for (auto _ : state) {
- FastOperation();
- }
+```bash
+$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
+Run on (1 X 2300 MHz CPU )
+2016-06-25 19:34:24
+Benchmark Time CPU Iterations
+----------------------------------------------------
+BM_memcpy/32 11 ns 11 ns 79545455
+BM_memcpy/32k 2181 ns 2185 ns 324074
+BM_memcpy/32 12 ns 12 ns 54687500
+BM_memcpy/32k 1834 ns 1837 ns 357143
+```
+
+## Runtime and reporting considerations
+When the benchmark binary is executed, each benchmark function is run serially.
+The number of iterations to run is determined dynamically by running the
+benchmark a few times, measuring the time taken, and ensuring that the
+ultimate result will be statistically stable. As such, faster benchmark
+functions will be run for more iterations than slower benchmark functions, and
+the number of iterations is thus reported.
+
+In all cases, the number of iterations for which the benchmark is run is
+governed by the amount of time the benchmark takes. Concretely, the number of
+iterations is at least one and not more than 1e9; iterations are increased
+until the CPU time is greater than the minimum time, or the wallclock time is
+5x the minimum time. The minimum time is
+set per benchmark by calling `MinTime` on the registered benchmark object.
+
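+For instance (a sketch; `BM_test` is a placeholder for any registered
+benchmark), the minimum time can be raised for a particularly noisy benchmark:
+
+```c++
+// Keep iterating until at least two seconds of benchmark time have accumulated.
+BENCHMARK(BM_test)->MinTime(2.0);
+```
+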
+Average timings are then reported over the iterations run. If multiple
+repetitions are requested using the `--benchmark_repetitions` command-line
+option, or at registration time, the benchmark function will be run several
+times and statistical results across these repetitions will also be reported.
+
+As well as the per-benchmark entries, a preamble in the report will include
+information about the machine on which the benchmarks are run.
+
+### Output Formats
+The library supports multiple output formats. Use the
+`--benchmark_format=<console|json|csv>` flag to set the format type. `console`
+is the default format.
+
+The Console format is intended to be a human-readable format. By default
+the format generates color output. Context is output on stderr and the
+tabular data on stdout. Example tabular output looks like:
+```
+Benchmark Time(ns) CPU(ns) Iterations
+----------------------------------------------------------------------
+BM_SetInsert/1024/1 28928 29349 23853 133.097kB/s 33.2742k items/s
+BM_SetInsert/1024/8 32065 32913 21375 949.487kB/s 237.372k items/s
+BM_SetInsert/1024/10 33157 33648 21431 1.13369MB/s 290.225k items/s
+```
+
+The JSON format outputs human-readable JSON split into two top-level attributes.
+The `context` attribute contains information about the run in general, including
+information about the CPU and the date.
+The `benchmarks` attribute contains a list of every benchmark run. Example JSON
+output looks like:
+```json
+{
+ "context": {
+ "date": "2015/03/17-18:40:25",
+ "num_cpus": 40,
+ "mhz_per_cpu": 2801,
+ "cpu_scaling_enabled": false,
+ "build_type": "debug"
+ },
+ "benchmarks": [
+ {
+ "name": "BM_SetInsert/1024/1",
+ "iterations": 94877,
+ "real_time": 29275,
+ "cpu_time": 29836,
+ "bytes_per_second": 134066,
+ "items_per_second": 33516
+ },
+ {
+ "name": "BM_SetInsert/1024/8",
+ "iterations": 21609,
+ "real_time": 32317,
+ "cpu_time": 32429,
+ "bytes_per_second": 986770,
+ "items_per_second": 246693
+ },
+ {
+ "name": "BM_SetInsert/1024/10",
+ "iterations": 21393,
+ "real_time": 32724,
+ "cpu_time": 33355,
+ "bytes_per_second": 1199226,
+ "items_per_second": 299807
+ }
+ ]
}
-BENCHMARK(BM_Fast);
```
-The reason the ranged-for loop is faster than using `KeepRunning`, is
-because `KeepRunning` requires a memory load and store of the iteration count
-ever iteration, whereas the ranged-for variant is able to keep the iteration count
-in a register.
+The CSV format outputs comma-separated values. The `context` is output on stderr
+and the CSV itself on stdout. Example CSV output looks like:
+```
+name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
+"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
+"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
+"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
+```
-For example, an empty inner loop of using the ranged-based for method looks like:
+### Output Files
+The library supports writing the output of the benchmark to a file specified
+by `--benchmark_out=<filename>`. The format of the output can be specified
+using `--benchmark_out_format={json|console|csv}`. Specifying
+`--benchmark_out` does not suppress the console output.
+
+## Result comparison
+
+It is possible to compare the benchmarking results. See [Additional Tooling Documentation](docs/tools.md)
+
+## Debug vs Release
+By default, benchmark builds as a debug library. You will see a warning in the
+output when this is the case. To build it as a release library instead, use:
-```asm
-# Loop Init
- mov rbx, qword ptr [r14 + 104]
- call benchmark::State::StartKeepRunning()
- test rbx, rbx
- je .LoopEnd
-.LoopHeader: # =>This Inner Loop Header: Depth=1
- add rbx, -1
- jne .LoopHeader
-.LoopEnd:
+```
+cmake -DCMAKE_BUILD_TYPE=Release
```
-Compared to an empty `KeepRunning` loop, which looks like:
+To enable link-time optimisation, use
-```asm
-.LoopHeader: # in Loop: Header=BB0_3 Depth=1
- cmp byte ptr [rbx], 1
- jne .LoopInit
-.LoopBody: # =>This Inner Loop Header: Depth=1
- mov rax, qword ptr [rbx + 8]
- lea rcx, [rax + 1]
- mov qword ptr [rbx + 8], rcx
- cmp rax, qword ptr [rbx + 104]
- jb .LoopHeader
- jmp .LoopEnd
-.LoopInit:
- mov rdi, rbx
- call benchmark::State::StartKeepRunning()
- jmp .LoopBody
-.LoopEnd:
+```
+cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
```
-Unless C++03 compatibility is required, the ranged-for variant of writing
-the benchmark loop should be preferred.
+If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake
+cache variables, if autodetection fails.
-<a name="disabling-cpu-frequency-scaling" />
+If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
+`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
-### Disabling CPU Frequency Scaling
+## Compiler Support
-If you see this error:
+Google Benchmark uses C++11 when building the library. As such we require
+a modern C++ toolchain, both compiler and standard library.
+
+The following minimum versions are strongly recommended to build the library:
+
+* GCC 4.8
+* Clang 3.4
+* Visual Studio 2013
+* Intel 2015 Update 1
+
+Anything older *may* work.
+
+Note: Using the library and its headers in C++03 is supported. C++11 is only
+required to build the library.
+## Disable CPU frequency scaling
+If you see this error:
```
***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
```
-
you might want to disable the CPU frequency scaling while running the benchmark:
-
```bash
sudo cpupower frequency-set --governor performance
./mybench
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/WORKSPACE b/gnu/llvm/libcxx/utils/google-benchmark/WORKSPACE
index 631f3ba05de..54734f1ea55 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/WORKSPACE
+++ b/gnu/llvm/libcxx/utils/google-benchmark/WORKSPACE
@@ -1,51 +1,7 @@
workspace(name = "com_github_google_benchmark")
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-http_archive(
- name = "rules_cc",
- strip_prefix = "rules_cc-a508235df92e71d537fcbae0c7c952ea6957a912",
- urls = ["https://github.com/bazelbuild/rules_cc/archive/a508235df92e71d537fcbae0c7c952ea6957a912.zip"],
- sha256 = "d7dc12c1d5bc1a87474de8e3d17b7731a4dcebcfb8aa3990fe8ac7734ef12f2f",
-)
-
-http_archive(
- name = "com_google_absl",
- sha256 = "f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111",
- strip_prefix = "abseil-cpp-20200225.2",
- urls = ["https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"],
-)
-
-http_archive(
- name = "com_google_googletest",
- strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
- urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
- sha256 = "8f827dd550db8b4fdf73904690df0be9fccc161017c9038a724bc9a0617a1bc8",
-)
-
http_archive(
- name = "pybind11",
- build_file = "@//bindings/python:pybind11.BUILD",
- sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d",
- strip_prefix = "pybind11-2.4.3",
- urls = ["https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz"],
-)
-
-new_local_repository(
- name = "python_headers",
- build_file = "@//bindings/python:python_headers.BUILD",
- path = "/usr/include/python3.6", # May be overwritten by setup.py.
-)
-
-http_archive(
- name = "rules_python",
- url = "https://github.com/bazelbuild/rules_python/releases/download/0.1.0/rules_python-0.1.0.tar.gz",
- sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0",
-)
-
-load("@rules_python//python:pip.bzl", pip3_install="pip_install")
-
-pip3_install(
- name = "py_deps",
- requirements = "//:requirements.txt",
+ name = "com_google_googletest",
+ urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
+ strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
)
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/appveyor.yml b/gnu/llvm/libcxx/utils/google-benchmark/appveyor.yml
index 81da955f028..cf240190bea 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/appveyor.yml
+++ b/gnu/llvm/libcxx/utils/google-benchmark/appveyor.yml
@@ -41,7 +41,7 @@ build_script:
- cmake --build . --config %configuration%
test_script:
- - ctest --build-config %configuration% --timeout 300 --output-on-failure
+ - ctest -c %configuration% --timeout 300 --output-on-failure
artifacts:
- path: '_build/CMakeFiles/*.log'
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake b/gnu/llvm/libcxx/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake
index 858589e9775..d0d20998144 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake
+++ b/gnu/llvm/libcxx/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake
@@ -34,11 +34,9 @@ function(add_cxx_compiler_flag FLAG)
check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
if(${MANGLED_FLAG})
- if(ARGC GREATER 1)
- set(VARIANT ${ARGV1})
+ set(VARIANT ${ARGV1})
+ if(ARGV1)
string(TOUPPER "_${VARIANT}" VARIANT)
- else()
- set(VARIANT "")
endif()
set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
endif()
@@ -51,11 +49,9 @@ function(add_required_cxx_compiler_flag FLAG)
check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
if(${MANGLED_FLAG})
- if(ARGC GREATER 1)
- set(VARIANT ${ARGV1})
+ set(VARIANT ${ARGV1})
+ if(ARGV1)
string(TOUPPER "_${VARIANT}" VARIANT)
- else()
- set(VARIANT "")
endif()
set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake b/gnu/llvm/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake
index 62e6741fe3d..99b56dd6239 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake
+++ b/gnu/llvm/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake
@@ -27,11 +27,6 @@ function(cxx_feature_check FILE)
return()
endif()
- if (ARGC GREATER 1)
- message(STATUS "Enabling additional flags: ${ARGV1}")
- list(APPEND BENCHMARK_CXX_LINKER_FLAGS ${ARGV1})
- endif()
-
if (NOT DEFINED COMPILE_${FEATURE})
message(STATUS "Performing Test ${FEATURE}")
if(CMAKE_CROSSCOMPILING)
@@ -42,9 +37,9 @@ function(cxx_feature_check FILE)
if(COMPILE_${FEATURE})
message(WARNING
"If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
- set(RUN_${FEATURE} 0 CACHE INTERNAL "")
+ set(RUN_${FEATURE} 0)
else()
- set(RUN_${FEATURE} 1 CACHE INTERNAL "")
+ set(RUN_${FEATURE} 1)
endif()
else()
message(STATUS "Performing Test ${FEATURE}")
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake b/gnu/llvm/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake
index 04a1f9b70d6..4f10f226d7a 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake
+++ b/gnu/llvm/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake
@@ -20,20 +20,16 @@ set(__get_git_version INCLUDED)
function(get_git_version var)
if(GIT_EXECUTABLE)
- execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
+ execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
RESULT_VARIABLE status
- OUTPUT_VARIABLE GIT_DESCRIBE_VERSION
+ OUTPUT_VARIABLE GIT_VERSION
ERROR_QUIET)
- if(status)
- set(GIT_DESCRIBE_VERSION "v0.0.0")
- endif()
-
- string(STRIP ${GIT_DESCRIBE_VERSION} GIT_DESCRIBE_VERSION)
- if(GIT_DESCRIBE_VERSION MATCHES v[^-]*-)
- string(REGEX REPLACE "v([^-]*)-([0-9]+)-.*" "\\1.\\2" GIT_VERSION ${GIT_DESCRIBE_VERSION})
+ if(${status})
+ set(GIT_VERSION "v0.0.0")
else()
- string(REGEX REPLACE "v(.*)" "\\1" GIT_VERSION ${GIT_DESCRIBE_VERSION})
+ string(STRIP ${GIT_VERSION} GIT_VERSION)
+ string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION})
endif()
# Work out if the repository is dirty
@@ -47,12 +43,12 @@ function(get_git_version var)
ERROR_QUIET)
string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)
if (${GIT_DIRTY})
- set(GIT_DESCRIBE_VERSION "${GIT_DESCRIBE_VERSION}-dirty")
+ set(GIT_VERSION "${GIT_VERSION}-dirty")
endif()
- message(STATUS "git version: ${GIT_DESCRIBE_VERSION} normalized to ${GIT_VERSION}")
else()
- set(GIT_VERSION "0.0.0")
+ set(GIT_VERSION "v0.0.0")
endif()
+ message(STATUS "git Version: ${GIT_VERSION}")
set(${var} ${GIT_VERSION} PARENT_SCOPE)
endfunction()
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/cmake/benchmark.pc.in b/gnu/llvm/libcxx/utils/google-benchmark/cmake/benchmark.pc.in
index 34beb012eef..1e84bff68d8 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/cmake/benchmark.pc.in
+++ b/gnu/llvm/libcxx/utils/google-benchmark/cmake/benchmark.pc.in
@@ -1,12 +1,11 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
-libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
-includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
+libdir=${prefix}/lib
+includedir=${prefix}/include
Name: @PROJECT_NAME@
Description: Google microbenchmark framework
Version: @VERSION@
Libs: -L${libdir} -lbenchmark
-Libs.private: -lpthread
Cflags: -I${includedir}
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/cmake/gnu_posix_regex.cpp b/gnu/llvm/libcxx/utils/google-benchmark/cmake/gnu_posix_regex.cpp
index b5b91cdab7c..105189f02ee 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/cmake/gnu_posix_regex.cpp
+++ b/gnu/llvm/libcxx/utils/google-benchmark/cmake/gnu_posix_regex.cpp
@@ -9,4 +9,3 @@ int main() {
}
return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
}
-
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/cmake/posix_regex.cpp b/gnu/llvm/libcxx/utils/google-benchmark/cmake/posix_regex.cpp
index 466dc62560a..02f6dfc278a 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/cmake/posix_regex.cpp
+++ b/gnu/llvm/libcxx/utils/google-benchmark/cmake/posix_regex.cpp
@@ -11,4 +11,3 @@ int main() {
regfree(&re);
return ret;
}
-
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/cmake/std_regex.cpp b/gnu/llvm/libcxx/utils/google-benchmark/cmake/std_regex.cpp
index 696f2a26bce..8177c482e83 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/cmake/std_regex.cpp
+++ b/gnu/llvm/libcxx/utils/google-benchmark/cmake/std_regex.cpp
@@ -7,4 +7,3 @@ int main() {
std::regex_constants::extended | std::regex_constants::nosubs);
return std::regex_search(str, re) ? 0 : -1;
}
-
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/docs/AssemblyTests.md b/gnu/llvm/libcxx/utils/google-benchmark/docs/AssemblyTests.md
index 1fbdc269b53..0d06f50ac65 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/docs/AssemblyTests.md
+++ b/gnu/llvm/libcxx/utils/google-benchmark/docs/AssemblyTests.md
@@ -144,4 +144,3 @@ tests to other architectures and compilers (using `CHECK` prefixes).
Furthermore, the tests fail for builds which specify additional flags
that modify code generation, including `--coverage` or `-fsanitize=`.
-
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/docs/tools.md b/gnu/llvm/libcxx/utils/google-benchmark/docs/tools.md
index f2d0c497f3f..4a3b2e9bd2c 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/docs/tools.md
+++ b/gnu/llvm/libcxx/utils/google-benchmark/docs/tools.md
@@ -4,11 +4,7 @@
The `compare.py` can be used to compare the result of benchmarks.
-### Dependencies
-The utility relies on the [scipy](https://www.scipy.org) package which can be installed using pip:
-```bash
-pip3 install -r requirements.txt
-```
+**NOTE**: the utility relies on the scipy package which can be installed using [these instructions](https://www.scipy.org/install.html).
### Displaying aggregates only
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/include/benchmark/benchmark.h b/gnu/llvm/libcxx/utils/google-benchmark/include/benchmark/benchmark.h
index 9b5480244d6..a0fd7c6e1ca 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/include/benchmark/benchmark.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/include/benchmark/benchmark.h
@@ -42,7 +42,6 @@ BENCHMARK(BM_StringCopy);
int main(int argc, char** argv) {
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
- benchmark::Shutdown();
return 0;
}
@@ -57,7 +56,8 @@ static void BM_memcpy(benchmark::State& state) {
memset(src, 'x', state.range(0));
for (auto _ : state)
memcpy(dst, src, state.range(0));
- state.SetBytesProcessed(state.iterations() * state.range(0));
+ state.SetBytesProcessed(int64_t(state.iterations()) *
+ int64_t(state.range(0)));
delete[] src; delete[] dst;
}
BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
@@ -122,7 +122,8 @@ template <class Q> int BM_Sequential(benchmark::State& state) {
q.Wait(&v);
}
// actually messages, not bytes:
- state.SetBytesProcessed(state.iterations() * state.range(0));
+ state.SetBytesProcessed(
+ static_cast<int64_t>(state.iterations())*state.range(0));
}
BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
@@ -168,12 +169,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#define BENCHMARK_HAS_CXX11
#endif
-// This _MSC_VER check should detect VS 2017 v15.3 and newer.
-#if __cplusplus >= 201703L || \
- (defined(_MSC_VER) && _MSC_VER >= 1911 && _MSVC_LANG >= 201703L)
-#define BENCHMARK_HAS_CXX17
-#endif
-
#include <stdint.h>
#include <algorithm>
@@ -183,7 +178,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#include <map>
#include <set>
#include <string>
-#include <utility>
#include <vector>
#if defined(BENCHMARK_HAS_CXX11)
@@ -206,19 +200,13 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
TypeName& operator=(const TypeName&) = delete
#endif
-#ifdef BENCHMARK_HAS_CXX17
-#define BENCHMARK_UNUSED [[maybe_unused]]
-#elif defined(__GNUC__) || defined(__clang__)
+#if defined(__GNUC__)
#define BENCHMARK_UNUSED __attribute__((unused))
-#else
-#define BENCHMARK_UNUSED
-#endif
-
-#if defined(__GNUC__) || defined(__clang__)
#define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline))
#define BENCHMARK_NOEXCEPT noexcept
#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
#elif defined(_MSC_VER) && !defined(__clang__)
+#define BENCHMARK_UNUSED
#define BENCHMARK_ALWAYS_INLINE __forceinline
#if _MSC_VER >= 1900
#define BENCHMARK_NOEXCEPT noexcept
@@ -229,6 +217,7 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#endif
#define __func__ __FUNCTION__
#else
+#define BENCHMARK_UNUSED
#define BENCHMARK_ALWAYS_INLINE
#define BENCHMARK_NOEXCEPT
#define BENCHMARK_NOEXCEPT_OP(x)
@@ -257,17 +246,11 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#endif
#if defined(__GNUC__) || __has_builtin(__builtin_unreachable)
-#define BENCHMARK_UNREACHABLE() __builtin_unreachable()
+ #define BENCHMARK_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
-#define BENCHMARK_UNREACHABLE() __assume(false)
-#else
-#define BENCHMARK_UNREACHABLE() ((void)0)
-#endif
-
-#ifdef BENCHMARK_HAS_CXX11
-#define BENCHMARK_OVERRIDE override
+ #define BENCHMARK_UNREACHABLE() __assume(false)
#else
-#define BENCHMARK_OVERRIDE
+ #define BENCHMARK_UNREACHABLE() ((void)0)
#endif
namespace benchmark {
@@ -275,7 +258,6 @@ class BenchmarkReporter;
class MemoryManager;
void Initialize(int* argc, char** argv);
-void Shutdown();
// Report to stdout all arguments in 'argv' as unrecognized except the first.
// Returns true if there is at least one unrecognized argument (i.e. 'argc' > 1).
@@ -302,9 +284,6 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
// allocation measurements for benchmark runs.
void RegisterMemoryManager(MemoryManager* memory_manager);
-// Add a key-value pair to output as part of the context stanza in the report.
-void AddCustomContext(const std::string& key, const std::string& value);
-
namespace internal {
class Benchmark;
class BenchmarkImp;
@@ -391,10 +370,7 @@ class Counter {
// It will be presented divided by the number of iterations.
kAvgIterations = 1U << 3U,
// Mark the counter as a iteration-average rate. See above.
- kAvgIterationsRate = kIsRate | kAvgIterations,
-
- // In the end, invert the result. This is always done last!
- kInvert = 1U << 31U
+ kAvgIterationsRate = kIsRate | kAvgIterations
};
enum OneK {
@@ -429,7 +405,7 @@ typedef std::map<std::string, Counter> UserCounters;
// TimeUnit is passed to a benchmark in order to specify the order of magnitude
// for the measured time.
-enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond };
+enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond };
// BigO is passed to a benchmark in order to specify the asymptotic
// computational
@@ -437,17 +413,14 @@ enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond };
// calculated automatically to the best fit.
enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
-typedef uint64_t IterationCount;
-
// BigOFunc is passed to a benchmark in order to specify the asymptotic
// computational complexity for the benchmark.
-typedef double(BigOFunc)(IterationCount);
+typedef double(BigOFunc)(int64_t);
// StatisticsFunc is passed to a benchmark in order to compute some descriptive
// statistics over all the measurements of some type
typedef double(StatisticsFunc)(const std::vector<double>&);
-namespace internal {
struct Statistics {
std::string name_;
StatisticsFunc* compute_;
@@ -456,10 +429,10 @@ struct Statistics {
: name_(name), compute_(compute) {}
};
-class BenchmarkInstance;
+namespace internal {
+struct BenchmarkInstance;
class ThreadTimer;
class ThreadManager;
-class PerfCountersMeasurement;
enum AggregationReportMode
#if defined(BENCHMARK_HAS_CXX11)
@@ -515,7 +488,7 @@ class State {
// while (state.KeepRunningBatch(1000)) {
// // process 1000 elements
// }
- bool KeepRunningBatch(IterationCount n);
+ bool KeepRunningBatch(size_t n);
// REQUIRES: timer is running and 'SkipWithError(...)' has not been called
// by the current thread.
@@ -565,9 +538,6 @@ class State {
// responsibility to exit the scope as needed.
void SkipWithError(const char* msg);
- // Returns true if an error has been reported with 'SkipWithError(...)'.
- bool error_occurred() const { return error_occurred_; }
-
// REQUIRES: called exactly once per iteration of the benchmarking loop.
// Set the manually measured time for this benchmark iteration, which
// is used instead of automatically measured time if UseManualTime() was
@@ -604,7 +574,7 @@ class State {
void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }
BENCHMARK_ALWAYS_INLINE
- int64_t complexity_length_n() const { return complexity_n_; }
+ int64_t complexity_length_n() { return complexity_n_; }
// If this routine is called with items > 0, then an items/s
// label is printed on the benchmark report line for the currently
@@ -657,7 +627,7 @@ class State {
int64_t range_y() const { return range(1); }
BENCHMARK_ALWAYS_INLINE
- IterationCount iterations() const {
+ size_t iterations() const {
if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
return 0;
}
@@ -668,15 +638,15 @@ class State {
: // items we expect on the first cache line (ie 64 bytes of the struct)
// When total_iterations_ is 0, KeepRunning() and friends will return false.
// May be larger than max_iterations.
- IterationCount total_iterations_;
+ size_t total_iterations_;
// When using KeepRunningBatch(), batch_leftover_ holds the number of
// iterations beyond max_iters that were run. Used to track
// completed_iterations_ accurately.
- IterationCount batch_leftover_;
+ size_t batch_leftover_;
public:
- const IterationCount max_iterations;
+ const size_t max_iterations;
private:
bool started_;
@@ -697,32 +667,30 @@ class State {
const int threads;
private:
- State(IterationCount max_iters, const std::vector<int64_t>& ranges,
- int thread_i, int n_threads, internal::ThreadTimer* timer,
- internal::ThreadManager* manager,
- internal::PerfCountersMeasurement* perf_counters_measurement);
+ State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
+ int n_threads, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager);
void StartKeepRunning();
// Implementation of KeepRunning() and KeepRunningBatch().
// is_batch must be true unless n is 1.
- bool KeepRunningInternal(IterationCount n, bool is_batch);
+ bool KeepRunningInternal(size_t n, bool is_batch);
void FinishKeepRunning();
- internal::ThreadTimer* const timer_;
- internal::ThreadManager* const manager_;
- internal::PerfCountersMeasurement* const perf_counters_measurement_;
+ internal::ThreadTimer* timer_;
+ internal::ThreadManager* manager_;
- friend class internal::BenchmarkInstance;
+ friend struct internal::BenchmarkInstance;
};
inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() {
return KeepRunningInternal(1, /*is_batch=*/false);
}
-inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(IterationCount n) {
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(size_t n) {
return KeepRunningInternal(n, /*is_batch=*/true);
}
-inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n,
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(size_t n,
bool is_batch) {
// total_iterations_ is set to 0 by the constructor, and always set to a
// nonzero value by StartKeepRunning().
@@ -786,7 +754,7 @@ struct State::StateIterator {
}
private:
- IterationCount cached_;
+ size_t cached_;
State* const parent_;
};
@@ -815,9 +783,6 @@ class Benchmark {
// Note: the following methods all return "this" so that multiple
// method calls can be chained together in one expression.
- // Specify the name of the benchmark
- Benchmark* Name(const std::string& name);
-
// Run this benchmark once with "x" as the extra argument passed
// to the function.
// REQUIRES: The function passed to the constructor must accept an arg1.
@@ -856,11 +821,6 @@ class Benchmark {
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
- // Run this benchmark once for each combination of values in the (cartesian)
- // product of the supplied argument lists.
- // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
- Benchmark* ArgsProduct(const std::vector<std::vector<int64_t> >& arglists);
-
// Equivalent to ArgNames({name})
Benchmark* ArgName(const std::string& name);
@@ -898,7 +858,7 @@ class Benchmark {
// NOTE: This function should only be used when *exact* iteration control is
// needed and never to control or limit how long a benchmark runs, where
// `--benchmark_min_time=N` or `MinTime(...)` should be used instead.
- Benchmark* Iterations(IterationCount n);
+ Benchmark* Iterations(size_t n);
// Specify the amount of times to repeat this benchmark. This option overrides
// the `benchmark_repetitions` flag.
@@ -914,18 +874,11 @@ class Benchmark {
// Same as ReportAggregatesOnly(), but applies to display reporter only.
Benchmark* DisplayAggregatesOnly(bool value = true);
- // By default, the CPU time is measured only for the main thread, which may
- // be unrepresentative if the benchmark uses threads internally. If called,
- // the total CPU time spent by all the threads will be measured instead.
- // By default, the only the main thread CPU time will be measured.
- Benchmark* MeasureProcessCPUTime();
-
- // If a particular benchmark should use the Wall clock instead of the CPU time
- // (be it either the CPU time of the main thread only (default), or the
- // total CPU usage of the benchmark), call this method. If called, the elapsed
- // (wall) time will be used to control how many iterations are run, and in the
- // printing of items/second or MB/seconds values.
- // If not called, the CPU time used by the benchmark will be used.
+ // If a particular benchmark is I/O bound, runs multiple threads internally or
+ // if for some reason CPU timings are not representative, call this method. If
+ // called, the elapsed time will be used to control how many iterations are
+ // run, and in the printing of items/second or MB/seconds values. If not
+ // called, the cpu time used by the benchmark will be used.
Benchmark* UseRealTime();
// If a benchmark must measure time manually (e.g. if GPU execution time is
@@ -989,7 +942,6 @@ class Benchmark {
private:
friend class BenchmarkFamilies;
- friend class BenchmarkInstance;
std::string name_;
AggregationReportMode aggregation_report_mode_;
@@ -998,9 +950,8 @@ class Benchmark {
TimeUnit time_unit_;
int range_multiplier_;
double min_time_;
- IterationCount iterations_;
+ size_t iterations_;
int repetitions_;
- bool measure_process_cpu_time_;
bool use_real_time_;
bool use_manual_time_;
BigO complexity_;
@@ -1037,7 +988,7 @@ class FunctionBenchmark : public Benchmark {
FunctionBenchmark(const char* name, Function* func)
: Benchmark(name), func_(func) {}
- virtual void Run(State& st) BENCHMARK_OVERRIDE;
+ virtual void Run(State& st);
private:
Function* func_;
@@ -1047,7 +998,7 @@ class FunctionBenchmark : public Benchmark {
template <class Lambda>
class LambdaBenchmark : public Benchmark {
public:
- virtual void Run(State& st) BENCHMARK_OVERRIDE { lambda_(st); }
+ virtual void Run(State& st) { lambda_(st); }
private:
template <class OLambda>
@@ -1099,7 +1050,7 @@ class Fixture : public internal::Benchmark {
public:
Fixture() : internal::Benchmark("") {}
- virtual void Run(State& st) BENCHMARK_OVERRIDE {
+ virtual void Run(State& st) {
this->SetUp(st);
this->BenchmarkCase(st);
this->TearDown(st);
@@ -1132,12 +1083,9 @@ class Fixture : public internal::Benchmark {
// Helpers for generating unique variable names
#define BENCHMARK_PRIVATE_NAME(n) \
- BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, n)
+ BENCHMARK_PRIVATE_CONCAT(_benchmark_, BENCHMARK_PRIVATE_UNIQUE_ID, n)
#define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c)
#define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c
-// Helper for concatenation with macro name expansion
-#define BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method) \
- BaseClass##_##Method##_Benchmark
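A hedged expansion trace of the name helpers above, assuming BENCHMARK_PRIVATE_UNIQUE_ID expands to __LINE__, the macro is used on line 42, and using the 10.0.1 prefix shown on the '+' line:

    //   BENCHMARK_PRIVATE_NAME(test)
    //     -> BENCHMARK_PRIVATE_CONCAT(_benchmark_, 42, test)
    //     -> BENCHMARK_PRIVATE_CONCAT2(_benchmark_, 42, test)
    //     -> _benchmark_42test
    // so BENCHMARK_PRIVATE_DECLARE(test) below declares a static
    // ::benchmark::internal::Benchmark* named _benchmark_42test.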
#define BENCHMARK_PRIVATE_DECLARE(n) \
static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \
@@ -1207,37 +1155,37 @@ class Fixture : public internal::Benchmark {
#define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a)
#endif
-#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
- class BaseClass##_##Method##_Benchmark : public BaseClass { \
- public: \
- BaseClass##_##Method##_Benchmark() : BaseClass() { \
- this->SetName(#BaseClass "/" #Method); \
- } \
- \
- protected: \
- virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
+#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass { \
+ public: \
+ BaseClass##_##Method##_Benchmark() : BaseClass() { \
+ this->SetName(#BaseClass "/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&); \
};
-#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
- class BaseClass##_##Method##_Benchmark : public BaseClass<a> { \
- public: \
- BaseClass##_##Method##_Benchmark() : BaseClass<a>() { \
- this->SetName(#BaseClass "<" #a ">/" #Method); \
- } \
- \
- protected: \
- virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
+#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass<a> { \
+ public: \
+ BaseClass##_##Method##_Benchmark() : BaseClass<a>() { \
+ this->SetName(#BaseClass "<" #a ">/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&); \
};
-#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
- class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> { \
- public: \
- BaseClass##_##Method##_Benchmark() : BaseClass<a, b>() { \
- this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \
- } \
- \
- protected: \
- virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
+#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
+ class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> { \
+ public: \
+ BaseClass##_##Method##_Benchmark() : BaseClass<a, b>() { \
+ this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \
+ } \
+ \
+ protected: \
+ virtual void BenchmarkCase(::benchmark::State&); \
};
#ifdef BENCHMARK_HAS_CXX11
@@ -1249,7 +1197,7 @@ class Fixture : public internal::Benchmark {
} \
\
protected: \
- virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
+ virtual void BenchmarkCase(::benchmark::State&); \
};
#else
#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \
@@ -1258,27 +1206,27 @@ class Fixture : public internal::Benchmark {
#define BENCHMARK_DEFINE_F(BaseClass, Method) \
BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
- void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
- void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \
BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
- void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \
BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
- void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
#else
#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
#endif
#define BENCHMARK_REGISTER_F(BaseClass, Method) \
- BENCHMARK_PRIVATE_REGISTER_F(BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method))
+ BENCHMARK_PRIVATE_REGISTER_F(BaseClass##_##Method##_Benchmark)
#define BENCHMARK_PRIVATE_REGISTER_F(TestName) \
BENCHMARK_PRIVATE_DECLARE(TestName) = \
@@ -1288,23 +1236,23 @@ class Fixture : public internal::Benchmark {
#define BENCHMARK_F(BaseClass, Method) \
BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
- void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
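For reference, a minimal fixture benchmark that exercises BENCHMARK_F as defined above; QueueFixture and its members are hypothetical:

    #include <benchmark/benchmark.h>
    #include <vector>

    class QueueFixture : public benchmark::Fixture {
     public:
      void SetUp(const ::benchmark::State&) override { q.assign(1024, 0); }
      void TearDown(const ::benchmark::State&) override { q.clear(); }
      std::vector<int> q;
    };

    // Declares a QueueFixture_Touch_Benchmark class, registers it, and lets the
    // BenchmarkCase body be written inline.
    BENCHMARK_F(QueueFixture, Touch)(benchmark::State& st) {
      for (auto _ : st)
        benchmark::DoNotOptimize(q[0] += 1);
    }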
#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
- void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \
BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
- void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
- void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
+ void BaseClass##_##Method##_Benchmark::BenchmarkCase
#else
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
@@ -1316,8 +1264,6 @@ class Fixture : public internal::Benchmark {
::benchmark::Initialize(&argc, argv); \
if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \
::benchmark::RunSpecifiedBenchmarks(); \
- ::benchmark::Shutdown(); \
- return 0; \
} \
int main(int, char**)
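The 10.0.1 form of BENCHMARK_MAIN() above is roughly equivalent to writing this main() by hand (a sketch, not a verbatim expansion):

    int main(int argc, char** argv) {
      ::benchmark::Initialize(&argc, argv);
      if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
      ::benchmark::RunSpecifiedBenchmarks();
      return 0;  // the newer upstream macro additionally calls ::benchmark::Shutdown() here
    }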
@@ -1334,16 +1280,10 @@ struct CPUInfo {
int num_sharing;
};
- enum Scaling {
- UNKNOWN,
- ENABLED,
- DISABLED
- };
-
int num_cpus;
- Scaling scaling;
double cycles_per_second;
std::vector<CacheInfo> caches;
+ bool scaling_enabled;
std::vector<double> load_avg;
static const CPUInfo& Get();
@@ -1353,33 +1293,15 @@ struct CPUInfo {
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo);
};
-// Adding Struct for System Information
+//Adding Struct for System Information
struct SystemInfo {
std::string name;
static const SystemInfo& Get();
-
private:
SystemInfo();
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo);
};
-// BenchmarkName contains the components of the Benchmark's name
-// which allows individual fields to be modified or cleared before
-// building the final name using 'str()'.
-struct BenchmarkName {
- std::string function_name;
- std::string args;
- std::string min_time;
- std::string iterations;
- std::string repetitions;
- std::string time_type;
- std::string threads;
-
- // Return the full name of the benchmark with each non-empty
- // field separated by a '/'
- std::string str() const;
-};
-
// Interface for custom benchmark result printers.
// By default, benchmark reports are printed to stdout. However an application
// can control the destination of the reports by calling
@@ -1397,14 +1319,12 @@ class BenchmarkReporter {
};
struct Run {
- static const int64_t no_repetition_index = -1;
enum RunType { RT_Iteration, RT_Aggregate };
Run()
: run_type(RT_Iteration),
error_occurred(false),
iterations(1),
- threads(1),
time_unit(kNanosecond),
real_accumulated_time(0),
cpu_accumulated_time(0),
@@ -1420,19 +1340,14 @@ class BenchmarkReporter {
max_bytes_used(0) {}
std::string benchmark_name() const;
- BenchmarkName run_name;
- int64_t family_index;
- int64_t per_family_instance_index;
- RunType run_type;
+ std::string run_name;
+ RunType run_type; // is this a measurement, or an aggregate?
std::string aggregate_name;
std::string report_label; // Empty if not set by benchmark.
bool error_occurred;
std::string error_message;
- IterationCount iterations;
- int64_t threads;
- int64_t repetition_index;
- int64_t repetitions;
+ int64_t iterations;
TimeUnit time_unit;
double real_accumulated_time;
double cpu_accumulated_time;
@@ -1458,7 +1373,7 @@ class BenchmarkReporter {
int64_t complexity_n;
// what statistics to compute from the measurements
- const std::vector<internal::Statistics>* statistics;
+ const std::vector<Statistics>* statistics;
// Inform print function whether the current run is a complexity report
bool report_big_o;
@@ -1472,19 +1387,6 @@ class BenchmarkReporter {
int64_t max_bytes_used;
};
- struct PerFamilyRunReports {
- PerFamilyRunReports() : num_runs_total(0), num_runs_done(0) {}
-
- // How many runs will all instances of this benchmark perform?
- int num_runs_total;
-
- // How many runs have happened already?
- int num_runs_done;
-
- // The reports about (non-erroneous!) runs of this family.
- std::vector<BenchmarkReporter::Run> Runs;
- };
-
// Construct a BenchmarkReporter with the output stream set to 'std::cout'
// and the error stream set to 'std::cerr'
BenchmarkReporter();
@@ -1557,8 +1459,8 @@ class ConsoleReporter : public BenchmarkReporter {
prev_counters_(),
printed_header_(false) {}
- virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
- virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;
+ virtual bool ReportContext(const Context& context);
+ virtual void ReportRuns(const std::vector<Run>& reports);
protected:
virtual void PrintRunData(const Run& report);
@@ -1573,9 +1475,9 @@ class ConsoleReporter : public BenchmarkReporter {
class JSONReporter : public BenchmarkReporter {
public:
JSONReporter() : first_report_(true) {}
- virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
- virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;
- virtual void Finalize() BENCHMARK_OVERRIDE;
+ virtual bool ReportContext(const Context& context);
+ virtual void ReportRuns(const std::vector<Run>& reports);
+ virtual void Finalize();
private:
void PrintRunData(const Run& report);
@@ -1583,13 +1485,12 @@ class JSONReporter : public BenchmarkReporter {
bool first_report_;
};
-class BENCHMARK_DEPRECATED_MSG(
- "The CSV Reporter will be removed in a future release") CSVReporter
- : public BenchmarkReporter {
+class BENCHMARK_DEPRECATED_MSG("The CSV Reporter will be removed in a future release")
+ CSVReporter : public BenchmarkReporter {
public:
CSVReporter() : printed_header_(false) {}
- virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
- virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;
+ virtual bool ReportContext(const Context& context);
+ virtual void ReportRuns(const std::vector<Run>& reports);
private:
void PrintRunData(const Run& report);
@@ -1623,8 +1524,6 @@ class MemoryManager {
inline const char* GetTimeUnitString(TimeUnit unit) {
switch (unit) {
- case kSecond:
- return "s";
case kMillisecond:
return "ms";
case kMicrosecond:
@@ -1637,8 +1536,6 @@ inline const char* GetTimeUnitString(TimeUnit unit) {
inline double GetTimeUnitMultiplier(TimeUnit unit) {
switch (unit) {
- case kSecond:
- return 1;
case kMillisecond:
return 1e3;
case kMicrosecond:
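A hedged usage note for the two helpers above: the multiplier converts a duration measured in seconds into the selected report unit, and with the kSecond case dropped only the millisecond/microsecond/nanosecond units remain handled here. For example:

    // Convert an elapsed time of 0.002 s for reporting in milliseconds.
    double elapsed_seconds = 0.002;
    double in_unit = elapsed_seconds *
                     benchmark::GetTimeUnitMultiplier(benchmark::kMillisecond);  // 2.0
    const char* suffix = benchmark::GetTimeUnitString(benchmark::kMillisecond);  // "ms"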
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/CMakeLists.txt b/gnu/llvm/libcxx/utils/google-benchmark/src/CMakeLists.txt
index a6c8e9a7a0b..7a77fdf41de 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/CMakeLists.txt
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/CMakeLists.txt
@@ -1,5 +1,4 @@
# Allow the source files to find headers in src/
-include(GNUInstallDirs)
include_directories(${PROJECT_SOURCE_DIR}/src)
if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
@@ -18,7 +17,6 @@ foreach(item ${BENCHMARK_MAIN})
endforeach()
add_library(benchmark ${SOURCE_FILES})
-add_library(benchmark::benchmark ALIAS benchmark)
set_target_properties(benchmark PROPERTIES
OUTPUT_NAME "benchmark"
VERSION ${GENERIC_LIB_VERSION}
@@ -28,12 +26,6 @@ target_include_directories(benchmark PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
)
-# libpfm, if available
-if (HAVE_LIBPFM)
- target_link_libraries(benchmark libpfm.a)
- add_definitions(-DHAVE_LIBPFM)
-endif()
-
# Link threads.
target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
find_library(LIBRT rt)
@@ -41,14 +33,6 @@ if(LIBRT)
target_link_libraries(benchmark ${LIBRT})
endif()
-if(CMAKE_BUILD_TYPE)
- string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER)
-endif()
-if(NOT CMAKE_THREAD_LIBS_INIT AND "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}" MATCHES ".*-fsanitize=[^ ]*address.*")
- message(WARNING "CMake's FindThreads.cmake did not fail, but CMAKE_THREAD_LIBS_INIT ended up being empty. This was fixed in https://github.com/Kitware/CMake/commit/d53317130e84898c5328c237186dbd995aaf1c12 Let's guess that -pthread is sufficient.")
- target_link_libraries(benchmark -pthread)
-endif()
-
# We need extra libraries on Windows
if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
target_link_libraries(benchmark shlwapi)
@@ -61,7 +45,6 @@ endif()
# Benchmark main library
add_library(benchmark_main "benchmark_main.cc")
-add_library(benchmark::benchmark_main ALIAS benchmark_main)
set_target_properties(benchmark_main PROPERTIES
OUTPUT_NAME "benchmark_main"
VERSION ${GENERIC_LIB_VERSION}
@@ -70,8 +53,13 @@ set_target_properties(benchmark_main PROPERTIES
target_include_directories(benchmark PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
)
-target_link_libraries(benchmark_main benchmark::benchmark)
+target_link_libraries(benchmark_main benchmark)
+set(include_install_dir "include")
+set(lib_install_dir "lib/")
+set(bin_install_dir "bin/")
+set(config_install_dir "lib/cmake/${PROJECT_NAME}")
+set(pkgconfig_install_dir "lib/pkgconfig")
set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
@@ -95,26 +83,26 @@ if (BENCHMARK_ENABLE_INSTALL)
install(
TARGETS benchmark benchmark_main
EXPORT ${targets_export_name}
- ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
- LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
- RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
- INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+ ARCHIVE DESTINATION ${lib_install_dir}
+ LIBRARY DESTINATION ${lib_install_dir}
+ RUNTIME DESTINATION ${bin_install_dir}
+ INCLUDES DESTINATION ${include_install_dir})
install(
DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
- DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
+ DESTINATION ${include_install_dir}
FILES_MATCHING PATTERN "*.*h")
install(
FILES "${project_config}" "${version_config}"
- DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
+ DESTINATION "${config_install_dir}")
install(
FILES "${pkg_config}"
- DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
+ DESTINATION "${pkgconfig_install_dir}")
install(
EXPORT "${targets_export_name}"
NAMESPACE "${namespace}"
- DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
+ DESTINATION "${config_install_dir}")
endif()
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark.cc
index 89f64967bf1..aab07500af4 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark.cc
@@ -13,7 +13,6 @@
// limitations under the License.
#include "benchmark/benchmark.h"
-
#include "benchmark_api_internal.h"
#include "benchmark_runner.h"
#include "internal_macros.h"
@@ -33,10 +32,7 @@
#include <cstdlib>
#include <fstream>
#include <iostream>
-#include <limits>
-#include <map>
#include <memory>
-#include <random>
#include <string>
#include <thread>
#include <utility>
@@ -49,94 +45,85 @@
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
-#include "perf_counters.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"
-// Print a list of benchmarks. This option overrides all other options.
-DEFINE_bool(benchmark_list_tests, false);
-
-// A regular expression that specifies the set of benchmarks to execute. If
-// this flag is empty, or if this flag is the string \"all\", all benchmarks
-// linked into the binary are run.
-DEFINE_string(benchmark_filter, ".");
-
-// Minimum number of seconds we should run benchmark before results are
-// considered significant. For cpu-time based tests, this is the lower bound
-// on the total cpu time used by all threads that make up the test. For
-// real-time based tests, this is the lower bound on the elapsed time of the
-// benchmark execution, regardless of number of threads.
-DEFINE_double(benchmark_min_time, 0.5);
-
-// The number of runs of each benchmark. If greater than 1, the mean and
-// standard deviation of the runs will be reported.
-DEFINE_int32(benchmark_repetitions, 1);
-
-// If set, enable random interleaving of repetitions of all benchmarks.
-// See http://github.com/google/benchmark/issues/1051 for details.
-DEFINE_bool(benchmark_enable_random_interleaving, false);
-
-// Report the result of each benchmark repetitions. When 'true' is specified
-// only the mean, standard deviation, and other statistics are reported for
-// repeated benchmarks. Affects all reporters.
-DEFINE_bool(benchmark_report_aggregates_only, false);
-
-// Display the result of each benchmark repetitions. When 'true' is specified
-// only the mean, standard deviation, and other statistics are displayed for
-// repeated benchmarks. Unlike benchmark_report_aggregates_only, only affects
-// the display reporter, but *NOT* file reporter, which will still contain
-// all the output.
-DEFINE_bool(benchmark_display_aggregates_only, false);
-
-// The format to use for console output.
-// Valid values are 'console', 'json', or 'csv'.
-DEFINE_string(benchmark_format, "console");
-
-// The format to use for file output.
-// Valid values are 'console', 'json', or 'csv'.
-DEFINE_string(benchmark_out_format, "json");
-
-// The file to write additional output to.
-DEFINE_string(benchmark_out, "");
-
-// Whether to use colors in the output. Valid values:
-// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if
-// the output is being sent to a terminal and the TERM environment variable is
-// set to a terminal type that supports colors.
-DEFINE_string(benchmark_color, "auto");
-
-// Whether to use tabular format when printing user counters to the console.
-// Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false.
-DEFINE_bool(benchmark_counters_tabular, false);
-
-// The level of verbose logging to output
-DEFINE_int32(v, 0);
-
-// List of additional perf counters to collect, in libpfm format. For more
-// information about libpfm: https://man7.org/linux/man-pages/man3/libpfm.3.html
-DEFINE_string(benchmark_perf_counters, "");
+DEFINE_bool(benchmark_list_tests, false,
+ "Print a list of benchmarks. This option overrides all other "
+ "options.");
+
+DEFINE_string(benchmark_filter, ".",
+ "A regular expression that specifies the set of benchmarks "
+ "to execute. If this flag is empty, or if this flag is the "
+ "string \"all\", all benchmarks linked into the binary are "
+ "run.");
+
+DEFINE_double(benchmark_min_time, 0.5,
+ "Minimum number of seconds we should run benchmark before "
+ "results are considered significant. For cpu-time based "
+ "tests, this is the lower bound on the total cpu time "
+ "used by all threads that make up the test. For real-time "
+ "based tests, this is the lower bound on the elapsed time "
+ "of the benchmark execution, regardless of number of "
+ "threads.");
+
+DEFINE_int32(benchmark_repetitions, 1,
+ "The number of runs of each benchmark. If greater than 1, the "
+ "mean and standard deviation of the runs will be reported.");
+
+DEFINE_bool(
+ benchmark_report_aggregates_only, false,
+ "Report the result of each benchmark repetitions. When 'true' is specified "
+ "only the mean, standard deviation, and other statistics are reported for "
+ "repeated benchmarks. Affects all reporters.");
+
+DEFINE_bool(
+ benchmark_display_aggregates_only, false,
+ "Display the result of each benchmark repetitions. When 'true' is "
+ "specified only the mean, standard deviation, and other statistics are "
+ "displayed for repeated benchmarks. Unlike "
+ "benchmark_report_aggregates_only, only affects the display reporter, but "
+ "*NOT* file reporter, which will still contain all the output.");
+
+DEFINE_string(benchmark_format, "console",
+ "The format to use for console output. Valid values are "
+ "'console', 'json', or 'csv'.");
+
+DEFINE_string(benchmark_out_format, "json",
+ "The format to use for file output. Valid values are "
+ "'console', 'json', or 'csv'.");
+
+DEFINE_string(benchmark_out, "", "The file to write additional output to");
+
+DEFINE_string(benchmark_color, "auto",
+ "Whether to use colors in the output. Valid values: "
+ "'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use "
+ "colors if the output is being sent to a terminal and the TERM "
+ "environment variable is set to a terminal type that supports "
+ "colors.");
+
+DEFINE_bool(benchmark_counters_tabular, false,
+ "Whether to use tabular format when printing user counters to "
+ "the console. Valid values: 'true'/'yes'/1, 'false'/'no'/0."
+ "Defaults to false.");
+
+DEFINE_int32(v, 0, "The level of verbose logging to output");
namespace benchmark {
-namespace internal {
-
-// Extra context to include in the output formatted as comma-separated key-value
-// pairs. Kept internal as it's only used for parsing from env/command line.
-DEFINE_kvpairs(benchmark_context, {});
-std::map<std::string, std::string>* global_context = nullptr;
+namespace internal {
// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile*) {}
} // namespace internal
-State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
- int thread_i, int n_threads, internal::ThreadTimer* timer,
- internal::ThreadManager* manager,
- internal::PerfCountersMeasurement* perf_counters_measurement)
+State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
+ int n_threads, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager)
: total_iterations_(0),
batch_leftover_(0),
max_iterations(max_iters),
@@ -149,8 +136,7 @@ State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
thread_index(thread_i),
threads(n_threads),
timer_(timer),
- manager_(manager),
- perf_counters_measurement_(perf_counters_measurement) {
+ manager_(manager) {
CHECK(max_iterations != 0) << "At least one iteration must be run";
CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
@@ -162,7 +148,7 @@ State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
// which must be suppressed.
#if defined(__INTEL_COMPILER)
#pragma warning push
-#pragma warning(disable : 1875)
+#pragma warning(disable:1875)
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
@@ -183,23 +169,11 @@ void State::PauseTiming() {
// Add in time accumulated so far
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StopTimer();
- if (perf_counters_measurement_) {
- auto measurements = perf_counters_measurement_->StopAndGetMeasurements();
- for (const auto& name_and_measurement : measurements) {
- auto name = name_and_measurement.first;
- auto measurement = name_and_measurement.second;
- CHECK_EQ(counters[name], 0.0);
- counters[name] = Counter(measurement, Counter::kAvgIterations);
- }
- }
}
void State::ResumeTiming() {
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StartTimer();
- if (perf_counters_measurement_) {
- perf_counters_measurement_->Start();
- }
}
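For orientation, the usual caller-side pattern for the PauseTiming()/ResumeTiming() pair above; the benchmark itself is a hypothetical sketch:

    #include <algorithm>
    #include <numeric>
    #include <random>
    #include <vector>

    static void BM_SortShuffled(benchmark::State& state) {
      std::vector<int> v(state.range(0));
      std::mt19937 rng(42);
      for (auto _ : state) {
        state.PauseTiming();  // exclude the re-shuffle from the measurement
        std::iota(v.begin(), v.end(), 0);
        std::shuffle(v.begin(), v.end(), rng);
        state.ResumeTiming();
        std::sort(v.begin(), v.end());
      }
    }
    BENCHMARK(BM_SortShuffled)->Arg(1 << 12);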
void State::SkipWithError(const char* msg) {
@@ -247,37 +221,6 @@ void State::FinishKeepRunning() {
namespace internal {
namespace {
-// Flushes streams after invoking reporter methods that write to them. This
-// ensures users get timely updates even when streams are not line-buffered.
-void FlushStreams(BenchmarkReporter* reporter) {
- if (!reporter) return;
- std::flush(reporter->GetOutputStream());
- std::flush(reporter->GetErrorStream());
-}
-
-// Reports in both display and file reporters.
-void Report(BenchmarkReporter* display_reporter,
- BenchmarkReporter* file_reporter, const RunResults& run_results) {
- auto report_one = [](BenchmarkReporter* reporter, bool aggregates_only,
- const RunResults& results) {
- assert(reporter);
- // If there are no aggregates, do output non-aggregates.
- aggregates_only &= !results.aggregates_only.empty();
- if (!aggregates_only) reporter->ReportRuns(results.non_aggregates);
- if (!results.aggregates_only.empty())
- reporter->ReportRuns(results.aggregates_only);
- };
-
- report_one(display_reporter, run_results.display_report_aggregates_only,
- run_results);
- if (file_reporter)
- report_one(file_reporter, run_results.file_report_aggregates_only,
- run_results);
-
- FlushStreams(display_reporter);
- FlushStreams(file_reporter);
-}
-
void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter) {
@@ -290,10 +233,10 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
size_t stat_field_width = 0;
for (const BenchmarkInstance& benchmark : benchmarks) {
name_field_width =
- std::max<size_t>(name_field_width, benchmark.name().str().size());
- might_have_aggregates |= benchmark.repetitions() > 1;
+ std::max<size_t>(name_field_width, benchmark.name.size());
+ might_have_aggregates |= benchmark.repetitions > 1;
- for (const auto& Stat : benchmark.statistics())
+ for (const auto& Stat : *benchmark.statistics)
stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
}
if (might_have_aggregates) name_field_width += 1 + stat_field_width;
@@ -302,86 +245,50 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
BenchmarkReporter::Context context;
context.name_field_width = name_field_width;
- // Keep track of running times of all instances of each benchmark family.
- std::map<int /*family_index*/, BenchmarkReporter::PerFamilyRunReports>
- per_family_reports;
+ // Keep track of running times of all instances of current benchmark
+ std::vector<BenchmarkReporter::Run> complexity_reports;
+
+ // We flush streams after invoking reporter methods that write to them. This
+ // ensures users get timely updates even when streams are not line-buffered.
+ auto flushStreams = [](BenchmarkReporter* reporter) {
+ if (!reporter) return;
+ std::flush(reporter->GetOutputStream());
+ std::flush(reporter->GetErrorStream());
+ };
if (display_reporter->ReportContext(context) &&
(!file_reporter || file_reporter->ReportContext(context))) {
- FlushStreams(display_reporter);
- FlushStreams(file_reporter);
-
- size_t num_repetitions_total = 0;
-
- std::vector<internal::BenchmarkRunner> runners;
- runners.reserve(benchmarks.size());
- for (const BenchmarkInstance& benchmark : benchmarks) {
- BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr;
- if (benchmark.complexity() != oNone)
- reports_for_family = &per_family_reports[benchmark.family_index()];
-
- runners.emplace_back(benchmark, reports_for_family);
- int num_repeats_of_this_instance = runners.back().GetNumRepeats();
- num_repetitions_total += num_repeats_of_this_instance;
- if (reports_for_family)
- reports_for_family->num_runs_total += num_repeats_of_this_instance;
- }
- assert(runners.size() == benchmarks.size() && "Unexpected runner count.");
-
- std::vector<int> repetition_indices;
- repetition_indices.reserve(num_repetitions_total);
- for (size_t runner_index = 0, num_runners = runners.size();
- runner_index != num_runners; ++runner_index) {
- const internal::BenchmarkRunner& runner = runners[runner_index];
- std::fill_n(std::back_inserter(repetition_indices),
- runner.GetNumRepeats(), runner_index);
- }
- assert(repetition_indices.size() == num_repetitions_total &&
- "Unexpected number of repetition indexes.");
-
- if (FLAGS_benchmark_enable_random_interleaving) {
- std::random_device rd;
- std::mt19937 g(rd());
- std::shuffle(repetition_indices.begin(), repetition_indices.end(), g);
- }
-
- for (size_t repetition_index : repetition_indices) {
- internal::BenchmarkRunner& runner = runners[repetition_index];
- runner.DoOneRepetition();
- if (runner.HasRepeatsRemaining()) continue;
- // FIXME: report each repetition separately, not all of them in bulk.
-
- RunResults run_results = runner.GetResults();
-
- // Maybe calculate complexity report
- if (const auto* reports_for_family = runner.GetReportsForFamily()) {
- if (reports_for_family->num_runs_done ==
- reports_for_family->num_runs_total) {
- auto additional_run_stats = ComputeBigO(reports_for_family->Runs);
- run_results.aggregates_only.insert(run_results.aggregates_only.end(),
- additional_run_stats.begin(),
- additional_run_stats.end());
- per_family_reports.erase(
- (int)reports_for_family->Runs.front().family_index);
- }
- }
-
- Report(display_reporter, file_reporter, run_results);
+ flushStreams(display_reporter);
+ flushStreams(file_reporter);
+
+ for (const auto& benchmark : benchmarks) {
+ RunResults run_results = RunBenchmark(benchmark, &complexity_reports);
+
+ auto report = [&run_results](BenchmarkReporter* reporter,
+ bool report_aggregates_only) {
+ assert(reporter);
+ // If there are no aggregates, do output non-aggregates.
+ report_aggregates_only &= !run_results.aggregates_only.empty();
+ if (!report_aggregates_only)
+ reporter->ReportRuns(run_results.non_aggregates);
+ if (!run_results.aggregates_only.empty())
+ reporter->ReportRuns(run_results.aggregates_only);
+ };
+
+ report(display_reporter, run_results.display_report_aggregates_only);
+ if (file_reporter)
+ report(file_reporter, run_results.file_report_aggregates_only);
+
+ flushStreams(display_reporter);
+ flushStreams(file_reporter);
}
}
display_reporter->Finalize();
if (file_reporter) file_reporter->Finalize();
- FlushStreams(display_reporter);
- FlushStreams(file_reporter);
+ flushStreams(display_reporter);
+ flushStreams(file_reporter);
}
-// Disable deprecated warnings temporarily because we need to reference
-// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-#endif
-
std::unique_ptr<BenchmarkReporter> CreateReporter(
std::string const& name, ConsoleReporter::OutputOptions output_opts) {
typedef std::unique_ptr<BenchmarkReporter> PtrType;
@@ -397,10 +304,6 @@ std::unique_ptr<BenchmarkReporter> CreateReporter(
}
}
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
-
} // end namespace
bool IsZero(double n) {
@@ -409,7 +312,7 @@ bool IsZero(double n) {
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
int output_opts = ConsoleReporter::OO_Defaults;
- auto is_benchmark_color = [force_no_color]() -> bool {
+ auto is_benchmark_color = [force_no_color] () -> bool {
if (force_no_color) {
return false;
}
@@ -469,7 +372,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
if (!fname.empty()) {
output_file.open(fname);
if (!output_file.is_open()) {
- Err << "invalid file name: '" << fname << "'" << std::endl;
+ Err << "invalid file name: '" << fname << std::endl;
std::exit(1);
}
if (!file_reporter) {
@@ -490,8 +393,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
}
if (FLAGS_benchmark_list_tests) {
- for (auto const& benchmark : benchmarks)
- Out << benchmark.name().str() << "\n";
+ for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n";
} else {
internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
}
@@ -503,16 +405,6 @@ void RegisterMemoryManager(MemoryManager* manager) {
internal::memory_manager = manager;
}
-void AddCustomContext(const std::string& key, const std::string& value) {
- if (internal::global_context == nullptr) {
- internal::global_context = new std::map<std::string, std::string>();
- }
- if (!internal::global_context->emplace(key, value).second) {
- std::cerr << "Failed to add custom context \"" << key << "\" as it already "
- << "exists with value \"" << value << "\"\n";
- }
-}
-
namespace internal {
void PrintUsageAndExit() {
@@ -522,7 +414,6 @@ void PrintUsageAndExit() {
" [--benchmark_filter=<regex>]\n"
" [--benchmark_min_time=<min_time>]\n"
" [--benchmark_repetitions=<num_repetitions>]\n"
- " [--benchmark_enable_random_interleaving={true|false}]\n"
" [--benchmark_report_aggregates_only={true|false}]\n"
" [--benchmark_display_aggregates_only={true|false}]\n"
" [--benchmark_format=<console|json|csv>]\n"
@@ -530,7 +421,6 @@ void PrintUsageAndExit() {
" [--benchmark_out_format=<json|console|csv>]\n"
" [--benchmark_color={auto|true|false}]\n"
" [--benchmark_counters_tabular={true|false}]\n"
- " [--benchmark_context=<key>=<value>,...]\n"
" [--v=<verbosity>]\n");
exit(0);
}
@@ -539,7 +429,7 @@ void ParseCommandLineFlags(int* argc, char** argv) {
using namespace benchmark;
BenchmarkReporter::Context::executable_name =
(argc && *argc > 0) ? argv[0] : "unknown";
- for (int i = 1; argc && i < *argc; ++i) {
+ for (int i = 1; i < *argc; ++i) {
if (ParseBoolFlag(argv[i], "benchmark_list_tests",
&FLAGS_benchmark_list_tests) ||
ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
@@ -547,8 +437,6 @@ void ParseCommandLineFlags(int* argc, char** argv) {
&FLAGS_benchmark_min_time) ||
ParseInt32Flag(argv[i], "benchmark_repetitions",
&FLAGS_benchmark_repetitions) ||
- ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving",
- &FLAGS_benchmark_enable_random_interleaving) ||
ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
&FLAGS_benchmark_report_aggregates_only) ||
ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
@@ -563,10 +451,6 @@ void ParseCommandLineFlags(int* argc, char** argv) {
ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
ParseBoolFlag(argv[i], "benchmark_counters_tabular",
&FLAGS_benchmark_counters_tabular) ||
- ParseStringFlag(argv[i], "benchmark_perf_counters",
- &FLAGS_benchmark_perf_counters) ||
- ParseKeyValueFlag(argv[i], "benchmark_context",
- &FLAGS_benchmark_context) ||
ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
@@ -577,17 +461,13 @@ void ParseCommandLineFlags(int* argc, char** argv) {
}
}
for (auto const* flag :
- {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format}) {
+ {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
if (*flag != "console" && *flag != "json" && *flag != "csv") {
PrintUsageAndExit();
}
- }
if (FLAGS_benchmark_color.empty()) {
PrintUsageAndExit();
}
- for (const auto& kv : FLAGS_benchmark_context) {
- AddCustomContext(kv.first, kv.second);
- }
}
int InitializeStreams() {
@@ -602,10 +482,6 @@ void Initialize(int* argc, char** argv) {
internal::LogLevel() = FLAGS_v;
}
-void Shutdown() {
- delete internal::global_context;
-}
-
bool ReportUnrecognizedArguments(int argc, char** argv) {
for (int i = 1; i < argc; ++i) {
fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_api_internal.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_api_internal.cc
index 89da519afc8..8d3108363b8 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_api_internal.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_api_internal.cc
@@ -1,94 +1,15 @@
#include "benchmark_api_internal.h"
-#include <cinttypes>
-
-#include "string_util.h"
-
namespace benchmark {
namespace internal {
-BenchmarkInstance::BenchmarkInstance(Benchmark* benchmark, int family_idx,
- int per_family_instance_idx,
- const std::vector<int64_t>& args,
- int thread_count)
- : benchmark_(*benchmark),
- family_index_(family_idx),
- per_family_instance_index_(per_family_instance_idx),
- aggregation_report_mode_(benchmark_.aggregation_report_mode_),
- args_(args),
- time_unit_(benchmark_.time_unit_),
- measure_process_cpu_time_(benchmark_.measure_process_cpu_time_),
- use_real_time_(benchmark_.use_real_time_),
- use_manual_time_(benchmark_.use_manual_time_),
- complexity_(benchmark_.complexity_),
- complexity_lambda_(benchmark_.complexity_lambda_),
- statistics_(benchmark_.statistics_),
- repetitions_(benchmark_.repetitions_),
- min_time_(benchmark_.min_time_),
- iterations_(benchmark_.iterations_),
- threads_(thread_count) {
- name_.function_name = benchmark_.name_;
-
- size_t arg_i = 0;
- for (const auto& arg : args) {
- if (!name_.args.empty()) {
- name_.args += '/';
- }
-
- if (arg_i < benchmark->arg_names_.size()) {
- const auto& arg_name = benchmark_.arg_names_[arg_i];
- if (!arg_name.empty()) {
- name_.args += StrFormat("%s:", arg_name.c_str());
- }
- }
-
- name_.args += StrFormat("%" PRId64, arg);
- ++arg_i;
- }
-
- if (!IsZero(benchmark->min_time_)) {
- name_.min_time = StrFormat("min_time:%0.3f", benchmark_.min_time_);
- }
-
- if (benchmark_.iterations_ != 0) {
- name_.iterations = StrFormat(
- "iterations:%lu", static_cast<unsigned long>(benchmark_.iterations_));
- }
-
- if (benchmark_.repetitions_ != 0) {
- name_.repetitions = StrFormat("repeats:%d", benchmark_.repetitions_);
- }
-
- if (benchmark_.measure_process_cpu_time_) {
- name_.time_type = "process_time";
- }
-
- if (benchmark_.use_manual_time_) {
- if (!name_.time_type.empty()) {
- name_.time_type += '/';
- }
- name_.time_type += "manual_time";
- } else if (benchmark_.use_real_time_) {
- if (!name_.time_type.empty()) {
- name_.time_type += '/';
- }
- name_.time_type += "real_time";
- }
-
- if (!benchmark_.thread_counts_.empty()) {
- name_.threads = StrFormat("threads:%d", threads_);
- }
-}
-
State BenchmarkInstance::Run(
- IterationCount iters, int thread_id, internal::ThreadTimer* timer,
- internal::ThreadManager* manager,
- internal::PerfCountersMeasurement* perf_counters_measurement) const {
- State st(iters, args_, thread_id, threads_, timer, manager,
- perf_counters_measurement);
- benchmark_.Run(st);
+ size_t iters, int thread_id, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager) const {
+ State st(iters, arg, thread_id, threads, timer, manager);
+ benchmark->Run(st);
return st;
}
-} // namespace internal
-} // namespace benchmark
+} // internal
+} // benchmark
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_api_internal.h b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_api_internal.h
index 9296b7d2c81..0524a85c01d 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_api_internal.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_api_internal.h
@@ -1,6 +1,9 @@
#ifndef BENCHMARK_API_INTERNAL_H
#define BENCHMARK_API_INTERNAL_H
+#include "benchmark/benchmark.h"
+#include "commandlineflags.h"
+
#include <cmath>
#include <iosfwd>
#include <limits>
@@ -8,60 +11,31 @@
#include <string>
#include <vector>
-#include "benchmark/benchmark.h"
-#include "commandlineflags.h"
-
namespace benchmark {
namespace internal {
// Information kept per benchmark we may want to run
-class BenchmarkInstance {
- public:
- BenchmarkInstance(Benchmark* benchmark, int family_index,
- int per_family_instance_index,
- const std::vector<int64_t>& args, int threads);
-
- const BenchmarkName& name() const { return name_; }
- int family_index() const { return family_index_; }
- int per_family_instance_index() const { return per_family_instance_index_; }
- AggregationReportMode aggregation_report_mode() const {
- return aggregation_report_mode_;
- }
- TimeUnit time_unit() const { return time_unit_; }
- bool measure_process_cpu_time() const { return measure_process_cpu_time_; }
- bool use_real_time() const { return use_real_time_; }
- bool use_manual_time() const { return use_manual_time_; }
- BigO complexity() const { return complexity_; }
- BigOFunc& complexity_lambda() const { return *complexity_lambda_; }
- const std::vector<Statistics>& statistics() const { return statistics_; }
- int repetitions() const { return repetitions_; }
- double min_time() const { return min_time_; }
- IterationCount iterations() const { return iterations_; }
- int threads() const { return threads_; }
-
- State Run(IterationCount iters, int thread_id, internal::ThreadTimer* timer,
- internal::ThreadManager* manager,
- internal::PerfCountersMeasurement* perf_counters_measurement) const;
-
- private:
- BenchmarkName name_;
- Benchmark& benchmark_;
- const int family_index_;
- const int per_family_instance_index_;
- AggregationReportMode aggregation_report_mode_;
- const std::vector<int64_t>& args_;
- TimeUnit time_unit_;
- bool measure_process_cpu_time_;
- bool use_real_time_;
- bool use_manual_time_;
- BigO complexity_;
- BigOFunc* complexity_lambda_;
- UserCounters counters_;
- const std::vector<Statistics>& statistics_;
- int repetitions_;
- double min_time_;
- IterationCount iterations_;
- int threads_; // Number of concurrent threads to us
+struct BenchmarkInstance {
+ std::string name;
+ Benchmark* benchmark;
+ AggregationReportMode aggregation_report_mode;
+ std::vector<int64_t> arg;
+ TimeUnit time_unit;
+ int range_multiplier;
+ bool use_real_time;
+ bool use_manual_time;
+ BigO complexity;
+ BigOFunc* complexity_lambda;
+ UserCounters counters;
+ const std::vector<Statistics>* statistics;
+ bool last_benchmark_instance;
+ int repetitions;
+ double min_time;
+ size_t iterations;
+  int threads; // Number of concurrent threads to use
+
+ State Run(size_t iters, int thread_id, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager) const;
};
bool FindBenchmarksInternal(const std::string& re,
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_register.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_register.cc
index 574462220e7..f17f5b223ce 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_register.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_register.cc
@@ -24,7 +24,6 @@
#include <algorithm>
#include <atomic>
-#include <cinttypes>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
@@ -32,7 +31,6 @@
#include <fstream>
#include <iostream>
#include <memory>
-#include <numeric>
#include <sstream>
#include <thread>
@@ -129,13 +127,8 @@ bool BenchmarkFamilies::FindBenchmarks(
// Special list of thread counts to use when none are specified
const std::vector<int> one_thread = {1};
- int next_family_index = 0;
-
MutexLock l(mutex_);
for (std::unique_ptr<Benchmark>& family : families_) {
- int family_index = next_family_index;
- int per_family_instance_index = 0;
-
// Family was deleted or benchmark doesn't match
if (!family) continue;
@@ -155,24 +148,71 @@ bool BenchmarkFamilies::FindBenchmarks(
}
// reserve in the special case the regex ".", since we know the final
// family size.
- if (spec == ".") benchmarks->reserve(benchmarks->size() + family_size);
+ if (spec == ".") benchmarks->reserve(family_size);
for (auto const& args : family->args_) {
for (int num_threads : *thread_counts) {
- BenchmarkInstance instance(family.get(), family_index,
- per_family_instance_index, args,
- num_threads);
+ BenchmarkInstance instance;
+ instance.name = family->name_;
+ instance.benchmark = family.get();
+ instance.aggregation_report_mode = family->aggregation_report_mode_;
+ instance.arg = args;
+ instance.time_unit = family->time_unit_;
+ instance.range_multiplier = family->range_multiplier_;
+ instance.min_time = family->min_time_;
+ instance.iterations = family->iterations_;
+ instance.repetitions = family->repetitions_;
+ instance.use_real_time = family->use_real_time_;
+ instance.use_manual_time = family->use_manual_time_;
+ instance.complexity = family->complexity_;
+ instance.complexity_lambda = family->complexity_lambda_;
+ instance.statistics = &family->statistics_;
+ instance.threads = num_threads;
+
+ // Add arguments to instance name
+ size_t arg_i = 0;
+ for (auto const& arg : args) {
+ instance.name += "/";
+
+ if (arg_i < family->arg_names_.size()) {
+ const auto& arg_name = family->arg_names_[arg_i];
+ if (!arg_name.empty()) {
+ instance.name +=
+ StrFormat("%s:", family->arg_names_[arg_i].c_str());
+ }
+ }
+
+ // we know that the args are always non-negative (see 'AddRange()'),
+ // thus print as 'unsigned'. BUT, do a cast due to the 32-bit builds.
+ instance.name += StrFormat("%lu", static_cast<unsigned long>(arg));
+ ++arg_i;
+ }
- const auto full_name = instance.name().str();
- if ((re.Match(full_name) && !isNegativeFilter) ||
- (!re.Match(full_name) && isNegativeFilter)) {
- benchmarks->push_back(std::move(instance));
+ if (!IsZero(family->min_time_))
+ instance.name += StrFormat("/min_time:%0.3f", family->min_time_);
+ if (family->iterations_ != 0) {
+ instance.name +=
+ StrFormat("/iterations:%lu",
+ static_cast<unsigned long>(family->iterations_));
+ }
+ if (family->repetitions_ != 0)
+ instance.name += StrFormat("/repeats:%d", family->repetitions_);
+
+ if (family->use_manual_time_) {
+ instance.name += "/manual_time";
+ } else if (family->use_real_time_) {
+ instance.name += "/real_time";
+ }
- ++per_family_instance_index;
+ // Add the number of threads used to the name
+ if (!family->thread_counts_.empty()) {
+ instance.name += StrFormat("/threads:%d", instance.threads);
+ }
- // Only bump the next family index once we've established that
- // at least one instance of this family will be run.
- if (next_family_index == family_index) ++next_family_index;
+ if ((re.Match(instance.name) && !isNegativeFilter) ||
+ (!re.Match(instance.name) && isNegativeFilter)) {
+ instance.last_benchmark_instance = (&args == &family->args_.back());
+ benchmarks->push_back(std::move(instance));
}
}
}
@@ -207,7 +247,6 @@ Benchmark::Benchmark(const char* name)
min_time_(0),
iterations_(0),
repetitions_(0),
- measure_process_cpu_time_(false),
use_real_time_(false),
use_manual_time_(false),
complexity_(oNone),
@@ -219,11 +258,6 @@ Benchmark::Benchmark(const char* name)
Benchmark::~Benchmark() {}
-Benchmark* Benchmark::Name(const std::string& name) {
- SetName(name.c_str());
- return this;
-}
-
Benchmark* Benchmark::Arg(int64_t x) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
args_.push_back({x});
@@ -250,41 +284,33 @@ Benchmark* Benchmark::Ranges(
const std::vector<std::pair<int64_t, int64_t>>& ranges) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
std::vector<std::vector<int64_t>> arglists(ranges.size());
+ std::size_t total = 1;
for (std::size_t i = 0; i < ranges.size(); i++) {
AddRange(&arglists[i], ranges[i].first, ranges[i].second,
range_multiplier_);
+ total *= arglists[i].size();
}
- ArgsProduct(arglists);
+ std::vector<std::size_t> ctr(arglists.size(), 0);
- return this;
-}
-
-Benchmark* Benchmark::ArgsProduct(
- const std::vector<std::vector<int64_t>>& arglists) {
- CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(arglists.size()));
-
- std::vector<std::size_t> indices(arglists.size());
- const std::size_t total = std::accumulate(
- std::begin(arglists), std::end(arglists), std::size_t{1},
- [](const std::size_t res, const std::vector<int64_t>& arglist) {
- return res * arglist.size();
- });
- std::vector<int64_t> args;
- args.reserve(arglists.size());
for (std::size_t i = 0; i < total; i++) {
- for (std::size_t arg = 0; arg < arglists.size(); arg++) {
- args.push_back(arglists[arg][indices[arg]]);
+ std::vector<int64_t> tmp;
+ tmp.reserve(arglists.size());
+
+ for (std::size_t j = 0; j < arglists.size(); j++) {
+ tmp.push_back(arglists[j].at(ctr[j]));
}
- args_.push_back(args);
- args.clear();
- std::size_t arg = 0;
- do {
- indices[arg] = (indices[arg] + 1) % arglists[arg].size();
- } while (indices[arg++] == 0 && arg < arglists.size());
- }
+ args_.push_back(std::move(tmp));
+ for (std::size_t j = 0; j < arglists.size(); j++) {
+ if (ctr[j] + 1 < arglists[j].size()) {
+ ++ctr[j];
+ break;
+ }
+ ctr[j] = 0;
+ }
+ }
return this;
}
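A worked example of the counter ("odometer") loop above, assuming the default range multiplier of 8 and a hypothetical two-argument benchmark: Ranges({{8, 64}, {1, 2}}) first expands each pair via AddRange() to {8, 64} and {1, 2}, and the loop then registers the tuples with ctr[0] as the fastest-changing digit:

    // Registered argument tuples, in order: (8,1), (64,1), (8,2), (64,2).
    BENCHMARK(BM_SetInsert)->Ranges({{8, 64}, {1, 2}});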
@@ -302,6 +328,7 @@ Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+ CHECK_GE(start, 0);
CHECK_LE(start, limit);
for (int64_t arg = start; arg <= limit; arg += step) {
args_.push_back({arg});
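DenseRange() above simply walks start..limit in fixed steps, now with the added requirement that start be non-negative; for example (hypothetical benchmark name):

    // Registers the arguments 0, 128, 256, ..., 1024 (nine runs).
    BENCHMARK(BM_Hash)->DenseRange(0, 1024, 128);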
@@ -333,7 +360,7 @@ Benchmark* Benchmark::MinTime(double t) {
return this;
}
-Benchmark* Benchmark::Iterations(IterationCount n) {
+Benchmark* Benchmark::Iterations(size_t n) {
CHECK(n > 0);
CHECK(IsZero(min_time_));
iterations_ = n;
@@ -367,12 +394,6 @@ Benchmark* Benchmark::DisplayAggregatesOnly(bool value) {
return this;
}
-Benchmark* Benchmark::MeasureProcessCPUTime() {
- // Can be used together with UseRealTime() / UseManualTime().
- measure_process_cpu_time_ = true;
- return this;
-}
-
Benchmark* Benchmark::UseRealTime() {
CHECK(!use_manual_time_)
<< "Cannot set UseRealTime and UseManualTime simultaneously.";
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_register.h b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_register.h
index 09496607f22..0705e219f2f 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_register.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_register.h
@@ -1,108 +1,33 @@
#ifndef BENCHMARK_REGISTER_H
#define BENCHMARK_REGISTER_H
-#include <limits>
#include <vector>
#include "check.h"
-namespace benchmark {
-namespace internal {
-
-// Append the powers of 'mult' in the closed interval [lo, hi].
-// Returns iterator to the start of the inserted range.
template <typename T>
-typename std::vector<T>::iterator
-AddPowers(std::vector<T>* dst, T lo, T hi, int mult) {
+void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
CHECK_GE(lo, 0);
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
- const size_t start_offset = dst->size();
+ // Add "lo"
+ dst->push_back(lo);
static const T kmax = std::numeric_limits<T>::max();
- // Space out the values in multiples of "mult"
- for (T i = static_cast<T>(1); i <= hi; i *= mult) {
- if (i >= lo) {
+ // Now space out the benchmarks in multiples of "mult"
+ for (T i = 1; i < kmax / mult; i *= mult) {
+ if (i >= hi) break;
+ if (i > lo) {
dst->push_back(i);
}
- // Break the loop here since multiplying by
- // 'mult' would move outside of the range of T
- if (i > kmax / mult) break;
- }
-
- return dst->begin() + start_offset;
-}
-
-template <typename T>
-void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
- // We negate lo and hi so we require that they cannot be equal to 'min'.
- CHECK_GT(lo, std::numeric_limits<T>::min());
- CHECK_GT(hi, std::numeric_limits<T>::min());
- CHECK_GE(hi, lo);
- CHECK_LE(hi, 0);
-
- // Add positive powers, then negate and reverse.
- // Casts necessary since small integers get promoted
- // to 'int' when negating.
- const auto lo_complement = static_cast<T>(-lo);
- const auto hi_complement = static_cast<T>(-hi);
-
- const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
-
- std::for_each(it, dst->end(), [](T& t) { t *= -1; });
- std::reverse(it, dst->end());
-}
-
-template <typename T>
-void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
- static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
- "Args type must be a signed integer");
-
- CHECK_GE(hi, lo);
- CHECK_GE(mult, 2);
-
- // Add "lo"
- dst->push_back(lo);
-
- // Handle lo == hi as a special case, so we then know
- // lo < hi and so it is safe to add 1 to lo and subtract 1
- // from hi without falling outside of the range of T.
- if (lo == hi) return;
-
- // Ensure that lo_inner <= hi_inner below.
- if (lo + 1 == hi) {
- dst->push_back(hi);
- return;
}
- // Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive).
- const auto lo_inner = static_cast<T>(lo + 1);
- const auto hi_inner = static_cast<T>(hi - 1);
-
- // Insert negative values
- if (lo_inner < 0) {
- AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult);
- }
-
- // Treat 0 as a special case (see discussion on #762).
- if (lo < 0 && hi >= 0) {
- dst->push_back(0);
- }
-
- // Insert positive values
- if (hi_inner > 0) {
- AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult);
- }
-
- // Add "hi" (if different from last value).
- if (hi != dst->back()) {
+ // Add "hi" (if different from "lo")
+ if (hi != lo) {
dst->push_back(hi);
}
}
-} // namespace internal
-} // namespace benchmark
-
#endif // BENCHMARK_REGISTER_H
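A hedged trace of the 10.0.1 AddRange() kept above, for the values produced by Range(8, 8 << 10) with the default multiplier of 8:

    // AddRange(&dst, T(8), T(8 << 10), /*mult=*/8):
    //   push lo = 8; skip i = 1 and i = 8 (not > lo); push 64, 512, 4096;
    //   stop once i = 32768 >= hi; finally push hi = 8192.
    //   => dst == {8, 64, 512, 4096, 8192}
    // These are exactly the arguments BENCHMARK(BM_memcpy)->Range(8, 8 << 10) registers.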
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_runner.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_runner.cc
index 6742d42dbec..38faeec8e3e 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_runner.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_runner.cc
@@ -13,7 +13,6 @@
// limitations under the License.
#include "benchmark_runner.h"
-
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"
@@ -46,7 +45,6 @@
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
-#include "perf_counters.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
@@ -61,41 +59,34 @@ MemoryManager* memory_manager = nullptr;
namespace {
-static constexpr IterationCount kMaxIterations = 1000000000;
+static const size_t kMaxIterations = 1000000000;
BenchmarkReporter::Run CreateRunReport(
const benchmark::internal::BenchmarkInstance& b,
- const internal::ThreadManager::Result& results,
- IterationCount memory_iterations,
- const MemoryManager::Result& memory_result, double seconds,
- int64_t repetition_index, int64_t repeats) {
+ const internal::ThreadManager::Result& results, size_t memory_iterations,
+ const MemoryManager::Result& memory_result, double seconds) {
// Create report about this benchmark run.
BenchmarkReporter::Run report;
- report.run_name = b.name();
- report.family_index = b.family_index();
- report.per_family_instance_index = b.per_family_instance_index();
+ report.run_name = b.name;
report.error_occurred = results.has_error_;
report.error_message = results.error_message_;
report.report_label = results.report_label_;
// This is the total iterations across all threads.
report.iterations = results.iterations;
- report.time_unit = b.time_unit();
- report.threads = b.threads();
- report.repetition_index = repetition_index;
- report.repetitions = repeats;
+ report.time_unit = b.time_unit;
if (!report.error_occurred) {
- if (b.use_manual_time()) {
+ if (b.use_manual_time) {
report.real_accumulated_time = results.manual_time_used;
} else {
report.real_accumulated_time = results.real_time_used;
}
report.cpu_accumulated_time = results.cpu_time_used;
report.complexity_n = results.complexity_n;
- report.complexity = b.complexity();
- report.complexity_lambda = b.complexity_lambda();
- report.statistics = &b.statistics();
+ report.complexity = b.complexity;
+ report.complexity_lambda = b.complexity_lambda;
+ report.statistics = b.statistics;
report.counters = results.counters;
if (memory_iterations > 0) {
@@ -107,24 +98,18 @@ BenchmarkReporter::Run CreateRunReport(
report.max_bytes_used = memory_result.max_bytes_used;
}
- internal::Finish(&report.counters, results.iterations, seconds,
- b.threads());
+ internal::Finish(&report.counters, results.iterations, seconds, b.threads);
}
return report;
}
// Execute one thread of benchmark b for the specified number of iterations.
-// Adds the stats collected for the thread into manager->results.
-void RunInThread(const BenchmarkInstance* b, IterationCount iters,
- int thread_id, ThreadManager* manager,
- PerfCountersMeasurement* perf_counters_measurement) {
- internal::ThreadTimer timer(
- b->measure_process_cpu_time()
- ? internal::ThreadTimer::CreateProcessCpuTime()
- : internal::ThreadTimer::Create());
- State st =
- b->Run(iters, thread_id, &timer, manager, perf_counters_measurement);
- CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
+// Adds the stats collected for the thread into *total.
+void RunInThread(const BenchmarkInstance* b, size_t iters, int thread_id,
+ ThreadManager* manager) {
+ internal::ThreadTimer timer;
+ State st = b->Run(iters, thread_id, &timer, manager);
+ CHECK(st.iterations() >= st.max_iterations)
<< "Benchmark returned before State::KeepRunning() returned false!";
{
MutexLock l(manager->GetBenchmarkMutex());
@@ -139,209 +124,225 @@ void RunInThread(const BenchmarkInstance* b, IterationCount iters,
manager->NotifyThreadComplete();
}
-} // end namespace
-
-BenchmarkRunner::BenchmarkRunner(
- const benchmark::internal::BenchmarkInstance& b_,
- BenchmarkReporter::PerFamilyRunReports* reports_for_family_)
- : b(b_),
- reports_for_family(reports_for_family_),
- min_time(!IsZero(b.min_time()) ? b.min_time() : FLAGS_benchmark_min_time),
- repeats(b.repetitions() != 0 ? b.repetitions()
+class BenchmarkRunner {
+ public:
+ BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
+ std::vector<BenchmarkReporter::Run>* complexity_reports_)
+ : b(b_),
+ complexity_reports(*complexity_reports_),
+ min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
+ repeats(b.repetitions != 0 ? b.repetitions
: FLAGS_benchmark_repetitions),
- has_explicit_iteration_count(b.iterations() != 0),
- pool(b.threads() - 1),
- iters(has_explicit_iteration_count ? b.iterations() : 1),
- perf_counters_measurement(
- PerfCounters::Create(StrSplit(FLAGS_benchmark_perf_counters, ','))),
- perf_counters_measurement_ptr(perf_counters_measurement.IsValid()
- ? &perf_counters_measurement
- : nullptr) {
- run_results.display_report_aggregates_only =
- (FLAGS_benchmark_report_aggregates_only ||
- FLAGS_benchmark_display_aggregates_only);
- run_results.file_report_aggregates_only =
- FLAGS_benchmark_report_aggregates_only;
- if (b.aggregation_report_mode() != internal::ARM_Unspecified) {
+ has_explicit_iteration_count(b.iterations != 0),
+ pool(b.threads - 1),
+ iters(has_explicit_iteration_count ? b.iterations : 1) {
run_results.display_report_aggregates_only =
- (b.aggregation_report_mode() &
- internal::ARM_DisplayReportAggregatesOnly);
+ (FLAGS_benchmark_report_aggregates_only ||
+ FLAGS_benchmark_display_aggregates_only);
run_results.file_report_aggregates_only =
- (b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly);
- CHECK(FLAGS_benchmark_perf_counters.empty() ||
- perf_counters_measurement.IsValid())
- << "Perf counters were requested but could not be set up.";
- }
-}
+ FLAGS_benchmark_report_aggregates_only;
+ if (b.aggregation_report_mode != internal::ARM_Unspecified) {
+ run_results.display_report_aggregates_only =
+ (b.aggregation_report_mode &
+ internal::ARM_DisplayReportAggregatesOnly);
+ run_results.file_report_aggregates_only =
+ (b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
+ }
-BenchmarkRunner::IterationResults BenchmarkRunner::DoNIterations() {
- VLOG(2) << "Running " << b.name().str() << " for " << iters << "\n";
+ for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
+ const bool is_the_first_repetition = repetition_num == 0;
+ DoOneRepetition(is_the_first_repetition);
+ }
- std::unique_ptr<internal::ThreadManager> manager;
- manager.reset(new internal::ThreadManager(b.threads()));
+ // Calculate additional statistics
+ run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
- // Run all but one thread in separate threads
- for (std::size_t ti = 0; ti < pool.size(); ++ti) {
- pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
- manager.get(), perf_counters_measurement_ptr);
+ // Maybe calculate complexity report
+ if ((b.complexity != oNone) && b.last_benchmark_instance) {
+ auto additional_run_stats = ComputeBigO(complexity_reports);
+ run_results.aggregates_only.insert(run_results.aggregates_only.end(),
+ additional_run_stats.begin(),
+ additional_run_stats.end());
+ complexity_reports.clear();
+ }
}
- // And run one thread here directly.
- // (If we were asked to run just one thread, we don't create new threads.)
- // Yes, we need to do this here *after* we start the separate threads.
- RunInThread(&b, iters, 0, manager.get(), perf_counters_measurement_ptr);
- // The main thread has finished. Now let's wait for the other threads.
- manager->WaitForAllThreads();
- for (std::thread& thread : pool) thread.join();
+ RunResults&& get_results() { return std::move(run_results); }
- IterationResults i;
- // Acquire the measurements/counters from the manager, UNDER THE LOCK!
- {
- MutexLock l(manager->GetBenchmarkMutex());
- i.results = manager->results;
- }
+ private:
+ RunResults run_results;
- // And get rid of the manager.
- manager.reset();
+ const benchmark::internal::BenchmarkInstance& b;
+ std::vector<BenchmarkReporter::Run>& complexity_reports;
- // Adjust real/manual time stats since they were reported per thread.
- i.results.real_time_used /= b.threads();
- i.results.manual_time_used /= b.threads();
- // If we were measuring whole-process CPU usage, adjust the CPU time too.
- if (b.measure_process_cpu_time()) i.results.cpu_time_used /= b.threads();
+ const double min_time;
+ const int repeats;
+ const bool has_explicit_iteration_count;
- VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
- << i.results.real_time_used << "\n";
+ std::vector<std::thread> pool;
- // By using KeepRunningBatch a benchmark can iterate more times than
- // requested, so take the iteration count from i.results.
- i.iters = i.results.iterations / b.threads();
+ size_t iters; // preserved between repetitions!
+ // So only the first repetition has to find/calculate it,
+ // the other repetitions will just use that precomputed iteration count.
- // Base decisions off of real time if requested by this benchmark.
- i.seconds = i.results.cpu_time_used;
- if (b.use_manual_time()) {
- i.seconds = i.results.manual_time_used;
- } else if (b.use_real_time()) {
- i.seconds = i.results.real_time_used;
- }
+ struct IterationResults {
+ internal::ThreadManager::Result results;
+ size_t iters;
+ double seconds;
+ };
+ IterationResults DoNIterations() {
+ VLOG(2) << "Running " << b.name << " for " << iters << "\n";
- return i;
-}
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(b.threads));
-IterationCount BenchmarkRunner::PredictNumItersNeeded(
- const IterationResults& i) const {
-  // See how much the iteration count should be increased by.
- // Note: Avoid division by zero with max(seconds, 1ns).
- double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
- // If our last run was at least 10% of FLAGS_benchmark_min_time then we
- // use the multiplier directly.
- // Otherwise we use at most 10 times expansion.
- // NOTE: When the last run was at least 10% of the min time the max
- // expansion should be 14x.
- bool is_significant = (i.seconds / min_time) > 0.1;
- multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
- if (multiplier <= 1.0) multiplier = 2.0;
-
- // So what seems to be the sufficiently-large iteration count? Round up.
- const IterationCount max_next_iters = static_cast<IterationCount>(
- std::lround(std::max(multiplier * static_cast<double>(i.iters),
- static_cast<double>(i.iters) + 1.0)));
- // But we do have *some* sanity limits though..
- const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
-
- VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
- return next_iters; // round up before conversion to integer.
-}
+ // Run all but one thread in separate threads
+ for (std::size_t ti = 0; ti < pool.size(); ++ti) {
+ pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
+ manager.get());
+ }
+ // And run one thread here directly.
+ // (If we were asked to run just one thread, we don't create new threads.)
+ // Yes, we need to do this here *after* we start the separate threads.
+ RunInThread(&b, iters, 0, manager.get());
-bool BenchmarkRunner::ShouldReportIterationResults(
- const IterationResults& i) const {
- // Determine if this run should be reported;
- // Either it has run for a sufficient amount of time
- // or because an error was reported.
- return i.results.has_error_ ||
- i.iters >= kMaxIterations || // Too many iterations already.
- i.seconds >= min_time || // The elapsed time is large enough.
- // CPU time is specified but the elapsed real time greatly exceeds
- // the minimum time.
-         // Note that user provided timers are exempt from this sanity check.
- ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time());
-}
+ // The main thread has finished. Now let's wait for the other threads.
+ manager->WaitForAllThreads();
+ for (std::thread& thread : pool) thread.join();
-void BenchmarkRunner::DoOneRepetition() {
- assert(HasRepeatsRemaining() && "Already done all repetitions?");
-
- const bool is_the_first_repetition = num_repetitions_done == 0;
- IterationResults i;
-
- // We *may* be gradually increasing the length (iteration count)
- // of the benchmark until we decide the results are significant.
- // And once we do, we report those last results and exit.
-  // Please do note that if there are repetitions, the iteration count
- // is *only* calculated for the *first* repetition, and other repetitions
- // simply use that precomputed iteration count.
- for (;;) {
- i = DoNIterations();
-
- // Do we consider the results to be significant?
- // If we are doing repetitions, and the first repetition was already done,
- // it has calculated the correct iteration time, so we have run that very
- // iteration count just now. No need to calculate anything. Just report.
- // Else, the normal rules apply.
- const bool results_are_significant = !is_the_first_repetition ||
- has_explicit_iteration_count ||
- ShouldReportIterationResults(i);
-
- if (results_are_significant) break; // Good, let's report them!
-
- // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
- // iteration count, and run the benchmark again...
-
- iters = PredictNumItersNeeded(i);
- assert(iters > i.iters &&
- "if we did more iterations than we want to do the next time, "
- "then we should have accepted the current iteration run.");
- }
+ IterationResults i;
+ // Acquire the measurements/counters from the manager, UNDER THE LOCK!
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ i.results = manager->results;
+ }
- // Oh, one last thing, we need to also produce the 'memory measurements'..
- MemoryManager::Result memory_result;
- IterationCount memory_iterations = 0;
- if (memory_manager != nullptr) {
- // Only run a few iterations to reduce the impact of one-time
- // allocations in benchmarks that are not properly managed.
- memory_iterations = std::min<IterationCount>(16, iters);
- memory_manager->Start();
- std::unique_ptr<internal::ThreadManager> manager;
- manager.reset(new internal::ThreadManager(1));
- RunInThread(&b, memory_iterations, 0, manager.get(),
- perf_counters_measurement_ptr);
- manager->WaitForAllThreads();
+ // And get rid of the manager.
manager.reset();
- memory_manager->Stop(&memory_result);
+ // Adjust real/manual time stats since they were reported per thread.
+ i.results.real_time_used /= b.threads;
+ i.results.manual_time_used /= b.threads;
+
+ VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
+ << i.results.real_time_used << "\n";
+
+ // So for how long were we running?
+ i.iters = iters;
+ // Base decisions off of real time if requested by this benchmark.
+ i.seconds = i.results.cpu_time_used;
+ if (b.use_manual_time) {
+ i.seconds = i.results.manual_time_used;
+ } else if (b.use_real_time) {
+ i.seconds = i.results.real_time_used;
+ }
+
+ return i;
}
-  // Ok, now actually report.
- BenchmarkReporter::Run report =
- CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds,
- num_repetitions_done, repeats);
+ size_t PredictNumItersNeeded(const IterationResults& i) const {
+    // See how much the iteration count should be increased by.
+ // Note: Avoid division by zero with max(seconds, 1ns).
+ double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
+ // If our last run was at least 10% of FLAGS_benchmark_min_time then we
+ // use the multiplier directly.
+ // Otherwise we use at most 10 times expansion.
+ // NOTE: When the last run was at least 10% of the min time the max
+ // expansion should be 14x.
+ bool is_significant = (i.seconds / min_time) > 0.1;
+ multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
+ if (multiplier <= 1.0) multiplier = 2.0;
+
+ // So what seems to be the sufficiently-large iteration count? Round up.
+ const size_t max_next_iters =
+ 0.5 + std::max(multiplier * i.iters, i.iters + 1.0);
+ // But we do have *some* sanity limits though..
+ const size_t next_iters = std::min(max_next_iters, kMaxIterations);
+
+ VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
+ return next_iters; // round up before conversion to integer.
+ }
- if (reports_for_family) {
- ++reports_for_family->num_runs_done;
- if (!report.error_occurred) reports_for_family->Runs.push_back(report);
+ bool ShouldReportIterationResults(const IterationResults& i) const {
+ // Determine if this run should be reported;
+ // Either it has run for a sufficient amount of time
+ // or because an error was reported.
+ return i.results.has_error_ ||
+ i.iters >= kMaxIterations || // Too many iterations already.
+ i.seconds >= min_time || // The elapsed time is large enough.
+ // CPU time is specified but the elapsed real time greatly exceeds
+ // the minimum time.
+           // Note that user provided timers are exempt from this sanity check.
+ ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
}
- run_results.non_aggregates.push_back(report);
+ void DoOneRepetition(bool is_the_first_repetition) {
+ IterationResults i;
+
+ // We *may* be gradually increasing the length (iteration count)
+ // of the benchmark until we decide the results are significant.
+ // And once we do, we report those last results and exit.
+    // Please do note that if there are repetitions, the iteration count
+ // is *only* calculated for the *first* repetition, and other repetitions
+ // simply use that precomputed iteration count.
+ for (;;) {
+ i = DoNIterations();
+
+ // Do we consider the results to be significant?
+ // If we are doing repetitions, and the first repetition was already done,
+ // it has calculated the correct iteration time, so we have run that very
+ // iteration count just now. No need to calculate anything. Just report.
+ // Else, the normal rules apply.
+ const bool results_are_significant = !is_the_first_repetition ||
+ has_explicit_iteration_count ||
+ ShouldReportIterationResults(i);
+
+ if (results_are_significant) break; // Good, let's report them!
+
+ // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
+ // iteration count, and run the benchmark again...
+
+ iters = PredictNumItersNeeded(i);
+ assert(iters > i.iters &&
+ "if we did more iterations than we want to do the next time, "
+ "then we should have accepted the current iteration run.");
+ }
- ++num_repetitions_done;
-}
+ // Oh, one last thing, we need to also produce the 'memory measurements'..
+ MemoryManager::Result memory_result;
+ size_t memory_iterations = 0;
+ if (memory_manager != nullptr) {
+ // Only run a few iterations to reduce the impact of one-time
+ // allocations in benchmarks that are not properly managed.
+ memory_iterations = std::min<size_t>(16, iters);
+ memory_manager->Start();
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(1));
+ RunInThread(&b, memory_iterations, 0, manager.get());
+ manager->WaitForAllThreads();
+ manager.reset();
+
+ memory_manager->Stop(&memory_result);
+ }
+
+    // Ok, now actually report.
+ BenchmarkReporter::Run report = CreateRunReport(
+ b, i.results, memory_iterations, memory_result, i.seconds);
-RunResults&& BenchmarkRunner::GetResults() {
- assert(!HasRepeatsRemaining() && "Did not run all repetitions yet?");
+ if (!report.error_occurred && b.complexity != oNone)
+ complexity_reports.push_back(report);
- // Calculate additional statistics over the repetitions of this instance.
- run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
+ run_results.non_aggregates.push_back(report);
+ }
+};
+
+} // end namespace
- return std::move(run_results);
+RunResults RunBenchmark(
+ const benchmark::internal::BenchmarkInstance& b,
+ std::vector<BenchmarkReporter::Run>* complexity_reports) {
+ internal::BenchmarkRunner r(b, complexity_reports);
+ return r.get_results();
}
} // end namespace internal
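Both sides of this hunk use essentially the same iteration-growth heuristic, differing only in integer types and rounding: scale the last iteration count by min_time * 1.4 / elapsed, cap the growth at 10x when the previous run covered less than 10% of the minimum time, never shrink, and clamp to kMaxIterations. A minimal standalone sketch of that heuristic, with the constants taken from the hunk above (the function name and signature are illustrative, not part of either version):

    #include <algorithm>
    #include <cstdint>

    int64_t PredictNextIters(int64_t iters, double seconds, double min_time,
                             int64_t max_iters) {
      // Avoid division by zero with max(seconds, 1ns).
      double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
      // Runs shorter than 10% of min_time only get up to a 10x expansion.
      const bool is_significant = (seconds / min_time) > 0.1;
      if (!is_significant) multiplier = std::min(10.0, multiplier);
      if (multiplier <= 1.0) multiplier = 2.0;
      // Grow by at least one iteration, round, and clamp to the hard limit.
      const double next = std::max(multiplier * static_cast<double>(iters),
                                   static_cast<double>(iters) + 1.0);
      return std::min<int64_t>(static_cast<int64_t>(next + 0.5), max_iters);
    }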
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_runner.h b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_runner.h
index 8a855236b22..96e8282a11a 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_runner.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/benchmark_runner.h
@@ -15,13 +15,8 @@
#ifndef BENCHMARK_RUNNER_H_
#define BENCHMARK_RUNNER_H_
-#include <thread>
-#include <vector>
-
#include "benchmark_api_internal.h"
#include "internal_macros.h"
-#include "perf_counters.h"
-#include "thread_manager.h"
DECLARE_double(benchmark_min_time);
@@ -31,8 +26,6 @@ DECLARE_bool(benchmark_report_aggregates_only);
DECLARE_bool(benchmark_display_aggregates_only);
-DECLARE_string(benchmark_perf_counters);
-
namespace benchmark {
namespace internal {
@@ -47,57 +40,9 @@ struct RunResults {
bool file_report_aggregates_only = false;
};
-class BenchmarkRunner {
- public:
- BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
- BenchmarkReporter::PerFamilyRunReports* reports_for_family);
-
- int GetNumRepeats() const { return repeats; }
-
- bool HasRepeatsRemaining() const {
- return GetNumRepeats() != num_repetitions_done;
- }
-
- void DoOneRepetition();
-
- RunResults&& GetResults();
-
- BenchmarkReporter::PerFamilyRunReports* GetReportsForFamily() const {
- return reports_for_family;
- };
-
- private:
- RunResults run_results;
-
- const benchmark::internal::BenchmarkInstance& b;
- BenchmarkReporter::PerFamilyRunReports* reports_for_family;
-
- const double min_time;
- const int repeats;
- const bool has_explicit_iteration_count;
-
- int num_repetitions_done = 0;
-
- std::vector<std::thread> pool;
-
- IterationCount iters; // preserved between repetitions!
- // So only the first repetition has to find/calculate it,
- // the other repetitions will just use that precomputed iteration count.
-
- PerfCountersMeasurement perf_counters_measurement;
- PerfCountersMeasurement* const perf_counters_measurement_ptr;
-
- struct IterationResults {
- internal::ThreadManager::Result results;
- IterationCount iters;
- double seconds;
- };
- IterationResults DoNIterations();
-
- IterationCount PredictNumItersNeeded(const IterationResults& i) const;
-
- bool ShouldReportIterationResults(const IterationResults& i) const;
-};
+RunResults RunBenchmark(
+ const benchmark::internal::BenchmarkInstance& b,
+ std::vector<BenchmarkReporter::Run>* complexity_reports);
} // namespace internal
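With this change the header exports only the free function RunBenchmark(); repetition bookkeeping, iteration prediction and aggregate computation all stay inside benchmark_runner.cc. A rough sketch of how a reporting loop can drive that interface (RunBenchmark, RunResults and ReportRuns come from the hunks; the surrounding function, its parameters, and the aggregates-only filtering that is omitted here are assumed for illustration):

    void RunAndReport(
        const std::vector<benchmark::internal::BenchmarkInstance>& benchmarks,
        benchmark::BenchmarkReporter* reporter) {
      // Complexity reports accumulate across instances of the same family and
      // are consumed by the runner when the last instance finishes.
      std::vector<benchmark::BenchmarkReporter::Run> complexity_reports;
      for (const auto& instance : benchmarks) {
        benchmark::internal::RunResults results =
            benchmark::internal::RunBenchmark(instance, &complexity_reports);
        reporter->ReportRuns(results.non_aggregates);
        reporter->ReportRuns(results.aggregates_only);
      }
    }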
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/commandlineflags.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/commandlineflags.cc
index 5724aaa2940..734e88bbec6 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/commandlineflags.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/commandlineflags.cc
@@ -14,20 +14,13 @@
#include "commandlineflags.h"
-#include <algorithm>
#include <cctype>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <limits>
-#include <map>
-#include <utility>
-
-#include "../src/string_util.h"
namespace benchmark {
-namespace {
-
// Parses 'str' for a 32-bit signed integer. If successful, writes
// the result to *value and returns true; otherwise leaves *value
// unchanged and returns false.
@@ -82,30 +75,6 @@ bool ParseDouble(const std::string& src_text, const char* str, double* value) {
return true;
}
-// Parses 'str' into KV pairs. If successful, writes the result to *value and
-// returns true; otherwise leaves *value unchanged and returns false.
-bool ParseKvPairs(const std::string& src_text, const char* str,
- std::map<std::string, std::string>* value) {
- std::map<std::string, std::string> kvs;
- for (const auto& kvpair : StrSplit(str, ',')) {
- const auto kv = StrSplit(kvpair, '=');
- if (kv.size() != 2) {
- std::cerr << src_text << " is expected to be a comma-separated list of "
- << "<key>=<value> strings, but actually has value \"" << str
- << "\".\n";
- return false;
- }
- if (!kvs.emplace(kv[0], kv[1]).second) {
- std::cerr << src_text << " is expected to contain unique keys but key \""
- << kv[0] << "\" was repeated.\n";
- return false;
- }
- }
-
- *value = kvs;
- return true;
-}
-
// Returns the name of the environment variable corresponding to the
// given flag. For example, FlagToEnvVar("foo") will return
// "BENCHMARK_FOO" in the open-source version.
@@ -116,59 +85,47 @@ static std::string FlagToEnvVar(const char* flag) {
for (size_t i = 0; i != flag_str.length(); ++i)
env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
- return env_var;
+ return "BENCHMARK_" + env_var;
}
-} // namespace
-
-bool BoolFromEnv(const char* flag, bool default_val) {
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true iff it's not "0".
+bool BoolFromEnv(const char* flag, bool default_value) {
const std::string env_var = FlagToEnvVar(flag);
- const char* const value_str = getenv(env_var.c_str());
- return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
+ const char* const string_value = getenv(env_var.c_str());
+ return string_value == nullptr ? default_value
+ : strcmp(string_value, "0") != 0;
}
-int32_t Int32FromEnv(const char* flag, int32_t default_val) {
+// Reads and returns a 32-bit integer stored in the environment
+// variable corresponding to the given flag; if it isn't set or
+// doesn't represent a valid 32-bit integer, returns default_value.
+int32_t Int32FromEnv(const char* flag, int32_t default_value) {
const std::string env_var = FlagToEnvVar(flag);
- const char* const value_str = getenv(env_var.c_str());
- int32_t value = default_val;
- if (value_str == nullptr ||
- !ParseInt32(std::string("Environment variable ") + env_var, value_str,
- &value)) {
- return default_val;
+ const char* const string_value = getenv(env_var.c_str());
+ if (string_value == nullptr) {
+ // The environment variable is not set.
+ return default_value;
}
- return value;
-}
-double DoubleFromEnv(const char* flag, double default_val) {
- const std::string env_var = FlagToEnvVar(flag);
- const char* const value_str = getenv(env_var.c_str());
- double value = default_val;
- if (value_str == nullptr ||
- !ParseDouble(std::string("Environment variable ") + env_var, value_str,
- &value)) {
- return default_val;
+ int32_t result = default_value;
+ if (!ParseInt32(std::string("Environment variable ") + env_var, string_value,
+ &result)) {
+ std::cout << "The default value " << default_value << " is used.\n";
+ return default_value;
}
- return value;
-}
-const char* StringFromEnv(const char* flag, const char* default_val) {
- const std::string env_var = FlagToEnvVar(flag);
- const char* const value = getenv(env_var.c_str());
- return value == nullptr ? default_val : value;
+ return result;
}
-std::map<std::string, std::string> KvPairsFromEnv(
- const char* flag, std::map<std::string, std::string> default_val) {
+// Reads and returns the string environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+const char* StringFromEnv(const char* flag, const char* default_value) {
const std::string env_var = FlagToEnvVar(flag);
- const char* const value_str = getenv(env_var.c_str());
-
- if (value_str == nullptr) return default_val;
-
- std::map<std::string, std::string> value;
- if (!ParseKvPairs("Environment variable " + env_var, value_str, &value)) {
- return default_val;
- }
- return value;
+ const char* const value = getenv(env_var.c_str());
+ return value == nullptr ? default_value : value;
}
// Parses a string as a command line flag. The string should have
@@ -248,39 +205,14 @@ bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
return true;
}
-bool ParseKeyValueFlag(
- const char* str, const char* flag,
- std::map<std::string, std::string>* value) {
- const char* const value_str = ParseFlagValue(str, flag, false);
-
- if (value_str == nullptr) return false;
-
- for (const auto& kvpair : StrSplit(value_str, ',')) {
- const auto kv = StrSplit(kvpair, '=');
- if (kv.size() != 2) return false;
- value->emplace(kv[0], kv[1]);
- }
-
- return true;
-}
-
bool IsFlag(const char* str, const char* flag) {
return (ParseFlagValue(str, flag, true) != nullptr);
}
bool IsTruthyFlagValue(const std::string& value) {
- if (value.size() == 1) {
- char v = value[0];
- return isalnum(v) &&
- !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
- } else if (!value.empty()) {
- std::string value_lower(value);
- std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(),
- [](char c) { return static_cast<char>(::tolower(c)); });
- return !(value_lower == "false" || value_lower == "no" ||
- value_lower == "off");
- } else
- return true;
+ if (value.empty()) return true;
+ char ch = value[0];
+ return isalnum(ch) &&
+ !(ch == '0' || ch == 'f' || ch == 'F' || ch == 'n' || ch == 'N');
}
-
} // end namespace benchmark
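In the version restored here the environment lookups are simpler: FlagToEnvVar() prefixes the upper-cased flag name with "BENCHMARK_", BoolFromEnv() treats any value other than "0" as true, and Int32FromEnv() prints a note and falls back to the default when parsing fails. A small sketch of the resulting behaviour (the flag names and environment values are illustrative):

    // Assuming the environment contains:
    //   BENCHMARK_FOO=yes
    //   BENCHMARK_BAR=notanumber
    bool foo = benchmark::BoolFromEnv("foo", false);   // true: set and not "0"
    int32_t bar = benchmark::Int32FromEnv("bar", 42);  // 42: ParseInt32 fails,
                                                        // default is returned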
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/commandlineflags.h b/gnu/llvm/libcxx/utils/google-benchmark/src/commandlineflags.h
index 0c988cccb3a..945c9a9fc4a 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/commandlineflags.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/commandlineflags.h
@@ -2,7 +2,6 @@
#define BENCHMARK_COMMANDLINEFLAGS_H_
#include <cstdint>
-#include <map>
#include <string>
// Macro for referencing flags.
@@ -11,61 +10,31 @@
// Macros for declaring flags.
#define DECLARE_bool(name) extern bool FLAG(name)
#define DECLARE_int32(name) extern int32_t FLAG(name)
+#define DECLARE_int64(name) extern int64_t FLAG(name)
#define DECLARE_double(name) extern double FLAG(name)
#define DECLARE_string(name) extern std::string FLAG(name)
-#define DECLARE_kvpairs(name) \
- extern std::map<std::string, std::string> FLAG(name)
// Macros for defining flags.
-#define DEFINE_bool(name, default_val) \
- bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
-#define DEFINE_int32(name, default_val) \
- int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val)
-#define DEFINE_double(name, default_val) \
- double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val)
-#define DEFINE_string(name, default_val) \
- std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val)
-#define DEFINE_kvpairs(name, default_val) \
- std::map<std::string, std::string> FLAG(name) = \
- benchmark::KvPairsFromEnv(#name, default_val)
+#define DEFINE_bool(name, default_val, doc) bool FLAG(name) = (default_val)
+#define DEFINE_int32(name, default_val, doc) int32_t FLAG(name) = (default_val)
+#define DEFINE_int64(name, default_val, doc) int64_t FLAG(name) = (default_val)
+#define DEFINE_double(name, default_val, doc) double FLAG(name) = (default_val)
+#define DEFINE_string(name, default_val, doc) \
+ std::string FLAG(name) = (default_val)
namespace benchmark {
+// Parses 'str' for a 32-bit signed integer. If successful, writes the result
+// to *value and returns true; otherwise leaves *value unchanged and returns
+// false.
+bool ParseInt32(const std::string& src_text, const char* str, int32_t* value);
-// Parses a bool from the environment variable corresponding to the given flag.
-//
-// If the variable exists, returns IsTruthyFlagValue() value; if not,
-// returns the given default value.
+// Parses a bool/Int32/string from the environment variable
+// corresponding to the given Google Test flag.
bool BoolFromEnv(const char* flag, bool default_val);
-
-// Parses an Int32 from the environment variable corresponding to the given
-// flag.
-//
-// If the variable exists, returns ParseInt32() value; if not, returns
-// the given default value.
int32_t Int32FromEnv(const char* flag, int32_t default_val);
-
-// Parses an Double from the environment variable corresponding to the given
-// flag.
-//
-// If the variable exists, returns ParseDouble(); if not, returns
-// the given default value.
double DoubleFromEnv(const char* flag, double default_val);
-
-// Parses a string from the environment variable corresponding to the given
-// flag.
-//
-// If variable exists, returns its value; if not, returns
-// the given default value.
const char* StringFromEnv(const char* flag, const char* default_val);
-// Parses a set of kvpairs from the environment variable corresponding to the
-// given flag.
-//
-// If variable exists, returns its value; if not, returns
-// the given default value.
-std::map<std::string, std::string> KvPairsFromEnv(
- const char* flag, std::map<std::string, std::string> default_val);
-
// Parses a string for a bool flag, in the form of either
// "--flag=value" or "--flag".
//
@@ -77,40 +46,34 @@ std::map<std::string, std::string> KvPairsFromEnv(
// true. On failure, returns false without changing *value.
bool ParseBoolFlag(const char* str, const char* flag, bool* value);
-// Parses a string for an Int32 flag, in the form of "--flag=value".
+// Parses a string for an Int32 flag, in the form of
+// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
-// Parses a string for a Double flag, in the form of "--flag=value".
+// Parses a string for a Double flag, in the form of
+// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseDoubleFlag(const char* str, const char* flag, double* value);
-// Parses a string for a string flag, in the form of "--flag=value".
+// Parses a string for a string flag, in the form of
+// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseStringFlag(const char* str, const char* flag, std::string* value);
-// Parses a string for a kvpairs flag in the form "--flag=key=value,key=value"
-//
-// On success, stores the value of the flag in *value and returns true. On
-// failure returns false, though *value may have been mutated.
-bool ParseKeyValueFlag(const char* str, const char* flag,
- std::map<std::string, std::string>* value);
-
// Returns true if the string matches the flag.
bool IsFlag(const char* str, const char* flag);
// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
-// some non-alphanumeric character. Also returns false if the value matches
-// one of 'no', 'false', 'off' (case-insensitive). As a special case, also
-// returns true if value is the empty string.
+// some non-alphanumeric character. As a special case, also returns true if
+// value is the empty string.
bool IsTruthyFlagValue(const std::string& value);
-
} // end namespace benchmark
#endif // BENCHMARK_COMMANDLINEFLAGS_H_
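The restored DEFINE_* macros take a documentation string and expand to a plain initialised global, so defaults are compile-time constants and the environment is consulted only through the explicit *FromEnv helpers. For example (the flag name appears in the runner diff above; the default value is illustrative, and FLAG(name) pasting a FLAGS_ prefix is assumed from upstream google-benchmark):

    DEFINE_int32(benchmark_repetitions, 1, "Repetitions per benchmark");
    // expands, roughly, to:
    //   int32_t FLAGS_benchmark_repetitions = (1);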
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/complexity.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/complexity.cc
index 29f7c3b0315..6ef17660c95 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/complexity.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/complexity.cc
@@ -29,23 +29,20 @@ BigOFunc* FittingCurve(BigO complexity) {
static const double kLog2E = 1.44269504088896340736;
switch (complexity) {
case oN:
- return [](IterationCount n) -> double { return static_cast<double>(n); };
+ return [](int64_t n) -> double { return static_cast<double>(n); };
case oNSquared:
- return [](IterationCount n) -> double { return std::pow(n, 2); };
+ return [](int64_t n) -> double { return std::pow(n, 2); };
case oNCubed:
- return [](IterationCount n) -> double { return std::pow(n, 3); };
+ return [](int64_t n) -> double { return std::pow(n, 3); };
case oLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
- return
- [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
+ return [](int64_t n) { return kLog2E * log(static_cast<double>(n)); };
case oNLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
- return [](IterationCount n) {
- return kLog2E * n * log(static_cast<double>(n));
- };
+ return [](int64_t n) { return kLog2E * n * log(static_cast<double>(n)); };
case o1:
default:
- return [](IterationCount) { return 1.0; };
+ return [](int64_t) { return 1.0; };
}
}
@@ -82,6 +79,7 @@ std::string GetBigOString(BigO complexity) {
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
const std::vector<double>& time,
BigOFunc* fitting_curve) {
+ double sigma_gn = 0.0;
double sigma_gn_squared = 0.0;
double sigma_time = 0.0;
double sigma_time_gn = 0.0;
@@ -89,6 +87,7 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
// Calculate least square fitting parameter
for (size_t i = 0; i < n.size(); ++i) {
double gn_i = fitting_curve(n[i]);
+ sigma_gn += gn_i;
sigma_gn_squared += gn_i * gn_i;
sigma_time += time[i];
sigma_time_gn += time[i] * gn_i;
@@ -184,21 +183,14 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
}
- // Drop the 'args' when reporting complexity.
- auto run_name = reports[0].run_name;
- run_name.args.clear();
+ std::string run_name = reports[0].benchmark_name().substr(
+ 0, reports[0].benchmark_name().find('/'));
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run big_o;
big_o.run_name = run_name;
- big_o.family_index = reports[0].family_index;
- big_o.per_family_instance_index = reports[0].per_family_instance_index;
big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
- big_o.repetitions = reports[0].repetitions;
- big_o.repetition_index = Run::no_repetition_index;
- big_o.threads = reports[0].threads;
big_o.aggregate_name = "BigO";
- big_o.report_label = reports[0].report_label;
big_o.iterations = 0;
big_o.real_accumulated_time = result_real.coef;
big_o.cpu_accumulated_time = result_cpu.coef;
@@ -215,15 +207,11 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
// Only add label to mean/stddev if it is same for all runs
Run rms;
rms.run_name = run_name;
- rms.family_index = reports[0].family_index;
- rms.per_family_instance_index = reports[0].per_family_instance_index;
+ big_o.report_label = reports[0].report_label;
rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
rms.aggregate_name = "RMS";
rms.report_label = big_o.report_label;
rms.iterations = 0;
- rms.repetition_index = Run::no_repetition_index;
- rms.repetitions = reports[0].repetitions;
- rms.threads = reports[0].threads;
rms.real_accumulated_time = result_real.rms / multiplier;
rms.cpu_accumulated_time = result_cpu.rms / multiplier;
rms.report_rms = true;
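FittingCurve() maps a BigO enumerator to the scaling function used by the least-squares fit; only the lambda parameter type changes in this file (int64_t in the restored code, IterationCount in the code being replaced). A quick worked evaluation of the oNLogN curve from the hunk, using the kLog2E constant to turn the natural log into a base-2 log (the call site is illustrative; FittingCurve lives inside complexity.cc):

    benchmark::BigOFunc* curve = FittingCurve(benchmark::oNLogN);
    double g = curve(1024);  // kLog2E * 1024 * ln(1024) == 1024 * log2(1024) == 10240.0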
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/console_reporter.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/console_reporter.cc
index 6fd764525e8..ca364727cb4 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/console_reporter.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/console_reporter.cc
@@ -12,21 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include "benchmark/benchmark.h"
+#include "complexity.h"
+#include "counter.h"
+
#include <algorithm>
#include <cstdint>
#include <cstdio>
-#include <cstring>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>
-#include "benchmark/benchmark.h"
#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
-#include "complexity.h"
-#include "counter.h"
#include "internal_macros.h"
#include "string_util.h"
#include "timers.h"
@@ -64,8 +64,9 @@ void ConsoleReporter::PrintHeader(const Run& run) {
str += " UserCounters...";
}
}
+ str += "\n";
std::string line = std::string(str.length(), '-');
- GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
+ GetOutputStream() << line << "\n" << str << line << "\n";
}
void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
@@ -156,14 +157,16 @@ void ConsoleReporter::PrintRunData(const Run& result) {
const std::size_t cNameLen = std::max(std::string::size_type(10),
c.first.length());
auto const& s = HumanReadableNumber(c.second.value, c.second.oneK);
- const char* unit = "";
- if (c.second.flags & Counter::kIsRate)
- unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
if (output_options_ & OO_Tabular) {
- printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(),
- unit);
+ if (c.second.flags & Counter::kIsRate) {
+ printer(Out, COLOR_DEFAULT, " %*s/s", cNameLen - 2, s.c_str());
+ } else {
+ printer(Out, COLOR_DEFAULT, " %*s", cNameLen, s.c_str());
+ }
} else {
- printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit);
+ const char* unit = (c.second.flags & Counter::kIsRate) ? "/s" : "";
+ printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(),
+ unit);
}
}
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/counter.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/counter.cc
index cf5b78ee3ac..cb604e060b6 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/counter.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/counter.cc
@@ -17,7 +17,7 @@
namespace benchmark {
namespace internal {
-double Finish(Counter const& c, IterationCount iterations, double cpu_time,
+double Finish(Counter const& c, int64_t iterations, double cpu_time,
double num_threads) {
double v = c.value;
if (c.flags & Counter::kIsRate) {
@@ -32,15 +32,10 @@ double Finish(Counter const& c, IterationCount iterations, double cpu_time,
if (c.flags & Counter::kAvgIterations) {
v /= iterations;
}
-
- if (c.flags & Counter::kInvert) { // Invert is *always* last.
- v = 1.0 / v;
- }
return v;
}
-void Finish(UserCounters* l, IterationCount iterations, double cpu_time,
- double num_threads) {
+void Finish(UserCounters* l, int64_t iterations, double cpu_time, double num_threads) {
for (auto& c : *l) {
c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
}
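Finish() scales a raw counter value according to its flags before reporting; removing the kInvert step is the only behavioural change here, the remaining flag handling is identical. A tiny worked sketch of the kAvgIterations branch visible in the hunk (the Counter constructor form and the values are illustrative):

    // A counter of 1000 items flagged kAvgIterations is averaged per iteration:
    benchmark::Counter c(1000.0, benchmark::Counter::kAvgIterations);
    double v = benchmark::internal::Finish(c, /*iterations=*/250,
                                           /*cpu_time=*/2.0, /*num_threads=*/4.0);
    // v == 4.0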
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/counter.h b/gnu/llvm/libcxx/utils/google-benchmark/src/counter.h
index 1f5a58e31f0..d884e50aa12 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/counter.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/counter.h
@@ -12,21 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#ifndef BENCHMARK_COUNTER_H_
-#define BENCHMARK_COUNTER_H_
-
#include "benchmark/benchmark.h"
namespace benchmark {
// these counter-related functions are hidden to reduce API surface.
namespace internal {
-void Finish(UserCounters* l, IterationCount iterations, double time,
- double num_threads);
+void Finish(UserCounters* l, int64_t iterations, double time, double num_threads);
void Increment(UserCounters* l, UserCounters const& r);
bool SameNames(UserCounters const& l, UserCounters const& r);
} // end namespace internal
} // end namespace benchmark
-
-#endif // BENCHMARK_COUNTER_H_
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/csv_reporter.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/csv_reporter.cc
index af2c18fc8a6..d2f1d27eb62 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/csv_reporter.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/csv_reporter.cc
@@ -37,18 +37,6 @@ std::vector<std::string> elements = {
"error_occurred", "error_message"};
} // namespace
-std::string CsvEscape(const std::string & s) {
- std::string tmp;
- tmp.reserve(s.size() + 2);
- for (char c : s) {
- switch (c) {
- case '"' : tmp += "\"\""; break;
- default : tmp += c; break;
- }
- }
- return '"' + tmp + '"';
-}
-
bool CSVReporter::ReportContext(const Context& context) {
PrintBasicContext(&GetErrorStream(), context);
return true;
@@ -101,11 +89,18 @@ void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
void CSVReporter::PrintRunData(const Run& run) {
std::ostream& Out = GetOutputStream();
- Out << CsvEscape(run.benchmark_name()) << ",";
+
+ // Field with embedded double-quote characters must be doubled and the field
+ // delimited with double-quotes.
+ std::string name = run.benchmark_name();
+ ReplaceAll(&name, "\"", "\"\"");
+ Out << '"' << name << "\",";
if (run.error_occurred) {
Out << std::string(elements.size() - 3, ',');
Out << "true,";
- Out << CsvEscape(run.error_message) << "\n";
+ std::string msg = run.error_message;
+ ReplaceAll(&msg, "\"", "\"\"");
+ Out << '"' << msg << "\"\n";
return;
}
@@ -135,7 +130,11 @@ void CSVReporter::PrintRunData(const Run& run) {
}
Out << ",";
if (!run.report_label.empty()) {
- Out << CsvEscape(run.report_label);
+ // Field with embedded double-quote characters must be doubled and the field
+ // delimited with double-quotes.
+ std::string label = run.report_label;
+ ReplaceAll(&label, "\"", "\"\"");
+ Out << "\"" << label << "\"";
}
Out << ",,"; // for error_occurred and error_message
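The restored CSV writer escapes fields inline with ReplaceAll() instead of the dedicated CsvEscape() helper being removed; the output is the same: embedded double quotes are doubled and the whole field is wrapped in double quotes. For instance, inside PrintRunData a run named foo "bar" (name illustrative) is emitted as follows:

    std::string name = "foo \"bar\"";
    ReplaceAll(&name, "\"", "\"\"");     // name is now: foo ""bar""
    Out << '"' << name << "\",";         // emits: "foo ""bar""",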
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/cycleclock.h b/gnu/llvm/libcxx/utils/google-benchmark/src/cycleclock.h
index f22ca9f7d29..d5d62c4c7fe 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/cycleclock.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/cycleclock.h
@@ -36,7 +36,7 @@
// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
-#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64)
+#if defined(COMPILER_MSVC) && !defined(_M_IX86)
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif
@@ -84,21 +84,13 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
return (high << 32) | low;
#elif defined(__powerpc__) || defined(__ppc__)
// This returns a time-base, which is not always precisely a cycle-count.
-#if defined(__powerpc64__) || defined(__ppc64__)
- int64_t tb;
- asm volatile("mfspr %0, 268" : "=r"(tb));
- return tb;
-#else
- uint32_t tbl, tbu0, tbu1;
- asm volatile(
- "mftbu %0\n"
- "mftb %1\n"
- "mftbu %2"
- : "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
- tbl &= -static_cast<int32_t>(tbu0 == tbu1);
- // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
- return (static_cast<uint64_t>(tbu1) << 32) | tbl;
-#endif
+ int64_t tbl, tbu0, tbu1;
+ asm("mftbu %0" : "=r"(tbu0));
+ asm("mftb %0" : "=r"(tbl));
+ asm("mftbu %0" : "=r"(tbu1));
+ tbl &= -static_cast<int64_t>(tbu0 == tbu1);
+ // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is garbage)
+ return (tbu1 << 32) | tbl;
#elif defined(__sparc__)
int64_t tick;
asm(".byte 0x83, 0x41, 0x00, 0x00");
@@ -114,12 +106,6 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
// when I know it will work. Otherwise, I'll use __rdtsc and hope
// the code is being compiled with a non-ancient compiler.
_asm rdtsc
-#elif defined(COMPILER_MSVC) && defined(_M_ARM64)
- // See https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=vs-2019
- // and https://reviews.llvm.org/D53115
- int64_t virtual_timer_value;
- virtual_timer_value = _ReadStatusReg(ARM64_CNTVCT);
- return virtual_timer_value;
#elif defined(COMPILER_MSVC)
return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
@@ -167,51 +153,32 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
-#elif defined(__mips__) || defined(__m68k__)
+#elif defined(__mips__)
// mips apparently only allows rdtsc for superusers, so we fall
// back to gettimeofday. It's possible clock_gettime would be better.
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
-#elif defined(__loongarch__)
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__) // Covers both s390 and s390x.
// Return the CPU clock.
uint64_t tsc;
-#if defined(BENCHMARK_OS_ZOS) && defined(COMPILER_IBMXL)
- // z/OS XL compiler HLASM syntax.
- asm(" stck %0" : "=m"(tsc) : : "cc");
-#else
asm("stck %0" : "=Q"(tsc) : : "cc");
-#endif
return tsc;
#elif defined(__riscv) // RISC-V
// Use RDCYCLE (and RDCYCLEH on riscv32)
#if __riscv_xlen == 32
- uint32_t cycles_lo, cycles_hi0, cycles_hi1;
- // This asm also includes the PowerPC overflow handling strategy, as above.
- // Implemented in assembly because Clang insisted on branching.
- asm volatile(
- "rdcycleh %0\n"
- "rdcycle %1\n"
- "rdcycleh %2\n"
- "sub %0, %0, %2\n"
- "seqz %0, %0\n"
- "sub %0, zero, %0\n"
- "and %1, %1, %0\n"
- : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
- return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
+ uint64_t cycles_low, cycles_hi0, cycles_hi1;
+ asm("rdcycleh %0" : "=r"(cycles_hi0));
+ asm("rdcycle %0" : "=r"(cycles_lo));
+ asm("rdcycleh %0" : "=r"(cycles_hi1));
+ // This matches the PowerPC overflow detection, above
+ cycles_lo &= -static_cast<int64_t>(cycles_hi0 == cycles_hi1);
+ return (cycles_hi1 << 32) | cycles_lo;
#else
uint64_t cycles;
- asm volatile("rdcycle %0" : "=r"(cycles));
+ asm("rdcycle %0" : "=r"(cycles));
return cycles;
#endif
-#elif defined(__e2k__) || defined(__elbrus__)
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#else
// The soft failover to a generic implementation is automatic only for ARM.
// For other platforms the developer is expected to make an attempt to create
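Both the PowerPC path and the riscv32 path of the restored code read a 64-bit counter exposed as two 32-bit halves with the same carry-safe pattern: read the high half, the low half, then the high half again, and zero the low half if the high half changed in between, so the final (hi << 32) | lo never pairs a post-rollover high word with a pre-rollover low word. A generic sketch of that pattern (ReadHi/ReadLo stand in for the mftbu/mftb or rdcycleh/rdcycle instructions):

    #include <cstdint>

    uint32_t ReadHi();  // placeholder for the architecture-specific high read
    uint32_t ReadLo();  // placeholder for the architecture-specific low read

    uint64_t ReadWideCounter() {
      uint32_t hi0 = ReadHi();
      uint32_t lo = ReadLo();
      uint32_t hi1 = ReadHi();
      // If the high half ticked between the two reads, the low half wrapped;
      // masking it to zero keeps the combined value consistent.
      lo &= -static_cast<uint32_t>(hi0 == hi1);
      return (static_cast<uint64_t>(hi1) << 32) | lo;
    }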
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/internal_macros.h b/gnu/llvm/libcxx/utils/google-benchmark/src/internal_macros.h
index 91f367b894b..5dbf4fd2752 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/internal_macros.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/internal_macros.h
@@ -13,11 +13,7 @@
#endif
#if defined(__clang__)
- #if defined(__ibmxl__)
- #if !defined(COMPILER_IBMXL)
- #define COMPILER_IBMXL
- #endif
- #elif !defined(COMPILER_CLANG)
+ #if !defined(COMPILER_CLANG)
#define COMPILER_CLANG
#endif
#elif defined(_MSC_VER)
@@ -62,8 +58,6 @@
#define BENCHMARK_OS_NETBSD 1
#elif defined(__OpenBSD__)
#define BENCHMARK_OS_OPENBSD 1
-#elif defined(__DragonFly__)
- #define BENCHMARK_OS_DRAGONFLY 1
#elif defined(__linux__)
#define BENCHMARK_OS_LINUX 1
#elif defined(__native_client__)
@@ -76,10 +70,6 @@
#define BENCHMARK_OS_FUCHSIA 1
#elif defined (__SVR4) && defined (__sun)
#define BENCHMARK_OS_SOLARIS 1
-#elif defined(__QNX__)
-#define BENCHMARK_OS_QNX 1
-#elif defined(__MVS__)
-#define BENCHMARK_OS_ZOS 1
#endif
#if defined(__ANDROID__) && defined(__GLIBCXX__)
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/json_reporter.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/json_reporter.cc
index 26898456f85..7d01e8e4e31 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/json_reporter.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/json_reporter.cc
@@ -16,7 +16,6 @@
#include "complexity.h"
#include <algorithm>
-#include <cmath>
#include <cstdint>
#include <iomanip> // for setprecision
#include <iostream>
@@ -29,73 +28,39 @@
#include "timers.h"
namespace benchmark {
-namespace internal {
-extern std::map<std::string, std::string>* global_context;
-}
namespace {
-std::string StrEscape(const std::string & s) {
- std::string tmp;
- tmp.reserve(s.size());
- for (char c : s) {
- switch (c) {
- case '\b': tmp += "\\b"; break;
- case '\f': tmp += "\\f"; break;
- case '\n': tmp += "\\n"; break;
- case '\r': tmp += "\\r"; break;
- case '\t': tmp += "\\t"; break;
- case '\\': tmp += "\\\\"; break;
- case '"' : tmp += "\\\""; break;
- default : tmp += c; break;
- }
- }
- return tmp;
-}
-
std::string FormatKV(std::string const& key, std::string const& value) {
- return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
+ return StrFormat("\"%s\": \"%s\"", key.c_str(), value.c_str());
}
std::string FormatKV(std::string const& key, const char* value) {
- return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
+ return StrFormat("\"%s\": \"%s\"", key.c_str(), value);
}
std::string FormatKV(std::string const& key, bool value) {
- return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false");
+ return StrFormat("\"%s\": %s", key.c_str(), value ? "true" : "false");
}
std::string FormatKV(std::string const& key, int64_t value) {
std::stringstream ss;
- ss << '"' << StrEscape(key) << "\": " << value;
- return ss.str();
-}
-
-std::string FormatKV(std::string const& key, IterationCount value) {
- std::stringstream ss;
- ss << '"' << StrEscape(key) << "\": " << value;
+ ss << '"' << key << "\": " << value;
return ss.str();
}
std::string FormatKV(std::string const& key, double value) {
std::stringstream ss;
- ss << '"' << StrEscape(key) << "\": ";
+ ss << '"' << key << "\": ";
- if (std::isnan(value))
- ss << (value < 0 ? "-" : "") << "NaN";
- else if (std::isinf(value))
- ss << (value < 0 ? "-" : "") << "Infinity";
- else {
- const auto max_digits10 =
- std::numeric_limits<decltype(value)>::max_digits10;
- const auto max_fractional_digits10 = max_digits10 - 1;
- ss << std::scientific << std::setprecision(max_fractional_digits10)
- << value;
- }
+ const auto max_digits10 = std::numeric_limits<decltype(value)>::max_digits10;
+ const auto max_fractional_digits10 = max_digits10 - 1;
+
+ ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
return ss.str();
}
-int64_t RoundDouble(double v) { return std::lround(v); }
+int64_t RoundDouble(double v) { return static_cast<int64_t>(v + 0.5); }
} // end namespace
@@ -115,7 +80,12 @@ bool JSONReporter::ReportContext(const Context& context) {
out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
if (Context::executable_name) {
- out << indent << FormatKV("executable", Context::executable_name) << ",\n";
+ // windows uses backslash for its path separator,
+ // which must be escaped in JSON otherwise it blows up conforming JSON
+ // decoders
+ std::string executable_name = Context::executable_name;
+ ReplaceAll(&executable_name, "\\", "\\\\");
+ out << indent << FormatKV("executable", executable_name) << ",\n";
}
CPUInfo const& info = context.cpu_info;
@@ -125,10 +95,8 @@ bool JSONReporter::ReportContext(const Context& context) {
<< FormatKV("mhz_per_cpu",
RoundDouble(info.cycles_per_second / 1000000.0))
<< ",\n";
- if (CPUInfo::Scaling::UNKNOWN != info.scaling) {
- out << indent << FormatKV("cpu_scaling_enabled", info.scaling == CPUInfo::Scaling::ENABLED ? true : false)
- << ",\n";
- }
+ out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled)
+ << ",\n";
out << indent << "\"caches\": [\n";
indent = std::string(6, ' ');
@@ -140,7 +108,7 @@ bool JSONReporter::ReportContext(const Context& context) {
out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
<< ",\n";
out << cache_indent
- << FormatKV("size", static_cast<int64_t>(CI.size)) << ",\n";
+ << FormatKV("size", static_cast<int64_t>(CI.size) * 1000u) << ",\n";
out << cache_indent
<< FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
<< "\n";
@@ -163,13 +131,6 @@ bool JSONReporter::ReportContext(const Context& context) {
const char build_type[] = "debug";
#endif
out << indent << FormatKV("library_build_type", build_type) << "\n";
-
- if (internal::global_context != nullptr) {
- for (const auto& kv: *internal::global_context) {
- out << indent << FormatKV(kv.first, kv.second) << "\n";
- }
- }
-
// Close context block and open the list of benchmarks.
out << inner_indent << "},\n";
out << inner_indent << "\"benchmarks\": [\n";
@@ -207,11 +168,7 @@ void JSONReporter::PrintRunData(Run const& run) {
std::string indent(6, ' ');
std::ostream& out = GetOutputStream();
out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
- out << indent << FormatKV("family_index", run.family_index) << ",\n";
- out << indent
- << FormatKV("per_family_instance_index", run.per_family_instance_index)
- << ",\n";
- out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
+ out << indent << FormatKV("run_name", run.run_name) << ",\n";
out << indent << FormatKV("run_type", [&run]() -> const char* {
switch (run.run_type) {
case BenchmarkReporter::Run::RT_Iteration:
@@ -221,12 +178,6 @@ void JSONReporter::PrintRunData(Run const& run) {
}
BENCHMARK_UNREACHABLE();
}()) << ",\n";
- out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
- if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) {
- out << indent << FormatKV("repetition_index", run.repetition_index)
- << ",\n";
- }
- out << indent << FormatKV("threads", run.threads) << ",\n";
if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
}
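Two behavioural points in this hunk: the restored FormatKV(double) always prints scientific notation with max_digits10 precision and has no special case for NaN or infinity (an ostream would emit bare nan/inf tokens, which strict JSON parsers reject), and RoundDouble() truncates after adding 0.5 instead of calling std::lround, which differs for negative inputs. A two-line illustration of the rounding difference (values illustrative, assuming <cmath> and <cstdint>):

    int64_t a = static_cast<int64_t>(-0.75 + 0.5);  // 0  : restored RoundDouble
    long b = std::lround(-0.75);                     // -1 : version being replaced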
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/mutex.h b/gnu/llvm/libcxx/utils/google-benchmark/src/mutex.h
index 9cc414ec467..5f461d05a0c 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/mutex.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/mutex.h
@@ -9,60 +9,60 @@
// Enable thread safety attributes only with clang.
// The attributes can be safely erased when compiling with other compilers.
#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
-#define THREAD_ANNOTATION_ATTRIBUTE_(x) __attribute__((x))
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
-#define THREAD_ANNOTATION_ATTRIBUTE_(x) // no-op
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif
-#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(capability(x))
+#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
-#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE_(scoped_lockable)
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
-#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(guarded_by(x))
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
-#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(pt_guarded_by(x))
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
#define ACQUIRED_BEFORE(...) \
- THREAD_ANNOTATION_ATTRIBUTE_(acquired_before(__VA_ARGS__))
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
#define ACQUIRED_AFTER(...) \
- THREAD_ANNOTATION_ATTRIBUTE_(acquired_after(__VA_ARGS__))
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define REQUIRES(...) \
- THREAD_ANNOTATION_ATTRIBUTE_(requires_capability(__VA_ARGS__))
+ THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) \
- THREAD_ANNOTATION_ATTRIBUTE_(requires_shared_capability(__VA_ARGS__))
+ THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...) \
- THREAD_ANNOTATION_ATTRIBUTE_(acquire_capability(__VA_ARGS__))
+ THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) \
- THREAD_ANNOTATION_ATTRIBUTE_(acquire_shared_capability(__VA_ARGS__))
+ THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
#define RELEASE(...) \
- THREAD_ANNOTATION_ATTRIBUTE_(release_capability(__VA_ARGS__))
+ THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) \
- THREAD_ANNOTATION_ATTRIBUTE_(release_shared_capability(__VA_ARGS__))
+ THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) \
- THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_capability(__VA_ARGS__))
+ THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
#define TRY_ACQUIRE_SHARED(...) \
- THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_shared_capability(__VA_ARGS__))
+ THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
-#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE_(locks_excluded(__VA_ARGS__))
+#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
-#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(assert_capability(x))
+#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
#define ASSERT_SHARED_CAPABILITY(x) \
- THREAD_ANNOTATION_ATTRIBUTE_(assert_shared_capability(x))
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
-#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(lock_returned(x))
+#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#define NO_THREAD_SAFETY_ANALYSIS \
- THREAD_ANNOTATION_ATTRIBUTE_(no_thread_safety_analysis)
+ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
namespace benchmark {
@@ -71,7 +71,7 @@ typedef std::condition_variable Condition;
// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
// we can annotate them with thread safety attributes and use the
// -Wthread-safety warning with clang. The standard library types cannot be
-// used directly because they do not provide the required annotations.
+// used directly because they do not provide the required annotations.
class CAPABILITY("mutex") Mutex {
public:
Mutex() {}
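
For context on the NOTE above: the macros earlier in this hunk are consumed by annotating members and functions of the wrapper classes. A minimal illustrative sketch, not taken from the patch, assuming the Mutex/MutexLock wrappers this header defines and the macros from the hunk above; with clang's -Wthread-safety, any access to value_ made without holding mu_ is diagnosed at compile time:

```cpp
class Counter {
 public:
  void Increment() {
    MutexLock lock(mu_);  // RAII guard declared alongside Mutex in this header
    ++value_;             // OK: mu_ is held here
  }
  int Read() const REQUIRES(mu_) {  // callers must already hold mu_
    return value_;
  }

 private:
  mutable Mutex mu_;
  int value_ GUARDED_BY(mu_) = 0;
};
```
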
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/reporter.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/reporter.cc
index 14dd40dc72f..59bc5f71023 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/reporter.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/reporter.cc
@@ -18,8 +18,6 @@
#include <cstdlib>
#include <iostream>
-#include <map>
-#include <string>
#include <tuple>
#include <vector>
@@ -27,9 +25,6 @@
#include "string_util.h"
namespace benchmark {
-namespace internal {
-extern std::map<std::string, std::string>* global_context;
-}
BenchmarkReporter::BenchmarkReporter()
: output_stream_(&std::cout), error_stream_(&std::cerr) {}
@@ -54,7 +49,7 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Out << "CPU Caches:\n";
for (auto &CInfo : info.caches) {
Out << " L" << CInfo.level << " " << CInfo.type << " "
- << (CInfo.size / 1024) << " KiB";
+ << (CInfo.size / 1000) << "K";
if (CInfo.num_sharing != 0)
Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
Out << "\n";
@@ -69,13 +64,7 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Out << "\n";
}
- if (internal::global_context != nullptr) {
- for (const auto& kv: *internal::global_context) {
- Out << kv.first << ": " << kv.second << "\n";
- }
- }
-
- if (CPUInfo::Scaling::ENABLED == info.scaling) {
+ if (info.scaling_enabled) {
Out << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra "
"overhead.\n";
@@ -94,7 +83,7 @@ BenchmarkReporter::Context::Context()
: cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {}
std::string BenchmarkReporter::Run::benchmark_name() const {
- std::string name = run_name.str();
+ std::string name = run_name;
if (run_type == RT_Aggregate) {
name += "_" + aggregate_name;
}
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/sleep.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/sleep.cc
index 4609d540ead..1512ac90f7e 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/sleep.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/sleep.cc
@@ -24,10 +24,6 @@
#include <windows.h>
#endif
-#ifdef BENCHMARK_OS_ZOS
-#include <unistd.h>
-#endif
-
namespace benchmark {
#ifdef BENCHMARK_OS_WINDOWS
// Window's Sleep takes milliseconds argument.
@@ -37,23 +33,11 @@ void SleepForSeconds(double seconds) {
}
#else // BENCHMARK_OS_WINDOWS
void SleepForMicroseconds(int microseconds) {
-#ifdef BENCHMARK_OS_ZOS
- // z/OS does not support nanosleep. Instead call sleep() and then usleep() to
- // sleep for the remaining microseconds because usleep() will fail if its
- // argument is greater than 1000000.
- div_t sleepTime = div(microseconds, kNumMicrosPerSecond);
- int seconds = sleepTime.quot;
- while (seconds != 0)
- seconds = sleep(seconds);
- while (usleep(sleepTime.rem) == -1 && errno == EINTR)
- ;
-#else
struct timespec sleep_time;
sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
; // Ignore signals and wait for the full interval to elapse.
-#endif
}
void SleepForMilliseconds(int milliseconds) {
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/statistics.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/statistics.cc
index 57472b9ff99..e821aec18b7 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/statistics.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/statistics.cc
@@ -97,7 +97,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
- const IterationCount run_iterations = reports.front().iterations;
+ int64_t const run_iterations = reports.front().iterations;
// create stats for user counters
struct CounterStat {
Counter c;
@@ -147,13 +147,8 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
for (const auto& Stat : *reports[0].statistics) {
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run data;
- data.run_name = reports[0].run_name;
- data.family_index = reports[0].family_index;
- data.per_family_instance_index = reports[0].per_family_instance_index;
+ data.run_name = reports[0].benchmark_name();
data.run_type = BenchmarkReporter::Run::RT_Aggregate;
- data.threads = reports[0].threads;
- data.repetitions = reports[0].repetitions;
- data.repetition_index = Run::no_repetition_index;
data.aggregate_name = Stat.name_;
data.report_label = report_label;
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/string_util.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/string_util.cc
index 3551418174f..05ac5b4ea36 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/string_util.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/string_util.cc
@@ -1,9 +1,6 @@
#include "string_util.h"
#include <array>
-#ifdef BENCHMARK_STL_ANDROID_GNUSTL
-#include <cerrno>
-#endif
#include <cmath>
#include <cstdarg>
#include <cstdio>
@@ -163,17 +160,13 @@ std::string StrFormat(const char* format, ...) {
return tmp;
}
-std::vector<std::string> StrSplit(const std::string& str, char delim) {
- if (str.empty()) return {};
- std::vector<std::string> ret;
- size_t first = 0;
- size_t next = str.find(delim);
- for (; next != std::string::npos;
- first = next + 1, next = str.find(delim, first)) {
- ret.push_back(str.substr(first, next - first));
+void ReplaceAll(std::string* str, const std::string& from,
+ const std::string& to) {
+ std::size_t start = 0;
+ while ((start = str->find(from, start)) != std::string::npos) {
+ str->replace(start, from.length(), to);
+ start += to.length();
}
- ret.push_back(str.substr(first));
- return ret;
}
#ifdef BENCHMARK_STL_ANDROID_GNUSTL
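
The hunk restores the ReplaceAll() helper in place of StrSplit(). A small hypothetical usage sketch of the restored function; the wrapper function and the values are illustrative only:

```cpp
#include <cassert>
#include <string>

void ReplaceAllExample() {
  std::string path = "a/b/c";
  ReplaceAll(&path, "/", "::");  // rewrites every occurrence in place
  assert(path == "a::b::c");
}
```
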
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/string_util.h b/gnu/llvm/libcxx/utils/google-benchmark/src/string_util.h
index 6bc28b6912a..fc5f8b0304b 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/string_util.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/string_util.h
@@ -12,9 +12,7 @@ void AppendHumanReadable(int n, std::string* str);
std::string HumanReadableNumber(double n, double one_k = 1024.0);
-#if defined(__MINGW32__)
-__attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
-#elif defined(__GNUC__)
+#ifdef __GNUC__
__attribute__((format(printf, 1, 2)))
#endif
std::string
@@ -37,7 +35,8 @@ inline std::string StrCat(Args&&... args) {
return ss.str();
}
-std::vector<std::string> StrSplit(const std::string& str, char delim);
+void ReplaceAll(std::string* str, const std::string& from,
+ const std::string& to);
#ifdef BENCHMARK_STL_ANDROID_GNUSTL
/*
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/sysinfo.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/sysinfo.cc
index c1969ea2d3f..c0c07e5e62a 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/sysinfo.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/sysinfo.cc
@@ -29,8 +29,7 @@
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
- defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD || \
- defined BENCHMARK_OS_DRAGONFLY
+ defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD
#define BENCHMARK_HAS_SYSCTL
#include <sys/sysctl.h>
#endif
@@ -38,9 +37,6 @@
#if defined(BENCHMARK_OS_SOLARIS)
#include <kstat.h>
#endif
-#if defined(BENCHMARK_OS_QNX)
-#include <sys/syspage.h>
-#endif
#include <algorithm>
#include <array>
@@ -58,7 +54,6 @@
#include <memory>
#include <sstream>
#include <locale>
-#include <utility>
#include "check.h"
#include "cycleclock.h"
@@ -211,12 +206,9 @@ bool ReadFromFile(std::string const& fname, ArgT* arg) {
return f.good();
}
-CPUInfo::Scaling CpuScaling(int num_cpus) {
+bool CpuScalingEnabled(int num_cpus) {
// We don't have a valid CPU count, so don't even bother.
- if (num_cpus <= 0) return CPUInfo::Scaling::UNKNOWN;
-#ifdef BENCHMARK_OS_QNX
- return CPUInfo::Scaling::UNKNOWN;
-#endif
+ if (num_cpus <= 0) return false;
#ifndef BENCHMARK_OS_WINDOWS
// On Linux, the CPUfreq subsystem exposes CPU information as files on the
// local file system. If reading the exported files fails, then we may not be
@@ -225,11 +217,10 @@ CPUInfo::Scaling CpuScaling(int num_cpus) {
for (int cpu = 0; cpu < num_cpus; ++cpu) {
std::string governor_file =
StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
- if (ReadFromFile(governor_file, &res) && res != "performance") return CPUInfo::Scaling::ENABLED;
+ if (ReadFromFile(governor_file, &res) && res != "performance") return true;
}
- return CPUInfo::Scaling::DISABLED;
#endif
- return CPUInfo::Scaling::UNKNOWN;
+ return false;
}
int CountSetBitsInCPUMap(std::string Val) {
@@ -273,7 +264,7 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
else if (f && suffix != "K")
PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix);
else if (suffix == "K")
- info.size *= 1024;
+ info.size *= 1000;
}
if (!ReadFromFile(StrCat(FPath, "type"), &info.type))
PrintErrorAndDie("Failed to read from file ", FPath, "type");
@@ -365,42 +356,6 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
}
return res;
}
-#elif BENCHMARK_OS_QNX
-std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX() {
- std::vector<CPUInfo::CacheInfo> res;
- struct cacheattr_entry *cache = SYSPAGE_ENTRY(cacheattr);
- uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr);
- int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize ;
- for(int i = 0; i < num; ++i ) {
- CPUInfo::CacheInfo info;
- switch (cache->flags){
- case CACHE_FLAG_INSTR :
- info.type = "Instruction";
- info.level = 1;
- break;
- case CACHE_FLAG_DATA :
- info.type = "Data";
- info.level = 1;
- break;
- case CACHE_FLAG_UNIFIED :
- info.type = "Unified";
- info.level = 2;
- break;
- case CACHE_FLAG_SHARED :
- info.type = "Shared";
- info.level = 3;
- break;
- default :
- continue;
- break;
- }
- info.size = cache->line_size * cache->num_lines;
- info.num_sharing = 0;
- res.push_back(std::move(info));
- cache = SYSPAGE_ARRAY_ADJ_OFFSET(cacheattr, cache, elsize);
- }
- return res;
-}
#endif
std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
@@ -408,8 +363,6 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
return GetCacheSizesMacOSX();
#elif defined(BENCHMARK_OS_WINDOWS)
return GetCacheSizesWindows();
-#elif defined(BENCHMARK_OS_QNX)
- return GetCacheSizesQNX();
#else
return GetCacheSizesFromKVFS();
#endif
@@ -434,20 +387,9 @@ std::string GetSystemName() {
#endif
return str;
#else // defined(BENCHMARK_OS_WINDOWS)
-#ifndef HOST_NAME_MAX
-#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac Doesnt have HOST_NAME_MAX defined
-#define HOST_NAME_MAX 64
-#elif defined(BENCHMARK_OS_NACL)
-#define HOST_NAME_MAX 64
-#elif defined(BENCHMARK_OS_QNX)
-#define HOST_NAME_MAX 154
-#elif defined(BENCHMARK_OS_RTEMS)
-#define HOST_NAME_MAX 256
-#else
-#warning "HOST_NAME_MAX not defined. using 64"
+#ifdef BENCHMARK_OS_MACOSX // Mac doesn't have HOST_NAME_MAX defined
#define HOST_NAME_MAX 64
#endif
-#endif // def HOST_NAME_MAX
char hostname[HOST_NAME_MAX];
int retVal = gethostname(hostname, HOST_NAME_MAX);
if (retVal != 0) return std::string("");
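
The reduced fallback above only special-cases macOS. As a self-contained sketch, the same gethostname() pattern looks roughly like this, with the 64-byte fallback applied unconditionally purely for illustration:

```cpp
#include <string>
#include <unistd.h>

#ifndef HOST_NAME_MAX
#define HOST_NAME_MAX 64  // illustrative fallback only
#endif

std::string HostNameOrEmpty() {
  char hostname[HOST_NAME_MAX];
  if (gethostname(hostname, HOST_NAME_MAX) != 0) return std::string("");
  hostname[HOST_NAME_MAX - 1] = '\0';  // gethostname need not NUL-terminate on truncation
  return std::string(hostname);
}
```
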
@@ -479,8 +421,6 @@ int GetNumCPUs() {
strerror(errno));
}
return NumCPU;
-#elif defined(BENCHMARK_OS_QNX)
- return static_cast<int>(_syspage_ptr->num_cpu);
#else
int NumCPUs = 0;
int MaxID = -1;
@@ -530,11 +470,7 @@ int GetNumCPUs() {
BENCHMARK_UNREACHABLE();
}
-double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
- // Currently, scaling is only used on linux path here,
- // suppress diagnostics about it being unused on other paths.
- (void)scaling;
-
+double GetCPUCyclesPerSecond() {
#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
long freq;
@@ -545,15 +481,8 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
// cannot always be relied upon. The same reasons apply to /proc/cpuinfo as
// well.
if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)
- // If CPU scaling is disabled, use the the *current* frequency.
- // Note that we specifically don't want to read cpuinfo_cur_freq,
- // because it is only readable by root.
- || (scaling == CPUInfo::Scaling::DISABLED &&
- ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq",
- &freq))
- // Otherwise, if CPU scaling may be in effect, we want to use
- // the *maximum* frequency, not whatever CPU speed some random processor
- // happens to be using now.
+ // If CPU scaling is in effect, we want to use the *maximum* frequency,
+ // not whatever CPU speed some random processor happens to be using now.
|| ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
&freq)) {
// The value is in kHz (as the file name suggests). For example, on a
@@ -619,8 +548,6 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
"machdep.tsc_freq";
#elif defined BENCHMARK_OS_OPENBSD
"hw.cpuspeed";
-#elif defined BENCHMARK_OS_DRAGONFLY
- "hw.tsc_frequency";
#else
"hw.cpufrequency";
#endif
@@ -673,9 +600,6 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
double clock_hz = knp->value.ui64;
kstat_close(kc);
return clock_hz;
-#elif defined (BENCHMARK_OS_QNX)
- return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) *
- (int64_t)(1000 * 1000));
#endif
// If we've fallen through, attempt to roughly estimate the CPU clock rate.
const int estimate_time_ms = 1000;
@@ -685,10 +609,9 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
}
std::vector<double> GetLoadAvg() {
-#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
- defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
- defined BENCHMARK_OS_OPENBSD || defined BENCHMARK_OS_DRAGONFLY) && \
- !defined(__ANDROID__)
+#if defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
+ defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
+ defined BENCHMARK_OS_OPENBSD
constexpr int kMaxSamples = 3;
std::vector<double> res(kMaxSamples, 0.0);
const int nelem = getloadavg(res.data(), kMaxSamples);
@@ -712,11 +635,12 @@ const CPUInfo& CPUInfo::Get() {
CPUInfo::CPUInfo()
: num_cpus(GetNumCPUs()),
- scaling(CpuScaling(num_cpus)),
- cycles_per_second(GetCPUCyclesPerSecond(scaling)),
+ cycles_per_second(GetCPUCyclesPerSecond()),
caches(GetCacheSizes()),
+ scaling_enabled(CpuScalingEnabled(num_cpus)),
load_avg(GetLoadAvg()) {}
+
const SystemInfo& SystemInfo::Get() {
static const SystemInfo* info = new SystemInfo();
return *info;
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/thread_manager.h b/gnu/llvm/libcxx/utils/google-benchmark/src/thread_manager.h
index 28e2dd53aff..6e274c7ea6b 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/thread_manager.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/thread_manager.h
@@ -11,7 +11,7 @@ namespace internal {
class ThreadManager {
public:
- explicit ThreadManager(int num_threads)
+ ThreadManager(int num_threads)
: alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
@@ -38,7 +38,7 @@ class ThreadManager {
public:
struct Result {
- IterationCount iterations = 0;
+ int64_t iterations = 0;
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/thread_timer.h b/gnu/llvm/libcxx/utils/google-benchmark/src/thread_timer.h
index 1703ca0d6f8..eaf108e017d 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/thread_timer.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/thread_timer.h
@@ -8,22 +8,14 @@ namespace benchmark {
namespace internal {
class ThreadTimer {
- explicit ThreadTimer(bool measure_process_cpu_time_)
- : measure_process_cpu_time(measure_process_cpu_time_) {}
-
public:
- static ThreadTimer Create() {
- return ThreadTimer(/*measure_process_cpu_time_=*/false);
- }
- static ThreadTimer CreateProcessCpuTime() {
- return ThreadTimer(/*measure_process_cpu_time_=*/true);
- }
+ ThreadTimer() = default;
// Called by each thread
void StartTimer() {
running_ = true;
start_real_time_ = ChronoClockNow();
- start_cpu_time_ = ReadCpuTimerOfChoice();
+ start_cpu_time_ = ThreadCPUUsage();
}
// Called by each thread
@@ -33,8 +25,7 @@ class ThreadTimer {
real_time_used_ += ChronoClockNow() - start_real_time_;
// Floating point error can result in the subtraction producing a negative
// time. Guard against that.
- cpu_time_used_ +=
- std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
+ cpu_time_used_ += std::max<double>(ThreadCPUUsage() - start_cpu_time_, 0);
}
// Called by each thread
@@ -43,32 +34,24 @@ class ThreadTimer {
bool running() const { return running_; }
// REQUIRES: timer is not running
- double real_time_used() const {
+ double real_time_used() {
CHECK(!running_);
return real_time_used_;
}
// REQUIRES: timer is not running
- double cpu_time_used() const {
+ double cpu_time_used() {
CHECK(!running_);
return cpu_time_used_;
}
// REQUIRES: timer is not running
- double manual_time_used() const {
+ double manual_time_used() {
CHECK(!running_);
return manual_time_used_;
}
private:
- double ReadCpuTimerOfChoice() const {
- if (measure_process_cpu_time) return ProcessCPUUsage();
- return ThreadCPUUsage();
- }
-
- // should the thread, or the process, time be measured?
- const bool measure_process_cpu_time;
-
bool running_ = false; // Is the timer running
double start_real_time_ = 0; // If running_
double start_cpu_time_ = 0; // If running_
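
The timer above accumulates elapsed real and CPU time between StartTimer() and StopTimer(), clamping the CPU delta at zero. A generic, self-contained sketch of that accumulation pattern, with std::chrono standing in for ChronoClockNow()/ThreadCPUUsage():

```cpp
#include <algorithm>
#include <chrono>

struct TimerSketch {
  void Start() { start_ = Now(); }
  void Stop() {
    // Clamp at zero, mirroring the std::max<double>(..., 0) guard above,
    // so rounding error can never produce a negative delta.
    used_ += std::max(Now() - start_, 0.0);
  }
  double used() const { return used_; }

 private:
  static double Now() {
    return std::chrono::duration<double>(
               std::chrono::steady_clock::now().time_since_epoch())
        .count();
  }
  double start_ = 0;
  double used_ = 0;
};
```
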
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/src/timers.cc b/gnu/llvm/libcxx/utils/google-benchmark/src/timers.cc
index af4767dff94..7613ff92c6e 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/src/timers.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/src/timers.cc
@@ -28,8 +28,7 @@
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
-#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_DRAGONFLY || \
- defined BENCHMARK_OS_MACOSX
+#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
#include <sys/sysctl.h>
#endif
#if defined(BENCHMARK_OS_MACOSX)
@@ -179,75 +178,40 @@ double ThreadCPUUsage() {
#endif
}
-std::string LocalDateTimeString() {
- // Write the local time in RFC3339 format yyyy-mm-ddTHH:MM:SS+/-HH:MM.
+namespace {
+
+std::string DateTimeString(bool local) {
typedef std::chrono::system_clock Clock;
std::time_t now = Clock::to_time_t(Clock::now());
- const std::size_t kTzOffsetLen = 6;
- const std::size_t kTimestampLen = 19;
-
- std::size_t tz_len;
- std::size_t timestamp_len;
- long int offset_minutes;
- char tz_offset_sign = '+';
- // tz_offset is set in one of three ways:
- // * strftime with %z - This either returns empty or the ISO 8601 time. The maximum length an
- // ISO 8601 string can be is 7 (e.g. -03:30, plus trailing zero).
- // * snprintf with %c%02li:%02li - The maximum length is 41 (one for %c, up to 19 for %02li,
- // one for :, up to 19 %02li, plus trailing zero).
- // * A fixed string of "-00:00". The maximum length is 7 (-00:00, plus trailing zero).
- //
- // Thus, the maximum size this needs to be is 41.
- char tz_offset[41];
- // Long enough buffer to avoid format-overflow warnings
- char storage[128];
+ const std::size_t kStorageSize = 128;
+ char storage[kStorageSize];
+ std::size_t written;
+ if (local) {
#if defined(BENCHMARK_OS_WINDOWS)
- std::tm *timeinfo_p = ::localtime(&now);
+ written =
+ std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
#else
- std::tm timeinfo;
- std::tm *timeinfo_p = &timeinfo;
- ::localtime_r(&now, &timeinfo);
+ std::tm timeinfo;
+ ::localtime_r(&now, &timeinfo);
+ written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
-
- tz_len = std::strftime(tz_offset, sizeof(tz_offset), "%z", timeinfo_p);
-
- if (tz_len < kTzOffsetLen && tz_len > 1) {
- // Timezone offset was written. strftime writes offset as +HHMM or -HHMM,
- // RFC3339 specifies an offset as +HH:MM or -HH:MM. To convert, we parse
- // the offset as an integer, then reprint it to a string.
-
- offset_minutes = ::strtol(tz_offset, NULL, 10);
- if (offset_minutes < 0) {
- offset_minutes *= -1;
- tz_offset_sign = '-';
- }
-
- tz_len = ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li",
- tz_offset_sign, offset_minutes / 100, offset_minutes % 100);
- CHECK(tz_len == kTzOffsetLen);
- ((void)tz_len); // Prevent unused variable warning in optimized build.
} else {
- // Unknown offset. RFC3339 specifies that unknown local offsets should be
- // written as UTC time with -00:00 timezone.
#if defined(BENCHMARK_OS_WINDOWS)
- // Potential race condition if another thread calls localtime or gmtime.
- timeinfo_p = ::gmtime(&now);
+ written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
#else
+ std::tm timeinfo;
::gmtime_r(&now, &timeinfo);
+ written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
-
- strncpy(tz_offset, "-00:00", kTzOffsetLen + 1);
}
-
- timestamp_len = std::strftime(storage, sizeof(storage), "%Y-%m-%dT%H:%M:%S",
- timeinfo_p);
- CHECK(timestamp_len == kTimestampLen);
- // Prevent unused variable warning in optimized build.
- ((void)kTimestampLen);
-
- std::strncat(storage, tz_offset, sizeof(storage) - timestamp_len - 1);
+ CHECK(written < kStorageSize);
+ ((void)written); // prevent unused variable in optimized mode.
return std::string(storage);
}
+} // end namespace
+
+std::string LocalDateTimeString() { return DateTimeString(true); }
+
} // end namespace benchmark
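
The rewritten DateTimeString() reduces to a strftime() call over localtime_r() or gmtime_r() output ("%F %T" prints YYYY-MM-DD HH:MM:SS). A stand-alone sketch of the POSIX local-time branch; the function name is illustrative:

```cpp
#include <chrono>
#include <ctime>
#include <string>

std::string LocalTimeSketch() {
  std::time_t now =
      std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
  std::tm timeinfo;
  ::localtime_r(&now, &timeinfo);  // POSIX; the Windows branch uses ::localtime
  char storage[128];
  std::size_t written =
      std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
  return written != 0 ? std::string(storage) : std::string();
}
```
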
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/AssemblyTests.cmake b/gnu/llvm/libcxx/utils/google-benchmark/test/AssemblyTests.cmake
index 3d078586f1d..8605221ff71 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/AssemblyTests.cmake
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/AssemblyTests.cmake
@@ -43,4 +43,3 @@ macro(add_filecheck_test name)
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
endforeach()
endmacro()
-
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/CMakeLists.txt b/gnu/llvm/libcxx/utils/google-benchmark/test/CMakeLists.txt
index 79cdf53b402..f15ce208189 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/CMakeLists.txt
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/CMakeLists.txt
@@ -38,28 +38,28 @@ add_library(output_test_helper STATIC output_test_helper.cc output_test.h)
macro(compile_benchmark_test name)
add_executable(${name} "${name}.cc")
- target_link_libraries(${name} benchmark::benchmark ${CMAKE_THREAD_LIBS_INIT})
+ target_link_libraries(${name} benchmark ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_benchmark_test)
macro(compile_benchmark_test_with_main name)
add_executable(${name} "${name}.cc")
- target_link_libraries(${name} benchmark::benchmark_main)
+ target_link_libraries(${name} benchmark_main)
endmacro(compile_benchmark_test_with_main)
macro(compile_output_test name)
add_executable(${name} "${name}.cc" output_test.h)
- target_link_libraries(${name} output_test_helper benchmark::benchmark
+ target_link_libraries(${name} output_test_helper benchmark
${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_output_test)
# Demonstration executable
compile_benchmark_test(benchmark_test)
-add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01)
+add_test(benchmark benchmark_test --benchmark_min_time=0.01)
compile_benchmark_test(filter_test)
macro(add_filter_test name filter expect)
- add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
- add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
+ add_test(${name} filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
+ add_test(${name}_list_only filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
endmacro(add_filter_test)
add_filter_test(filter_simple "Foo" 3)
@@ -82,19 +82,16 @@ add_filter_test(filter_regex_end ".*Ba$" 1)
add_filter_test(filter_regex_end_negative "-.*Ba$" 4)
compile_benchmark_test(options_test)
-add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01)
+add_test(options_benchmarks options_test --benchmark_min_time=0.01)
compile_benchmark_test(basic_test)
-add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01)
-
-compile_output_test(repetitions_test)
-add_test(NAME repetitions_benchmark COMMAND repetitions_test --benchmark_min_time=0.01 --benchmark_repetitions=3)
+add_test(basic_benchmark basic_test --benchmark_min_time=0.01)
compile_benchmark_test(diagnostics_test)
-add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01)
+add_test(diagnostics_test diagnostics_test --benchmark_min_time=0.01)
compile_benchmark_test(skip_with_error_test)
-add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01)
+add_test(skip_with_error_test skip_with_error_test --benchmark_min_time=0.01)
compile_benchmark_test(donotoptimize_test)
# Some of the issues with DoNotOptimize only occur when optimization is enabled
@@ -102,63 +99,53 @@ check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
if (BENCHMARK_HAS_O3_FLAG)
set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3")
endif()
-add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01)
+add_test(donotoptimize_test donotoptimize_test --benchmark_min_time=0.01)
compile_benchmark_test(fixture_test)
-add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01)
+add_test(fixture_test fixture_test --benchmark_min_time=0.01)
compile_benchmark_test(register_benchmark_test)
-add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01)
+add_test(register_benchmark_test register_benchmark_test --benchmark_min_time=0.01)
compile_benchmark_test(map_test)
-add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01)
+add_test(map_test map_test --benchmark_min_time=0.01)
compile_benchmark_test(multiple_ranges_test)
-add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01)
-
-compile_benchmark_test(args_product_test)
-add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01)
+add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01)
compile_benchmark_test_with_main(link_main_test)
-add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01)
+add_test(link_main_test link_main_test --benchmark_min_time=0.01)
compile_output_test(reporter_output_test)
-add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01)
+add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01)
compile_output_test(templated_fixture_test)
-add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01)
+add_test(templated_fixture_test templated_fixture_test --benchmark_min_time=0.01)
compile_output_test(user_counters_test)
-add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01)
-
-compile_output_test(perf_counters_test)
-add_test(NAME perf_counters_test COMMAND perf_counters_test --benchmark_min_time=0.01 --benchmark_perf_counters=CYCLES,BRANCHES)
-
-compile_output_test(internal_threading_test)
-add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01)
+add_test(user_counters_test user_counters_test --benchmark_min_time=0.01)
compile_output_test(report_aggregates_only_test)
-add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01)
+add_test(report_aggregates_only_test report_aggregates_only_test --benchmark_min_time=0.01)
compile_output_test(display_aggregates_only_test)
-add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01)
+add_test(display_aggregates_only_test display_aggregates_only_test --benchmark_min_time=0.01)
compile_output_test(user_counters_tabular_test)
-add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)
+add_test(user_counters_tabular_test user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)
compile_output_test(user_counters_thousands_test)
-add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01)
+add_test(user_counters_thousands_test user_counters_thousands_test --benchmark_min_time=0.01)
compile_output_test(memory_manager_test)
-add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01)
+add_test(memory_manager_test memory_manager_test --benchmark_min_time=0.01)
check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
if (BENCHMARK_HAS_CXX03_FLAG)
compile_benchmark_test(cxx03_test)
set_target_properties(cxx03_test
PROPERTIES
- CXX_STANDARD 98
- CXX_STANDARD_REQUIRED YES)
+ COMPILE_FLAGS "-std=c++03")
# libstdc++ provides different definitions within <map> between dialects. When
# LTO is enabled and -Werror is specified GCC diagnoses this ODR violation
# causing the test to fail to compile. To prevent this we explicitly disable
@@ -169,7 +156,7 @@ if (BENCHMARK_HAS_CXX03_FLAG)
PROPERTIES
LINK_FLAGS "-Wno-odr")
endif()
- add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01)
+ add_test(cxx03 cxx03_test --benchmark_min_time=0.01)
endif()
# Attempt to work around flaky test failures when running on Appveyor servers.
@@ -179,7 +166,7 @@ else()
set(COMPLEXITY_MIN_TIME "0.01")
endif()
compile_output_test(complexity_test)
-add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
+add_test(complexity_benchmark complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
###############################################################################
# GoogleTest Unit Tests
@@ -188,22 +175,24 @@ add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=
if (BENCHMARK_ENABLE_GTEST_TESTS)
macro(compile_gtest name)
add_executable(${name} "${name}.cc")
- target_link_libraries(${name} benchmark::benchmark
- gmock_main ${CMAKE_THREAD_LIBS_INIT})
+ if (TARGET googletest)
+ add_dependencies(${name} googletest)
+ endif()
+ if (GTEST_INCLUDE_DIRS)
+ target_include_directories(${name} PRIVATE ${GTEST_INCLUDE_DIRS})
+ endif()
+ target_link_libraries(${name} benchmark
+ ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_gtest)
macro(add_gtest name)
compile_gtest(${name})
- add_test(NAME ${name} COMMAND ${name})
+ add_test(${name} ${name})
endmacro()
add_gtest(benchmark_gtest)
- add_gtest(benchmark_name_gtest)
- add_gtest(benchmark_random_interleaving_gtest)
- add_gtest(commandlineflags_gtest)
add_gtest(statistics_gtest)
add_gtest(string_util_gtest)
- add_gtest(perf_counters_gtest)
endif(BENCHMARK_ENABLE_GTEST_TESTS)
###############################################################################
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/basic_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/basic_test.cc
index 33642211e20..d07fbc00b15 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/basic_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/basic_test.cc
@@ -98,7 +98,7 @@ BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
void BM_KeepRunning(benchmark::State& state) {
- benchmark::IterationCount iter_count = 0;
+ size_t iter_count = 0;
assert(iter_count == state.iterations());
while (state.KeepRunning()) {
++iter_count;
@@ -108,33 +108,18 @@ void BM_KeepRunning(benchmark::State& state) {
BENCHMARK(BM_KeepRunning);
void BM_KeepRunningBatch(benchmark::State& state) {
- // Choose a batch size >1000 to skip the typical runs with iteration
- // targets of 10, 100 and 1000. If these are not actually skipped the
- // bug would be detectable as consecutive runs with the same iteration
- // count. Below we assert that this does not happen.
- const benchmark::IterationCount batch_size = 1009;
-
- static benchmark::IterationCount prior_iter_count = 0;
- benchmark::IterationCount iter_count = 0;
+ // Choose a prime batch size to avoid evenly dividing max_iterations.
+ const size_t batch_size = 101;
+ size_t iter_count = 0;
while (state.KeepRunningBatch(batch_size)) {
iter_count += batch_size;
}
assert(state.iterations() == iter_count);
-
- // Verify that the iteration count always increases across runs (see
- // comment above).
- assert(iter_count == batch_size // max_iterations == 1
- || iter_count > prior_iter_count); // max_iterations > batch_size
- prior_iter_count = iter_count;
}
-// Register with a fixed repetition count to establish the invariant that
-// the iteration count should always change across runs. This overrides
-// the --benchmark_repetitions command line flag, which would otherwise
-// cause this test to fail if set > 1.
-BENCHMARK(BM_KeepRunningBatch)->Repetitions(1);
+BENCHMARK(BM_KeepRunningBatch);
void BM_RangedFor(benchmark::State& state) {
- benchmark::IterationCount iter_count = 0;
+ size_t iter_count = 0;
for (auto _ : state) {
++iter_count;
}
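
The reverted BM_KeepRunningBatch comment is terse: KeepRunningBatch(n) advances the iteration count n at a time, so the final count is a multiple of n, and a prime n avoids dividing the framework's usual iteration targets evenly. A hypothetical stand-alone sketch, not one of the registered tests:

```cpp
#include "benchmark/benchmark.h"

static void BM_BatchSketch(benchmark::State& state) {
  // Each successful call accounts for 101 iterations at once, so the final
  // state.iterations() is a multiple of 101 rather than the raw target.
  while (state.KeepRunningBatch(101)) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_BatchSketch);
BENCHMARK_MAIN();
```
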
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/benchmark_gtest.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/benchmark_gtest.cc
index 14a885ba46d..10683b433ab 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/benchmark_gtest.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/benchmark_gtest.cc
@@ -1,15 +1,9 @@
-#include <map>
-#include <string>
#include <vector>
#include "../src/benchmark_register.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-namespace benchmark {
-namespace internal {
-extern std::map<std::string, std::string>* global_context;
-
namespace {
TEST(AddRangeTest, Simple) {
@@ -36,130 +30,4 @@ TEST(AddRangeTest, Advanced64) {
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
}
-TEST(AddRangeTest, FullRange8) {
- std::vector<int8_t> dst;
- AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
- EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
-}
-
-TEST(AddRangeTest, FullRange64) {
- std::vector<int64_t> dst;
- AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024);
- EXPECT_THAT(
- dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL,
- 1099511627776LL, 1125899906842624LL,
- 1152921504606846976LL, 9223372036854775807LL));
-}
-
-TEST(AddRangeTest, NegativeRanges) {
- std::vector<int> dst;
- AddRange(&dst, -8, 0, 2);
- EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0));
-}
-
-TEST(AddRangeTest, StrictlyNegative) {
- std::vector<int> dst;
- AddRange(&dst, -8, -1, 2);
- EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1));
-}
-
-TEST(AddRangeTest, SymmetricNegativeRanges) {
- std::vector<int> dst;
- AddRange(&dst, -8, 8, 2);
- EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8));
-}
-
-TEST(AddRangeTest, SymmetricNegativeRangesOddMult) {
- std::vector<int> dst;
- AddRange(&dst, -30, 32, 5);
- EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32));
-}
-
-TEST(AddRangeTest, NegativeRangesAsymmetric) {
- std::vector<int> dst;
- AddRange(&dst, -3, 5, 2);
- EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5));
-}
-
-TEST(AddRangeTest, NegativeRangesLargeStep) {
- // Always include -1, 0, 1 when crossing zero.
- std::vector<int> dst;
- AddRange(&dst, -8, 8, 10);
- EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8));
-}
-
-TEST(AddRangeTest, ZeroOnlyRange) {
- std::vector<int> dst;
- AddRange(&dst, 0, 0, 2);
- EXPECT_THAT(dst, testing::ElementsAre(0));
-}
-
-TEST(AddRangeTest, ZeroStartingRange) {
- std::vector<int> dst;
- AddRange(&dst, 0, 2, 2);
- EXPECT_THAT(dst, testing::ElementsAre(0, 1, 2));
-}
-
-TEST(AddRangeTest, NegativeRange64) {
- std::vector<int64_t> dst;
- AddRange<int64_t>(&dst, -4, 4, 2);
- EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4));
-}
-
-TEST(AddRangeTest, NegativeRangePreservesExistingOrder) {
- // If elements already exist in the range, ensure we don't change
- // their ordering by adding negative values.
- std::vector<int64_t> dst = {1, 2, 3};
- AddRange<int64_t>(&dst, -2, 2, 2);
- EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2));
-}
-
-TEST(AddRangeTest, FullNegativeRange64) {
- std::vector<int64_t> dst;
- const auto min = std::numeric_limits<int64_t>::min();
- const auto max = std::numeric_limits<int64_t>::max();
- AddRange(&dst, min, max, 1024);
- EXPECT_THAT(
- dst, testing::ElementsAreArray(std::vector<int64_t>{
- min, -1152921504606846976LL, -1125899906842624LL,
- -1099511627776LL, -1073741824LL, -1048576LL, -1024LL, -1LL, 0LL,
- 1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL,
- 1125899906842624LL, 1152921504606846976LL, max}));
-}
-
-TEST(AddRangeTest, Simple8) {
- std::vector<int8_t> dst;
- AddRange<int8_t>(&dst, 1, 8, 2);
- EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8));
-}
-
-TEST(AddCustomContext, Simple) {
- EXPECT_THAT(global_context, nullptr);
-
- AddCustomContext("foo", "bar");
- AddCustomContext("baz", "qux");
-
- EXPECT_THAT(*global_context,
- testing::UnorderedElementsAre(testing::Pair("foo", "bar"),
- testing::Pair("baz", "qux")));
-
- delete global_context;
- global_context = nullptr;
-}
-
-TEST(AddCustomContext, DuplicateKey) {
- EXPECT_THAT(global_context, nullptr);
-
- AddCustomContext("foo", "bar");
- AddCustomContext("foo", "qux");
-
- EXPECT_THAT(*global_context,
- testing::UnorderedElementsAre(testing::Pair("foo", "bar")));
-
- delete global_context;
- global_context = nullptr;
-}
-
-} // namespace
-} // namespace internal
-} // namespace benchmark
+} // end namespace
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/complexity_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/complexity_test.cc
index 0de73c5722b..323ddfe7ac5 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/complexity_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/complexity_test.cc
@@ -13,8 +13,7 @@ namespace {
int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
int AddComplexityTest(std::string test_name, std::string big_o_test_name,
- std::string rms_test_name, std::string big_o,
- int family_index) {
+ std::string rms_test_name, std::string big_o) {
SetSubstitutions({{"%name", test_name},
{"%bigo_name", big_o_test_name},
{"%rms_name", rms_test_name},
@@ -26,31 +25,21 @@ int AddComplexityTest(std::string test_name, std::string big_o_test_name,
{{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
{"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name.
{"^%rms_name %rms %rms[ ]*$", MR_Next}});
- AddCases(
- TC_JSONOut,
- {{"\"name\": \"%bigo_name\",$"},
- {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"%name\",$", MR_Next},
- {"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": %int,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"aggregate_name\": \"BigO\",$", MR_Next},
- {"\"cpu_coefficient\": %float,$", MR_Next},
- {"\"real_coefficient\": %float,$", MR_Next},
- {"\"big_o\": \"%bigo\",$", MR_Next},
- {"\"time_unit\": \"ns\"$", MR_Next},
- {"}", MR_Next},
- {"\"name\": \"%rms_name\",$"},
- {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"%name\",$", MR_Next},
- {"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": %int,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"aggregate_name\": \"RMS\",$", MR_Next},
- {"\"rms\": %float$", MR_Next},
- {"}", MR_Next}});
+ AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
+ {"\"run_name\": \"%name\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"BigO\",$", MR_Next},
+ {"\"cpu_coefficient\": %float,$", MR_Next},
+ {"\"real_coefficient\": %float,$", MR_Next},
+ {"\"big_o\": \"%bigo\",$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next},
+ {"\"name\": \"%rms_name\",$"},
+ {"\"run_name\": \"%name\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"RMS\",$", MR_Next},
+ {"\"rms\": %float$", MR_Next},
+ {"}", MR_Next}});
AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
{"^\"%bigo_name\"", MR_Not},
{"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}});
@@ -73,9 +62,9 @@ void BM_Complexity_O1(benchmark::State& state) {
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
-BENCHMARK(BM_Complexity_O1)
- ->Range(1, 1 << 18)
- ->Complexity([](benchmark::IterationCount) { return 1.0; });
+BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int64_t) {
+ return 1.0;
+});
const char *one_test_name = "BM_Complexity_O1";
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
@@ -89,15 +78,15 @@ const char *lambda_big_o_1 = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
- enum_big_o_1, /*family_index=*/0);
+ enum_big_o_1);
// Add auto enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
- auto_big_o_1, /*family_index=*/1);
+ auto_big_o_1);
// Add lambda tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
- lambda_big_o_1, /*family_index=*/2);
+ lambda_big_o_1);
// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
@@ -128,9 +117,7 @@ BENCHMARK(BM_Complexity_O_N)
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
- ->Complexity([](benchmark::IterationCount n) -> double {
- return static_cast<double>(n);
- });
+ ->Complexity([](int64_t n) -> double { return static_cast<double>(n); });
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
@@ -144,11 +131,11 @@ const char *lambda_big_o_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
- enum_auto_big_o_n, /*family_index=*/3);
+ enum_auto_big_o_n);
// Add lambda tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
- lambda_big_o_n, /*family_index=*/4);
+ lambda_big_o_n);
// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
@@ -169,9 +156,7 @@ BENCHMARK(BM_Complexity_O_N_log_N)
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
- ->Complexity([](benchmark::IterationCount n) {
- return kLog2E * n * log(static_cast<double>(n));
- });
+ ->Complexity([](int64_t n) { return kLog2E * n * log(static_cast<double>(n)); });
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
@@ -185,35 +170,11 @@ const char *lambda_big_o_n_lg_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
- rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
- /*family_index=*/6);
+ rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);
// Add lambda tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
- rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n,
- /*family_index=*/7);
-
-// ========================================================================= //
-// -------- Testing formatting of Complexity with captured args ------------ //
-// ========================================================================= //
-
-void BM_ComplexityCaptureArgs(benchmark::State& state, int n) {
- for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
- }
- state.SetComplexityN(n);
-}
-
-BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
- ->Complexity(benchmark::oN)
- ->Ranges({{1, 2}, {3, 4}});
-
-const std::string complexity_capture_name =
- "BM_ComplexityCaptureArgs/capture_test";
-
-ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
- complexity_capture_name + "_RMS", "N", /*family_index=*/9);
+ rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/cxx03_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/cxx03_test.cc
index c4c9a52273e..baa9ed9262b 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/cxx03_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/cxx03_test.cc
@@ -14,7 +14,7 @@
void BM_empty(benchmark::State& state) {
while (state.KeepRunning()) {
- volatile benchmark::IterationCount x = state.iterations();
+ volatile std::size_t x = state.iterations();
((void)x);
}
}
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/filter_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/filter_test.cc
index 1c198913b36..0e27065c155 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/filter_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/filter_test.cc
@@ -1,41 +1,36 @@
-#include <algorithm>
+#include "benchmark/benchmark.h"
+
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdlib>
+
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
-#include "benchmark/benchmark.h"
-
namespace {
class TestReporter : public benchmark::ConsoleReporter {
public:
- virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
+ virtual bool ReportContext(const Context& context) {
return ConsoleReporter::ReportContext(context);
};
- virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
+ virtual void ReportRuns(const std::vector<Run>& report) {
++count_;
- max_family_index_ =
- std::max<size_t>(max_family_index_, report[0].family_index);
ConsoleReporter::ReportRuns(report);
};
- TestReporter() : count_(0), max_family_index_(0) {}
+ TestReporter() : count_(0) {}
virtual ~TestReporter() {}
size_t GetCount() const { return count_; }
- size_t GetMaxFamilyIndex() const { return max_family_index_; }
-
private:
mutable size_t count_;
- mutable size_t max_family_index_;
};
} // end namespace
@@ -103,15 +98,6 @@ int main(int argc, char **argv) {
<< std::endl;
return -1;
}
-
- const size_t max_family_index = test_reporter.GetMaxFamilyIndex();
- const size_t num_families = reports_count == 0 ? 0 : 1 + max_family_index;
- if (num_families != expected_reports) {
- std::cerr << "ERROR: Expected " << expected_reports
- << " test families to be run but num_families = "
- << num_families << std::endl;
- return -1;
- }
}
return 0;
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/fixture_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/fixture_test.cc
index eba0a42d9cb..1462b10f02f 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/fixture_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/fixture_test.cc
@@ -4,37 +4,35 @@
#include <cassert>
#include <memory>
-#define FIXTURE_BECHMARK_NAME MyFixture
-
-class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture {
+class MyFixture : public ::benchmark::Fixture {
public:
- void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
+ void SetUp(const ::benchmark::State& state) {
if (state.thread_index == 0) {
assert(data.get() == nullptr);
data.reset(new int(42));
}
}
- void TearDown(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
+ void TearDown(const ::benchmark::State& state) {
if (state.thread_index == 0) {
assert(data.get() != nullptr);
data.reset();
}
}
- ~FIXTURE_BECHMARK_NAME() { assert(data == nullptr); }
+ ~MyFixture() { assert(data == nullptr); }
std::unique_ptr<int> data;
};
-BENCHMARK_F(FIXTURE_BECHMARK_NAME, Foo)(benchmark::State &st) {
+BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) {
assert(data.get() != nullptr);
assert(*data == 42);
for (auto _ : st) {
}
}
-BENCHMARK_DEFINE_F(FIXTURE_BECHMARK_NAME, Bar)(benchmark::State& st) {
+BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
if (st.thread_index == 0) {
assert(data.get() != nullptr);
assert(*data == 42);
@@ -45,7 +43,7 @@ BENCHMARK_DEFINE_F(FIXTURE_BECHMARK_NAME, Bar)(benchmark::State& st) {
}
st.SetItemsProcessed(st.range(0));
}
-BENCHMARK_REGISTER_F(FIXTURE_BECHMARK_NAME, Bar)->Arg(42);
-BENCHMARK_REGISTER_F(FIXTURE_BECHMARK_NAME, Bar)->Arg(42)->ThreadPerCpu();
+BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42);
+BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu();
BENCHMARK_MAIN();
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/map_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/map_test.cc
index 86391b36016..dbf7982a368 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/map_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/map_test.cc
@@ -34,11 +34,11 @@ BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
// Using fixtures.
class MapFixture : public ::benchmark::Fixture {
public:
- void SetUp(const ::benchmark::State& st) BENCHMARK_OVERRIDE {
+ void SetUp(const ::benchmark::State& st) {
m = ConstructRandomMap(static_cast<int>(st.range(0)));
}
- void TearDown(const ::benchmark::State&) BENCHMARK_OVERRIDE { m.clear(); }
+ void TearDown(const ::benchmark::State&) { m.clear(); }
std::map<int, int> m;
};
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/memory_manager_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/memory_manager_test.cc
index f0c192fcbd0..94be6083795 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/memory_manager_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/memory_manager_test.cc
@@ -5,8 +5,8 @@
#include "output_test.h"
class TestMemoryManager : public benchmark::MemoryManager {
- void Start() BENCHMARK_OVERRIDE {}
- void Stop(Result* result) BENCHMARK_OVERRIDE {
+ void Start() {}
+ void Stop(Result* result) {
result->num_allocs = 42;
result->max_bytes_used = 42000;
}
@@ -21,13 +21,8 @@ BENCHMARK(BM_empty);
ADD_CASES(TC_ConsoleOut, {{"^BM_empty %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_empty\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -37,7 +32,8 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}});
-int main(int argc, char* argv[]) {
+
+int main(int argc, char *argv[]) {
std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());
benchmark::RegisterMemoryManager(mm.get());
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc
index 6b61f3af47b..c64acabc25c 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc
@@ -28,7 +28,7 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
{2, 7, 15},
{7, 6, 3}}) {}
- void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
+ void SetUp(const ::benchmark::State& state) {
std::vector<int64_t> ranges = {state.range(0), state.range(1),
state.range(2)};
@@ -40,7 +40,8 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
// NOTE: This is not TearDown as we want to check after _all_ runs are
// complete.
virtual ~MultipleRangesFixture() {
- if (actualValues != expectedValues) {
+ assert(actualValues.size() == expectedValues.size());
+ if (actualValues.size() != expectedValues.size()) {
std::cout << "EXPECTED\n";
for (auto v : expectedValues) {
std::cout << "{";
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/options_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/options_test.cc
index 9f9a78667c9..fdec69174ee 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/options_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/options_test.cc
@@ -25,7 +25,6 @@ BENCHMARK(BM_basic)->Arg(42);
BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond);
BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond);
BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond);
-BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kSecond);
BENCHMARK(BM_basic)->Range(1, 8);
BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8);
BENCHMARK(BM_basic)->DenseRange(10, 15);
@@ -36,16 +35,6 @@ BENCHMARK(BM_basic)->UseRealTime();
BENCHMARK(BM_basic)->ThreadRange(2, 4);
BENCHMARK(BM_basic)->ThreadPerCpu();
BENCHMARK(BM_basic)->Repetitions(3);
-BENCHMARK(BM_basic)
- ->RangeMultiplier(std::numeric_limits<int>::max())
- ->Range(std::numeric_limits<int64_t>::min(),
- std::numeric_limits<int64_t>::max());
-
-// Negative ranges
-BENCHMARK(BM_basic)->Range(-64, -1);
-BENCHMARK(BM_basic)->RangeMultiplier(4)->Range(-8, 8);
-BENCHMARK(BM_basic)->DenseRange(-2, 2, 1);
-BENCHMARK(BM_basic)->Ranges({{-64, 1}, {-8, -1}});
void CustomArgs(benchmark::internal::Benchmark* b) {
for (int i = 0; i < 10; ++i) {
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/output_test.h b/gnu/llvm/libcxx/utils/google-benchmark/test/output_test.h
index 15368f9b683..9385761b214 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/output_test.h
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/output_test.h
@@ -158,7 +158,7 @@ T Results::GetAs(const char* entry_name) const {
// clang-format off
-#define CHECK_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value) \
+#define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \
CONCAT(CHECK_, relationship) \
(entry.getfn< var_type >(var_name), (value)) << "\n" \
<< __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \
@@ -169,7 +169,7 @@ T Results::GetAs(const char* entry_name) const {
// check with tolerance. eps_factor is the tolerance window, which is
// interpreted relative to value (eg, 0.1 means 10% of value).
-#define CHECK_FLOAT_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value, eps_factor) \
+#define _CHECK_FLOAT_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value, eps_factor) \
CONCAT(CHECK_FLOAT_, relationship) \
(entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \
<< __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \
@@ -187,16 +187,16 @@ T Results::GetAs(const char* entry_name) const {
<< "%)"
#define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \
- CHECK_RESULT_VALUE_IMPL(entry, GetAs, var_type, var_name, relationship, value)
+ _CHECK_RESULT_VALUE(entry, GetAs, var_type, var_name, relationship, value)
#define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \
- CHECK_RESULT_VALUE_IMPL(entry, GetCounterAs, var_type, var_name, relationship, value)
+ _CHECK_RESULT_VALUE(entry, GetCounterAs, var_type, var_name, relationship, value)
#define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \
- CHECK_FLOAT_RESULT_VALUE_IMPL(entry, GetAs, double, var_name, relationship, value, eps_factor)
+ _CHECK_FLOAT_RESULT_VALUE(entry, GetAs, double, var_name, relationship, value, eps_factor)
#define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \
- CHECK_FLOAT_RESULT_VALUE_IMPL(entry, GetCounterAs, double, var_name, relationship, value, eps_factor)
+ _CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor)
// clang-format on
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/output_test_helper.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/output_test_helper.cc
index b8ef1205744..5dc951d2bca 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/output_test_helper.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/output_test_helper.cc
@@ -48,9 +48,6 @@ SubMap& GetSubstitutions() {
{" %s ", "[ ]+"},
{"%time", "[ ]*" + time_re + "[ ]+ns"},
{"%console_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns [ ]*[0-9]+"},
- {"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"},
- {"%console_ms_report", "[ ]*" + time_re + "[ ]+ms [ ]*" + time_re + "[ ]+ms [ ]*[0-9]+"},
- {"%console_s_report", "[ ]*" + time_re + "[ ]+s [ ]*" + time_re + "[ ]+s [ ]*[0-9]+"},
{"%console_time_only_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns"},
{"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"},
{"%console_us_time_only_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us"},
@@ -59,8 +56,6 @@ SubMap& GetSubstitutions() {
"items_per_second,label,error_occurred,error_message"},
{"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"},
{"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"},
- {"%csv_ms_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ms,,,,,"},
- {"%csv_s_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",s,,,,,"},
{"%csv_bytes_report",
"[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"},
{"%csv_items_report",
@@ -139,7 +134,7 @@ class TestReporter : public benchmark::BenchmarkReporter {
TestReporter(std::vector<benchmark::BenchmarkReporter*> reps)
: reporters_(reps) {}
- virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
+ virtual bool ReportContext(const Context& context) {
bool last_ret = false;
bool first = true;
for (auto rep : reporters_) {
@@ -153,10 +148,10 @@ class TestReporter : public benchmark::BenchmarkReporter {
return last_ret;
}
- void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
+ void ReportRuns(const std::vector<Run>& report) {
for (auto rep : reporters_) rep->ReportRuns(report);
}
- void Finalize() BENCHMARK_OVERRIDE {
+ void Finalize() {
for (auto rep : reporters_) rep->Finalize();
}
@@ -378,12 +373,6 @@ int SetSubstitutions(
return 0;
}
-// Disable deprecated warnings temporarily because we need to reference
-// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-#endif
void RunOutputTests(int argc, char* argv[]) {
using internal::GetTestCaseList;
benchmark::Initialize(&argc, argv);
@@ -442,10 +431,6 @@ void RunOutputTests(int argc, char* argv[]) {
internal::GetResultsChecker().CheckResults(csv.out_stream);
}
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
-
int SubstrCnt(const std::string& haystack, const std::string& pat) {
if (pat.length() == 0) return 0;
int count = 0;
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/register_benchmark_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/register_benchmark_test.cc
index c027eabacae..3ac5b21fb34 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/register_benchmark_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/register_benchmark_test.cc
@@ -10,7 +10,7 @@ namespace {
class TestReporter : public benchmark::ConsoleReporter {
public:
- virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
+ virtual void ReportRuns(const std::vector<Run>& report) {
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/reporter_output_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/reporter_output_test.cc
index 989eb48ecc8..ec6d51b3591 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/reporter_output_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/reporter_output_test.cc
@@ -15,7 +15,7 @@ ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next},
static int AddContextCases() {
AddCases(TC_ConsoleErr,
{
- {"^%int-%int-%intT%int:%int:%int[-+]%int:%int$", MR_Default},
+ {"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default},
{"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
{"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
});
@@ -28,7 +28,8 @@ static int AddContextCases() {
MR_Next},
{"\"num_cpus\": %int,$", MR_Next},
{"\"mhz_per_cpu\": %float,$", MR_Next},
- {"\"caches\": \\[$", MR_Default}});
+ {"\"cpu_scaling_enabled\": ", MR_Next},
+ {"\"caches\": \\[$", MR_Next}});
auto const& Info = benchmark::CPUInfo::Get();
auto const& Caches = Info.caches;
if (!Caches.empty()) {
@@ -37,9 +38,9 @@ static int AddContextCases() {
for (size_t I = 0; I < Caches.size(); ++I) {
std::string num_caches_str =
Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
- AddCases(TC_ConsoleErr,
- {{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str,
- MR_Next}});
+ AddCases(
+ TC_ConsoleErr,
+ {{"L%int (Data|Instruction|Unified) %intK" + num_caches_str, MR_Next}});
AddCases(TC_JSONOut, {{"\\{$", MR_Next},
{"\"type\": \"", MR_Next},
{"\"level\": %int,$", MR_Next},
@@ -71,13 +72,8 @@ BENCHMARK(BM_basic);
ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_basic\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -91,8 +87,6 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
void BM_bytes_per_second(benchmark::State& state) {
for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
}
state.SetBytesProcessed(1);
}
@@ -101,13 +95,8 @@ BENCHMARK(BM_bytes_per_second);
ADD_CASES(TC_ConsoleOut, {{"^BM_bytes_per_second %console_report "
"bytes_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"},
- {"\"family_index\": 1,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_bytes_per_second\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -122,8 +111,6 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
void BM_items_per_second(benchmark::State& state) {
for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
}
state.SetItemsProcessed(1);
}
@@ -132,13 +119,8 @@ BENCHMARK(BM_items_per_second);
ADD_CASES(TC_ConsoleOut, {{"^BM_items_per_second %console_report "
"items_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"},
- {"\"family_index\": 2,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_items_per_second\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -160,13 +142,8 @@ BENCHMARK(BM_label);
ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"},
- {"\"family_index\": 3,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_label\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -177,101 +154,6 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
"label\"%csv_label_report_end$"}});
// ========================================================================= //
-// ------------------------ Testing Time Label Output ---------------------- //
-// ========================================================================= //
-
-void BM_time_label_nanosecond(benchmark::State& state) {
- for (auto _ : state) {
- }
-}
-BENCHMARK(BM_time_label_nanosecond)->Unit(benchmark::kNanosecond);
-
-ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_nanosecond %console_report$"}});
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_time_label_nanosecond\",$"},
- {"\"family_index\": 4,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_time_label_nanosecond\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\"$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_nanosecond\",%csv_report$"}});
-
-void BM_time_label_microsecond(benchmark::State& state) {
- for (auto _ : state) {
- }
-}
-BENCHMARK(BM_time_label_microsecond)->Unit(benchmark::kMicrosecond);
-
-ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_microsecond %console_us_report$"}});
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_time_label_microsecond\",$"},
- {"\"family_index\": 5,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_time_label_microsecond\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"us\"$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_microsecond\",%csv_us_report$"}});
-
-void BM_time_label_millisecond(benchmark::State& state) {
- for (auto _ : state) {
- }
-}
-BENCHMARK(BM_time_label_millisecond)->Unit(benchmark::kMillisecond);
-
-ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_millisecond %console_ms_report$"}});
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_time_label_millisecond\",$"},
- {"\"family_index\": 6,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_time_label_millisecond\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ms\"$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_millisecond\",%csv_ms_report$"}});
-
-void BM_time_label_second(benchmark::State& state) {
- for (auto _ : state) {
- }
-}
-BENCHMARK(BM_time_label_second)->Unit(benchmark::kSecond);
-
-ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_second %console_s_report$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_time_label_second\",$"},
- {"\"family_index\": 7,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_time_label_second\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"s\"$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_second\",%csv_s_report$"}});
-
-// ========================================================================= //
// ------------------------ Testing Error Output --------------------------- //
// ========================================================================= //
@@ -283,13 +165,8 @@ void BM_error(benchmark::State& state) {
BENCHMARK(BM_error);
ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"},
- {"\"family_index\": 8,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_error\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"error_occurred\": true,$", MR_Next},
{"\"error_message\": \"message\",$", MR_Next}});
@@ -307,13 +184,8 @@ void BM_no_arg_name(benchmark::State& state) {
BENCHMARK(BM_no_arg_name)->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"},
- {"\"family_index\": 9,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_no_arg_name/3\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next}});
+ {"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
// ========================================================================= //
@@ -327,13 +199,8 @@ void BM_arg_name(benchmark::State& state) {
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"},
- {"\"family_index\": 10,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_arg_name/first:3\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next}});
+ {"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
// ========================================================================= //
@@ -349,42 +216,11 @@ ADD_CASES(TC_ConsoleOut,
{{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
- {"\"family_index\": 11,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next}});
+ {"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
// ========================================================================= //
-// ------------------------ Testing Name Output ---------------------------- //
-// ========================================================================= //
-
-void BM_name(benchmark::State& state) {
- for (auto _ : state) {
- }
-}
-BENCHMARK(BM_name)->Name("BM_custom_name");
-
-ADD_CASES(TC_ConsoleOut, {{"^BM_custom_name %console_report$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_custom_name\",$"},
- {"\"family_index\": 12,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_custom_name\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\"$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_CSVOut, {{"^\"BM_custom_name\",%csv_report$"}});
-
-// ========================================================================= //
// ------------------------ Testing Big Args Output ------------------------ //
// ========================================================================= //
@@ -402,8 +238,6 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
void BM_Complexity_O1(benchmark::State& state) {
for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
}
state.SetComplexityN(state.range(0));
}
@@ -431,46 +265,24 @@ ADD_CASES(TC_ConsoleOut,
{"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"},
- {"\"family_index\": 15,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2\",$"},
- {"\"family_index\": 15,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"repetition_index\": 1,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_mean\",$"},
- {"\"family_index\": 15,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_median\",$"},
- {"\"family_index\": 15,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_stddev\",$"},
- {"\"family_index\": 15,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
@@ -488,54 +300,27 @@ ADD_CASES(TC_ConsoleOut,
{"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
- {"\"family_index\": 16,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
- {"\"family_index\": 16,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"repetition_index\": 1,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
- {"\"family_index\": 16,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"repetition_index\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_mean\",$"},
- {"\"family_index\": 16,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_median\",$"},
- {"\"family_index\": 16,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_stddev\",$"},
- {"\"family_index\": 16,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
@@ -555,62 +340,30 @@ ADD_CASES(TC_ConsoleOut,
{"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"},
- {"\"family_index\": 17,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 4,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
- {"\"family_index\": 17,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 4,$", MR_Next},
- {"\"repetition_index\": 1,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
- {"\"family_index\": 17,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 4,$", MR_Next},
- {"\"repetition_index\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
- {"\"family_index\": 17,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 4,$", MR_Next},
- {"\"repetition_index\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_mean\",$"},
- {"\"family_index\": 17,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 4,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_median\",$"},
- {"\"family_index\": 17,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 4,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_stddev\",$"},
- {"\"family_index\": 17,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 4,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
@@ -630,13 +383,8 @@ void BM_RepeatOnce(benchmark::State& state) {
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
- {"\"family_index\": 18,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatOnce/repeats:1\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next}});
+ {"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
// Test that non-aggregate data is not reported
@@ -654,30 +402,18 @@ ADD_CASES(
ADD_CASES(TC_JSONOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
- {"\"family_index\": 19,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
- {"\"family_index\": 19,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"},
- {"\"family_index\": 19,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
@@ -702,30 +438,18 @@ ADD_CASES(
ADD_CASES(TC_JSONOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
- {"\"family_index\": 20,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"},
- {"\"family_index\": 20,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"},
- {"\"family_index\": 20,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut,
@@ -754,32 +478,20 @@ ADD_CASES(
ADD_CASES(TC_JSONOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
- {"\"family_index\": 21,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
- {"\"family_index\": 21,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
- {"\"family_index\": 21,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"}});
@@ -828,79 +540,48 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
ADD_CASES(
TC_JSONOut,
{{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
- {"\"family_index\": 22,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
- {"\"family_index\": 22,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"repetition_index\": 1,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
- {"\"family_index\": 22,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"repetition_index\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
- {"\"family_index\": 22,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
- {"\"family_index\": 22,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
- {"\"family_index\": 22,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
- {"\"family_index\": 22,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 3,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
@@ -917,39 +598,6 @@ ADD_CASES(
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}});
// ========================================================================= //
-// ------------------------- Testing StrEscape JSON ------------------------ //
-// ========================================================================= //
-#if 0 // enable when csv testing code correctly handles multi-line fields
-void BM_JSON_Format(benchmark::State& state) {
- state.SkipWithError("val\b\f\n\r\t\\\"with\"es,capes");
- for (auto _ : state) {
- }
-}
-BENCHMARK(BM_JSON_Format);
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"},
- {"\"family_index\": 23,$", MR_Next},
-{"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_JSON_Format\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"error_occurred\": true,$", MR_Next},
- {R"("error_message": "val\\b\\f\\n\\r\\t\\\\\\"with\\"es,capes",$)", MR_Next}});
-#endif
-// ========================================================================= //
-// -------------------------- Testing CsvEscape ---------------------------- //
-// ========================================================================= //
-
-void BM_CSV_Format(benchmark::State& state) {
- state.SkipWithError("\"freedom\"");
- for (auto _ : state) {
- }
-}
-BENCHMARK(BM_CSV_Format);
-ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});
-
-// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
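Several hunks above drop the "non-zero CPU time to avoid divide-by-zero" comment together with its benchmark::DoNotOptimize(state.iterations()) call from otherwise-empty timing loops. As a rough sketch of why that pattern exists (illustrative only, not part of the imported diff; BM_rate_sketch is a hypothetical name): per-second quantities such as bytes_per_second and items_per_second are derived by dividing the processed amount by the measured CPU time, so the loop body has to burn at least some CPU.

  void BM_rate_sketch(benchmark::State& state) {
    for (auto _ : state) {
      benchmark::DoNotOptimize(state.iterations());  // keeps the measured CPU time non-zero
    }
    state.SetBytesProcessed(1);  // reported roughly as 1 / cpu_time bytes per second
  }
  BENCHMARK(BM_rate_sketch);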
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/skip_with_error_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/skip_with_error_test.cc
index 827966e9dfe..06579772ff7 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/skip_with_error_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/skip_with_error_test.cc
@@ -10,11 +10,11 @@ namespace {
class TestReporter : public benchmark::ConsoleReporter {
public:
- virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
+ virtual bool ReportContext(const Context& context) {
return ConsoleReporter::ReportContext(context);
};
- virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
+ virtual void ReportRuns(const std::vector<Run>& report) {
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
@@ -61,12 +61,6 @@ int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
} // end namespace
-void BM_error_no_running(benchmark::State& state) {
- state.SkipWithError("error message");
-}
-BENCHMARK(BM_error_no_running);
-ADD_CASES("BM_error_no_running", {{"", true, "error message"}});
-
void BM_error_before_running(benchmark::State& state) {
state.SkipWithError("error message");
while (state.KeepRunning()) {
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/state_assembly_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/state_assembly_test.cc
index 7ddbb3b2a92..abe9a4ddb56 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/state_assembly_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/state_assembly_test.cc
@@ -25,7 +25,7 @@ extern "C" int test_for_auto_loop() {
for (auto _ : S) {
// CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
// CHECK-GNU-NEXT: subq $1, %rbx
- // CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}}
+ // CHECK-CLANG-NEXT: {{(addq \$1,|incq)}} %rax
// CHECK-NEXT: jne .L[[LOOP_HEAD]]
benchmark::DoNotOptimize(x);
}
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/statistics_gtest.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/statistics_gtest.cc
index 3ddc72dd7ac..99e314920c5 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/statistics_gtest.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/statistics_gtest.cc
@@ -21,8 +21,8 @@ TEST(StatisticsTest, Median) {
TEST(StatisticsTest, StdDev) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
- EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}),
- 1.151086443322134);
+ EXPECT_FLOAT_EQ(benchmark::StatisticsStdDev({1.5, 2.4, 3.3, 4.2, 5.1}),
+ 1.42302495);
}
} // end namespace
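For reference (illustrative arithmetic, not part of the imported diff): the non-trivial expectations kept by this hunk are consistent with the sample standard deviation sqrt(sum((x_i - mean)^2) / (n - 1)). For {1, 2, 3}, the mean is 2 and the squared deviations sum to 2, so sqrt(2 / 2) = 1.0; for {1.5, 2.4, 3.3, 4.2, 5.1}, the mean is 3.3 and the squared deviations sum to 8.1, so sqrt(8.1 / 4) ≈ 1.42302495, and the switch to EXPECT_FLOAT_EQ presumably allows for the float-precision rounding of that constant.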
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/string_util_gtest.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/string_util_gtest.cc
index c7061b409e9..2c5d073f613 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/string_util_gtest.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/string_util_gtest.cc
@@ -3,7 +3,6 @@
//===---------------------------------------------------------------------===//
#include "../src/string_util.h"
-#include "../src/internal_macros.h"
#include "gtest/gtest.h"
namespace {
@@ -61,11 +60,9 @@ TEST(StringUtilTest, stoul) {
EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16));
EXPECT_EQ(4ul, pos);
}
-#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
}
-#endif
}
TEST(StringUtilTest, stoi) {
@@ -109,11 +106,9 @@ TEST(StringUtilTest, stoi) {
EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
EXPECT_EQ(4ul, pos);
}
-#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
}
-#endif
}
TEST(StringUtilTest, stod) {
@@ -143,19 +138,9 @@ TEST(StringUtilTest, stod) {
EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
EXPECT_EQ(8ul, pos);
}
-#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
}
-#endif
-}
-
-TEST(StringUtilTest, StrSplit) {
- EXPECT_EQ(benchmark::StrSplit("", ','), std::vector<std::string>{});
- EXPECT_EQ(benchmark::StrSplit("hello", ','),
- std::vector<std::string>({"hello"}));
- EXPECT_EQ(benchmark::StrSplit("hello,there,is,more", ','),
- std::vector<std::string>({"hello", "there", "is", "more"}));
}
} // end namespace
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc
index 421f27b5cb8..030e98916c3 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc
@@ -7,23 +7,19 @@
// @todo: <jpmag> this checks the full output at once; the rule for
// CounterSet1 was failing because it was not matching "^[-]+$".
// @todo: <jpmag> check that the counters are vertically aligned.
-ADD_CASES(TC_ConsoleOut,
- {
- // keeping these lines long improves readability, so:
- // clang-format off
+ADD_CASES(
+ TC_ConsoleOut,
+ {
+ // keeping these lines long improves readability, so:
+ // clang-format off
{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next},
{"^[-]+$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:1_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:1_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:1_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:2_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:2_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:2_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
@@ -50,8 +46,8 @@ ADD_CASES(TC_ConsoleOut,
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"},
- // clang-format on
- });
+ // clang-format on
+ });
ADD_CASES(TC_CSVOut, {{"%csv_header,"
"\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}});
@@ -72,144 +68,11 @@ void BM_Counters_Tabular(benchmark::State& state) {
{"Lob", {32, bm::Counter::kAvgThreads}},
});
}
-BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 2)->Repetitions(2);
+BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
- MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Bat\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float,$", MR_Next},
- {"\"Frob\": %float,$", MR_Next},
- {"\"Lob\": %float$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
- MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"repetition_index\": 1,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Bat\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float,$", MR_Next},
- {"\"Frob\": %float,$", MR_Next},
- {"\"Lob\": %float$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_mean\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
- MR_Next},
- {"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"aggregate_name\": \"mean\",$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Bat\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float,$", MR_Next},
- {"\"Frob\": %float,$", MR_Next},
- {"\"Lob\": %float$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_median\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
- MR_Next},
- {"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"aggregate_name\": \"median\",$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Bat\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float,$", MR_Next},
- {"\"Frob\": %float,$", MR_Next},
- {"\"Lob\": %float$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_stddev\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
- MR_Next},
- {"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"aggregate_name\": \"stddev\",$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Bat\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float,$", MR_Next},
- {"\"Frob\": %float,$", MR_Next},
- {"\"Lob\": %float$", MR_Next},
- {"}", MR_Next}});
-
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 1,$", MR_Next},
- {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
- MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 2,$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Bat\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float,$", MR_Next},
- {"\"Frob\": %float,$", MR_Next},
- {"\"Lob\": %float$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 1,$", MR_Next},
- {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
- MR_Next},
+ {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
+ {"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"repetition_index\": 1,$", MR_Next},
- {"\"threads\": 2,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -221,78 +84,8 @@ ADD_CASES(TC_JSONOut,
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_median\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 1,$", MR_Next},
- {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
- MR_Next},
- {"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 2,$", MR_Next},
- {"\"aggregate_name\": \"median\",$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Bat\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float,$", MR_Next},
- {"\"Frob\": %float,$", MR_Next},
- {"\"Lob\": %float$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_stddev\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 1,$", MR_Next},
- {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
- MR_Next},
- {"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 2,$", MR_Next},
- {"\"aggregate_name\": \"stddev\",$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Bat\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float,$", MR_Next},
- {"\"Frob\": %float,$", MR_Next},
- {"\"Lob\": %float$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report,"
- "%float,%float,%float,%float,%float,%float$"}});
-ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report,"
- "%float,%float,%float,%float,%float,%float$"}});
-ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:1_mean\",%csv_report,"
- "%float,%float,%float,%float,%float,%float$"}});
-ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:1_median\",%csv_report,"
- "%float,%float,%float,%float,%float,%float$"}});
-ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:1_stddev\",%csv_report,"
- "%float,%float,%float,%float,%float,%float$"}});
-ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
- "%float,%float,%float,%float,%float,%float$"}});
-ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
- "%float,%float,%float,%float,%float,%float$"}});
-ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:2_mean\",%csv_report,"
- "%float,%float,%float,%float,%float,%float$"}});
-ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:2_median\",%csv_report,"
- "%float,%float,%float,%float,%float,%float$"}});
-ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:2_stddev\",%csv_report,"
- "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report,"
+ "%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabular(Results const& e) {
@@ -303,10 +96,7 @@ void CheckTabular(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16);
CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32);
}
-CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:1$",
- &CheckTabular);
-CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$",
- &CheckTabular);
+CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);
// ========================================================================= //
// -------------------- Tabular+Rate Counters Output ----------------------- //
@@ -314,8 +104,6 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$",
void BM_CounterRates_Tabular(benchmark::State& state) {
for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters.insert({
@@ -330,14 +118,9 @@ void BM_CounterRates_Tabular(benchmark::State& state) {
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
- {"\"family_index\": 1,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -383,13 +166,8 @@ void BM_CounterSet0_Tabular(benchmark::State& state) {
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
- {"\"family_index\": 2,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -423,13 +201,8 @@ void BM_CounterSet1_Tabular(benchmark::State& state) {
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
- {"\"family_index\": 3,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -467,13 +240,8 @@ void BM_CounterSet2_Tabular(benchmark::State& state) {
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
- {"\"family_index\": 4,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_test.cc
index 377bb32ca94..bb0d6b4c5a9 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_test.cc
@@ -32,13 +32,8 @@ BENCHMARK(BM_Counters_Simple);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Simple\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -66,8 +61,6 @@ int num_calls1 = 0;
}
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
}
state.counters["foo"] = 1;
state.counters["bar"] = ++num_calls1;
@@ -80,13 +73,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
"foo=%hrfloat items_per_second=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
- {"\"family_index\": 1,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -117,8 +105,6 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
void BM_Counters_Rate(benchmark::State& state) {
for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
@@ -129,13 +115,8 @@ ADD_CASES(
TC_ConsoleOut,
{{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
- {"\"family_index\": 2,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -155,93 +136,6 @@ void CheckRate(Results const& e) {
CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
// ========================================================================= //
-// ----------------------- Inverted Counters Output ------------------------ //
-// ========================================================================= //
-
-void BM_Invert(benchmark::State& state) {
- for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
- }
- namespace bm = benchmark;
- state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert};
- state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert};
-}
-BENCHMARK(BM_Invert);
-ADD_CASES(TC_ConsoleOut,
- {{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
- {"\"family_index\": 3,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_Invert\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"bar\": %float,$", MR_Next},
- {"\"foo\": %float$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}});
-// VS2013 does not allow this function to be passed as a lambda argument
-// to CHECK_BENCHMARK_RESULTS()
-void CheckInvert(Results const& e) {
- CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001);
- CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001);
-}
-CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
-
-// ========================================================================= //
-// ------------------------- InvertedRate Counters Output
-// -------------------------- //
-// ========================================================================= //
-
-void BM_Counters_InvertedRate(benchmark::State& state) {
- for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
- }
- namespace bm = benchmark;
- state.counters["foo"] =
- bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert};
- state.counters["bar"] =
- bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
-}
-BENCHMARK(BM_Counters_InvertedRate);
-ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report "
- "bar=%hrfloats foo=%hrfloats$"}});
-ADD_CASES(TC_JSONOut,
- {{"\"name\": \"BM_Counters_InvertedRate\",$"},
- {"\"family_index\": 4,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
- {"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next},
- {"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"bar\": %float,$", MR_Next},
- {"\"foo\": %float$", MR_Next},
- {"}", MR_Next}});
-ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
-// VS2013 does not allow this function to be passed as a lambda argument
-// to CHECK_BENCHMARK_RESULTS()
-void CheckInvertedRate(Results const& e) {
- double t = e.DurationCPUTime(); // this (and not real time) is the time used
- // check that the values are within 0.1% of the expected values
- CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001);
- CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, t / 8192.0, 0.001);
-}
-CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate);
-
-// ========================================================================= //
// ------------------------- Thread Counters Output ------------------------ //
// ========================================================================= //
@@ -256,13 +150,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
- {"\"family_index\": 5,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -297,13 +186,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
"%console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
- {"\"family_index\": 6,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -329,8 +213,6 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
void BM_Counters_AvgThreadsRate(benchmark::State& state) {
for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
@@ -341,14 +223,9 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
- {"\"family_index\": 7,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -383,13 +260,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_IterationInvariant\",$"},
- {"\"family_index\": 8,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -416,8 +288,6 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] =
@@ -430,14 +300,9 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
- {"\"family_index\": 9,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -475,13 +340,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgIterations\",$"},
- {"\"family_index\": 10,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -507,8 +367,6 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
for (auto _ : state) {
- // This test requires a non-zero CPU time to avoid divide-by-zero
- benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
@@ -520,13 +378,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
- {"\"family_index\": 11,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 1,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_thousands_test.cc b/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_thousands_test.cc
index bbe194264ed..fa0ef972047 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_thousands_test.cc
+++ b/gnu/llvm/libcxx/utils/google-benchmark/test/user_counters_thousands_test.cc
@@ -51,13 +51,8 @@ ADD_CASES(
});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"repetition_index\": 0,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -70,13 +65,8 @@ ADD_CASES(TC_JSONOut,
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"repetition_index\": 1,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -89,12 +79,8 @@ ADD_CASES(TC_JSONOut,
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
@@ -108,12 +94,8 @@ ADD_CASES(TC_JSONOut,
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
@@ -127,12 +109,8 @@ ADD_CASES(TC_JSONOut,
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
- {"\"family_index\": 0,$", MR_Next},
- {"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
- {"\"repetitions\": 2,$", MR_Next},
- {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/tools/compare.py b/gnu/llvm/libcxx/utils/google-benchmark/tools/compare.py
index 01d2c89f50f..539ace6fb16 100755
--- a/gnu/llvm/libcxx/utils/google-benchmark/tools/compare.py
+++ b/gnu/llvm/libcxx/utils/google-benchmark/tools/compare.py
@@ -7,7 +7,6 @@ compare.py - versatile benchmark output compare tool
import argparse
from argparse import ArgumentParser
-import json
import sys
import gbench
from gbench import util, report
@@ -49,20 +48,6 @@ def create_parser():
"of repetitions. Do note that only the display is affected. "
"Internally, all the actual runs are still used, e.g. for U test.")
- parser.add_argument(
- '--no-color',
- dest='color',
- default=True,
- action="store_false",
- help="Do not use colors in the terminal output"
- )
-
- parser.add_argument(
- '-d',
- '--dump_to_json',
- dest='dump_to_json',
- help="Additionally, dump benchmark comparison output to this file in JSON format.")
-
utest = parser.add_argument_group()
utest.add_argument(
'--no-utest',
@@ -238,10 +223,10 @@ def main():
options_contender = ['--benchmark_filter=%s' % filter_contender]
# Run the benchmarks and report the results
- json1 = json1_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
- test_baseline, benchmark_options + options_baseline))
- json2 = json2_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
- test_contender, benchmark_options + options_contender))
+ json1 = json1_orig = gbench.util.run_or_load_benchmark(
+ test_baseline, benchmark_options + options_baseline)
+ json2 = json2_orig = gbench.util.run_or_load_benchmark(
+ test_contender, benchmark_options + options_contender)
# Now, filter the benchmarks so that the difference report can work
if filter_baseline and filter_contender:
@@ -251,20 +236,14 @@ def main():
json2 = gbench.report.filter_benchmark(
json2_orig, filter_contender, replacement)
- diff_report = gbench.report.get_difference_report(
- json1, json2, args.utest)
- output_lines = gbench.report.print_difference_report(
- diff_report,
- args.display_aggregates_only,
- args.utest, args.utest_alpha, args.color)
+ # Diff and output
+ output_lines = gbench.report.generate_difference_report(
+ json1, json2, args.display_aggregates_only,
+ args.utest, args.utest_alpha)
print(description)
for ln in output_lines:
print(ln)
- # Optionally, diff and output to JSON
- if args.dump_to_json is not None:
- with open(args.dump_to_json, 'w') as f_json:
- json.dump(diff_report, f_json)
class TestParser(unittest.TestCase):
def setUp(self):
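
For reference, the comparison path that this import keeps in compare.py can be driven directly from Python. The sketch below is illustrative only: the input file names are hypothetical, it assumes the gbench package is on the import path, and it uses only the run_or_load_benchmark() and generate_difference_report() signatures visible in the hunks above and below.

import gbench.util
import gbench.report

# Hypothetical inputs; a .json file is loaded as-is, an executable would be run.
json1 = gbench.util.run_or_load_benchmark('baseline.json', benchmark_flags=[])
json2 = gbench.util.run_or_load_benchmark('contender.json', benchmark_flags=[])

# The older API diffs and formats in one step; there is no separate
# get_difference_report()/print_difference_report() pair in this version.
for line in gbench.report.generate_difference_report(
        json1, json2, display_aggregates_only=False,
        utest=True, utest_alpha=0.05, use_color=False):
    print(line)
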
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run1.json b/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run1.json
index 601e327aefb..d7ec6a9c8f6 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run1.json
+++ b/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run1.json
@@ -85,24 +85,7 @@
"time_unit": "ns"
},
{
- "name": "MyComplexityTest_BigO",
- "run_name": "MyComplexityTest",
- "run_type": "aggregate",
- "aggregate_name": "BigO",
- "cpu_coefficient": 4.2749856294592886e+00,
- "real_coefficient": 6.4789275289789780e+00,
- "big_o": "N",
- "time_unit": "ns"
- },
- {
- "name": "MyComplexityTest_RMS",
- "run_name": "MyComplexityTest",
- "run_type": "aggregate",
- "aggregate_name": "RMS",
- "rms": 4.5097802512472874e-03
- },
- {
- "name": "BM_NotBadTimeUnit",
+ "name": "BM_BadTimeUnit",
"iterations": 1000,
"real_time": 0.4,
"cpu_time": 0.5,
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run2.json b/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run2.json
index 3cbcf39b0c9..59a5ffaca4d 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run2.json
+++ b/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run2.json
@@ -85,24 +85,7 @@
"time_unit": "ns"
},
{
- "name": "MyComplexityTest_BigO",
- "run_name": "MyComplexityTest",
- "run_type": "aggregate",
- "aggregate_name": "BigO",
- "cpu_coefficient": 5.6215779594361486e+00,
- "real_coefficient": 5.6288314793554610e+00,
- "big_o": "N",
- "time_unit": "ns"
- },
- {
- "name": "MyComplexityTest_RMS",
- "run_name": "MyComplexityTest",
- "run_type": "aggregate",
- "aggregate_name": "RMS",
- "rms": 3.3128901852342174e-03
- },
- {
- "name": "BM_NotBadTimeUnit",
+ "name": "BM_BadTimeUnit",
"iterations": 1000,
"real_time": 0.04,
"cpu_time": 0.6,
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/report.py b/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/report.py
index 6bea82f6bf7..5085b931947 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/report.py
+++ b/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/report.py
@@ -1,11 +1,9 @@
+import unittest
"""report.py - Utilities for reporting statistics about benchmark results
"""
-
-import unittest
import os
import re
import copy
-import random
from scipy.stats import mannwhitneyu
@@ -116,10 +114,6 @@ def intersect(list1, list2):
return [x for x in list1 if x in list2]
-def is_potentially_comparable_benchmark(x):
- return ('time_unit' in x and 'real_time' in x and 'cpu_time' in x)
-
-
def partition_benchmarks(json1, json2):
"""
While preserving the ordering, find benchmarks with the same names in
@@ -131,17 +125,10 @@ def partition_benchmarks(json1, json2):
names = intersect(json1_unique_names, json2_unique_names)
partitions = []
for name in names:
- time_unit = None
# Pick the time unit from the first entry of the lhs benchmark.
- # We should be careful not to crash with unexpected input.
- for x in json1['benchmarks']:
- if (x['name'] == name and is_potentially_comparable_benchmark(x)):
- time_unit = x['time_unit']
- break
- if time_unit is None:
- continue
+ time_unit = (x['time_unit']
+ for x in json1['benchmarks'] if x['name'] == name).next()
# Filter by name and time unit.
- # All the repetitions are assumed to be comparable.
lhs = [x for x in json1['benchmarks'] if x['name'] == name and
x['time_unit'] == time_unit]
rhs = [x for x in json2['benchmarks'] if x['name'] == name and
@@ -157,7 +144,10 @@ def extract_field(partition, field_name):
return [lhs, rhs]
-def calc_utest(timings_cpu, timings_time):
+def print_utest(partition, utest_alpha, first_col_width, use_color=True):
+ timings_time = extract_field(partition, 'real_time')
+ timings_cpu = extract_field(partition, 'cpu_time')
+
min_rep_cnt = min(len(timings_time[0]),
len(timings_time[1]),
len(timings_cpu[0]),
@@ -165,115 +155,43 @@ def calc_utest(timings_cpu, timings_time):
# Does *everything* has at least UTEST_MIN_REPETITIONS repetitions?
if min_rep_cnt < UTEST_MIN_REPETITIONS:
- return False, None, None
+ return []
+
+ def get_utest_color(pval):
+ return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
time_pvalue = mannwhitneyu(
timings_time[0], timings_time[1], alternative='two-sided').pvalue
cpu_pvalue = mannwhitneyu(
timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue
- return (min_rep_cnt >= UTEST_OPTIMAL_REPETITIONS), cpu_pvalue, time_pvalue
-
-def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
- def get_utest_color(pval):
- return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
-
- # Check if we failed miserably with minimum required repetitions for utest
- if not utest['have_optimal_repetitions'] and utest['cpu_pvalue'] is None and utest['time_pvalue'] is None:
- return []
-
dsc = "U Test, Repetitions: {} vs {}".format(
- utest['nr_of_repetitions'], utest['nr_of_repetitions_other'])
+ len(timings_cpu[0]), len(timings_cpu[1]))
dsc_color = BC_OKGREEN
- # We still got some results to show but issue a warning about it.
- if not utest['have_optimal_repetitions']:
+ if min_rep_cnt < UTEST_OPTIMAL_REPETITIONS:
dsc_color = BC_WARNING
dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
UTEST_OPTIMAL_REPETITIONS)
special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
+ last_name = partition[0][0]['name']
return [color_format(use_color,
special_str,
BC_HEADER,
- "{}{}".format(bc_name, UTEST_COL_NAME),
+ "{}{}".format(last_name, UTEST_COL_NAME),
first_col_width,
- get_utest_color(
- utest['time_pvalue']), utest['time_pvalue'],
- get_utest_color(
- utest['cpu_pvalue']), utest['cpu_pvalue'],
+ get_utest_color(time_pvalue), time_pvalue,
+ get_utest_color(cpu_pvalue), cpu_pvalue,
dsc_color, dsc,
endc=BC_ENDC)]
-def get_difference_report(
+def generate_difference_report(
json1,
json2,
- utest=False):
- """
- Calculate and report the difference between each test of two benchmarks
- runs specified as 'json1' and 'json2'. Output is another json containing
- relevant details for each test run.
- """
- assert utest is True or utest is False
-
- diff_report = []
- partitions = partition_benchmarks(json1, json2)
- for partition in partitions:
- benchmark_name = partition[0][0]['name']
- time_unit = partition[0][0]['time_unit']
- measurements = []
- utest_results = {}
- # Careful, we may have different repetition count.
- for i in range(min(len(partition[0]), len(partition[1]))):
- bn = partition[0][i]
- other_bench = partition[1][i]
- measurements.append({
- 'real_time': bn['real_time'],
- 'cpu_time': bn['cpu_time'],
- 'real_time_other': other_bench['real_time'],
- 'cpu_time_other': other_bench['cpu_time'],
- 'time': calculate_change(bn['real_time'], other_bench['real_time']),
- 'cpu': calculate_change(bn['cpu_time'], other_bench['cpu_time'])
- })
-
- # After processing the whole partition, if requested, do the U test.
- if utest:
- timings_cpu = extract_field(partition, 'cpu_time')
- timings_time = extract_field(partition, 'real_time')
- have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(timings_cpu, timings_time)
- if cpu_pvalue and time_pvalue:
- utest_results = {
- 'have_optimal_repetitions': have_optimal_repetitions,
- 'cpu_pvalue': cpu_pvalue,
- 'time_pvalue': time_pvalue,
- 'nr_of_repetitions': len(timings_cpu[0]),
- 'nr_of_repetitions_other': len(timings_cpu[1])
- }
-
- # Store only if we had any measurements for given benchmark.
- # E.g. partition_benchmarks will filter out the benchmarks having
- # time units which are not compatible with other time units in the
- # benchmark suite.
- if measurements:
- run_type = partition[0][0]['run_type'] if 'run_type' in partition[0][0] else ''
- aggregate_name = partition[0][0]['aggregate_name'] if run_type == 'aggregate' and 'aggregate_name' in partition[0][0] else ''
- diff_report.append({
- 'name': benchmark_name,
- 'measurements': measurements,
- 'time_unit': time_unit,
- 'run_type': run_type,
- 'aggregate_name': aggregate_name,
- 'utest': utest_results
- })
-
- return diff_report
-
-
-def print_difference_report(
- json_diff_report,
- include_aggregates_only=False,
+ display_aggregates_only=False,
utest=False,
utest_alpha=0.05,
use_color=True):
@@ -282,16 +200,14 @@ def print_difference_report(
runs specified as 'json1' and 'json2'.
"""
assert utest is True or utest is False
+ first_col_width = find_longest_name(json1['benchmarks'])
- def get_color(res):
- if res > 0.05:
- return BC_FAIL
- elif res > -0.07:
- return BC_WHITE
- else:
- return BC_CYAN
+ def find_test(name):
+ for b in json2['benchmarks']:
+ if b['name'] == name:
+ return b
+ return None
- first_col_width = find_longest_name(json_diff_report)
first_col_width = max(
first_col_width,
len('Benchmark'))
@@ -300,33 +216,50 @@ def print_difference_report(
'Benchmark', 12 + first_col_width)
output_strs = [first_line, '-' * len(first_line)]
- fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
- for benchmark in json_diff_report:
- # *If* we were asked to only include aggregates,
- # and if it is non-aggregate, then don't print it.
- if not include_aggregates_only or not 'run_type' in benchmark or benchmark['run_type'] == 'aggregate':
- for measurement in benchmark['measurements']:
- output_strs += [color_format(use_color,
- fmt_str,
- BC_HEADER,
- benchmark['name'],
- first_col_width,
- get_color(measurement['time']),
- measurement['time'],
- get_color(measurement['cpu']),
- measurement['cpu'],
- measurement['real_time'],
- measurement['real_time_other'],
- measurement['cpu_time'],
- measurement['cpu_time_other'],
- endc=BC_ENDC)]
-
- # After processing the measurements, if requested and
- # if applicable (e.g. u-test exists for given benchmark),
- # print the U test.
- if utest and benchmark['utest']:
- output_strs += print_utest(benchmark['name'],
- benchmark['utest'],
+ partitions = partition_benchmarks(json1, json2)
+ for partition in partitions:
+ # Careful, we may have different repetition count.
+ for i in range(min(len(partition[0]), len(partition[1]))):
+ bn = partition[0][i]
+ other_bench = partition[1][i]
+
+ # *If* we were asked to only display aggregates,
+ # and if it is non-aggregate, then skip it.
+ if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench:
+ assert bn['run_type'] == other_bench['run_type']
+ if bn['run_type'] != 'aggregate':
+ continue
+
+ fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
+
+ def get_color(res):
+ if res > 0.05:
+ return BC_FAIL
+ elif res > -0.07:
+ return BC_WHITE
+ else:
+ return BC_CYAN
+
+ tres = calculate_change(bn['real_time'], other_bench['real_time'])
+ cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
+ output_strs += [color_format(use_color,
+ fmt_str,
+ BC_HEADER,
+ bn['name'],
+ first_col_width,
+ get_color(tres),
+ tres,
+ get_color(cpures),
+ cpures,
+ bn['real_time'],
+ other_bench['real_time'],
+ bn['cpu_time'],
+ other_bench['cpu_time'],
+ endc=BC_ENDC)]
+
+ # After processing the whole partition, if requested, do the U test.
+ if utest:
+ output_strs += print_utest(partition,
utest_alpha=utest_alpha,
first_col_width=first_col_width,
use_color=use_color)
@@ -367,26 +300,21 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
class TestReportDifference(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- def load_results():
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput1 = os.path.join(testInputs, 'test1_run1.json')
- testOutput2 = os.path.join(testInputs, 'test1_run2.json')
- with open(testOutput1, 'r') as f:
- json1 = json.load(f)
- with open(testOutput2, 'r') as f:
- json2 = json.load(f)
- return json1, json2
-
- json1, json2 = load_results()
- cls.json_diff_report = get_difference_report(json1, json2)
-
- def test_json_diff_report_pretty_printing(self):
+ def load_results(self):
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput1 = os.path.join(testInputs, 'test1_run1.json')
+ testOutput2 = os.path.join(testInputs, 'test1_run2.json')
+ with open(testOutput1, 'r') as f:
+ json1 = json.load(f)
+ with open(testOutput2, 'r') as f:
+ json2 = json.load(f)
+ return json1, json2
+
+ def test_basic(self):
expect_lines = [
['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
@@ -402,10 +330,11 @@ class TestReportDifference(unittest.TestCase):
['BM_10PercentCPUToTime', '+0.1000',
'-0.1000', '100', '110', '100', '90'],
['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
- ['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
+ ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
]
- output_lines_with_header = print_difference_report(
- self.json_diff_report, use_color=False)
+ json1, json2 = self.load_results()
+ output_lines_with_header = generate_difference_report(
+ json1, json2, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
@@ -415,118 +344,31 @@ class TestReportDifference(unittest.TestCase):
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
- def test_json_diff_report_output(self):
- expected_output = [
- {
- 'name': 'BM_SameTimes',
- 'measurements': [{'time': 0.0000, 'cpu': 0.0000, 'real_time': 10, 'real_time_other': 10, 'cpu_time': 10, 'cpu_time_other': 10}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': 'BM_2xFaster',
- 'measurements': [{'time': -0.5000, 'cpu': -0.5000, 'real_time': 50, 'real_time_other': 25, 'cpu_time': 50, 'cpu_time_other': 25}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': 'BM_2xSlower',
- 'measurements': [{'time': 1.0000, 'cpu': 1.0000, 'real_time': 50, 'real_time_other': 100, 'cpu_time': 50, 'cpu_time_other': 100}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': 'BM_1PercentFaster',
- 'measurements': [{'time': -0.0100, 'cpu': -0.0100, 'real_time': 100, 'real_time_other': 98.9999999, 'cpu_time': 100, 'cpu_time_other': 98.9999999}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': 'BM_1PercentSlower',
- 'measurements': [{'time': 0.0100, 'cpu': 0.0100, 'real_time': 100, 'real_time_other': 101, 'cpu_time': 100, 'cpu_time_other': 101}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': 'BM_10PercentFaster',
- 'measurements': [{'time': -0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 90, 'cpu_time': 100, 'cpu_time_other': 90}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': 'BM_10PercentSlower',
- 'measurements': [{'time': 0.1000, 'cpu': 0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 110}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': 'BM_100xSlower',
- 'measurements': [{'time': 99.0000, 'cpu': 99.0000, 'real_time': 100, 'real_time_other': 10000, 'cpu_time': 100, 'cpu_time_other': 10000}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': 'BM_100xFaster',
- 'measurements': [{'time': -0.9900, 'cpu': -0.9900, 'real_time': 10000, 'real_time_other': 100, 'cpu_time': 10000, 'cpu_time_other': 100}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': 'BM_10PercentCPUToTime',
- 'measurements': [{'time': 0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 90}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': 'BM_ThirdFaster',
- 'measurements': [{'time': -0.3333, 'cpu': -0.3334, 'real_time': 100, 'real_time_other': 67, 'cpu_time': 100, 'cpu_time_other': 67}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': 'BM_NotBadTimeUnit',
- 'measurements': [{'time': -0.9000, 'cpu': 0.2000, 'real_time': 0.4, 'real_time_other': 0.04, 'cpu_time': 0.5, 'cpu_time_other': 0.6}],
- 'time_unit': 's',
- 'utest': {}
- },
- ]
- self.assertEqual(len(self.json_diff_report), len(expected_output))
- for out, expected in zip(
- self.json_diff_report, expected_output):
- self.assertEqual(out['name'], expected['name'])
- self.assertEqual(out['time_unit'], expected['time_unit'])
- assert_utest(self, out, expected)
- assert_measurements(self, out, expected)
-
class TestReportDifferenceBetweenFamilies(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- def load_result():
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput = os.path.join(testInputs, 'test2_run.json')
- with open(testOutput, 'r') as f:
- json = json.load(f)
- return json
-
- json = load_result()
- json1 = filter_benchmark(json, "BM_Z.ro", ".")
- json2 = filter_benchmark(json, "BM_O.e", ".")
- cls.json_diff_report = get_difference_report(json1, json2)
+ def load_result(self):
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput = os.path.join(testInputs, 'test2_run.json')
+ with open(testOutput, 'r') as f:
+ json = json.load(f)
+ return json
- def test_json_diff_report_pretty_printing(self):
+ def test_basic(self):
expect_lines = [
['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
]
- output_lines_with_header = print_difference_report(
- self.json_diff_report, use_color=False)
+ json = self.load_result()
+ json1 = filter_benchmark(json, "BM_Z.ro", ".")
+ json2 = filter_benchmark(json, "BM_O.e", ".")
+ output_lines_with_header = generate_difference_report(
+ json1, json2, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
@@ -536,64 +378,24 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
- def test_json_diff_report(self):
- expected_output = [
- {
- 'name': u'.',
- 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 10, 'real_time_other': 5, 'cpu_time': 10, 'cpu_time_other': 5}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': u'./4',
- 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 40, 'real_time_other': 20, 'cpu_time': 40, 'cpu_time_other': 20}],
- 'time_unit': 'ns',
- 'utest': {},
- },
- {
- 'name': u'Prefix/.',
- 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 20, 'real_time_other': 10, 'cpu_time': 20, 'cpu_time_other': 10}],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': u'Prefix/./3',
- 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 30, 'real_time_other': 15, 'cpu_time': 30, 'cpu_time_other': 15}],
- 'time_unit': 'ns',
- 'utest': {}
- }
- ]
- self.assertEqual(len(self.json_diff_report), len(expected_output))
- for out, expected in zip(
- self.json_diff_report, expected_output):
- self.assertEqual(out['name'], expected['name'])
- self.assertEqual(out['time_unit'], expected['time_unit'])
- assert_utest(self, out, expected)
- assert_measurements(self, out, expected)
-
class TestReportDifferenceWithUTest(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- def load_results():
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput1 = os.path.join(testInputs, 'test3_run0.json')
- testOutput2 = os.path.join(testInputs, 'test3_run1.json')
- with open(testOutput1, 'r') as f:
- json1 = json.load(f)
- with open(testOutput2, 'r') as f:
- json2 = json.load(f)
- return json1, json2
-
- json1, json2 = load_results()
- cls.json_diff_report = get_difference_report(
- json1, json2, utest=True)
-
- def test_json_diff_report_pretty_printing(self):
+ def load_results(self):
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput1 = os.path.join(testInputs, 'test3_run0.json')
+ testOutput2 = os.path.join(testInputs, 'test3_run1.json')
+ with open(testOutput1, 'r') as f:
+ json1 = json.load(f)
+ with open(testOutput2, 'r') as f:
+ json2 = json.load(f)
+ return json1, json2
+
+ def test_utest(self):
+ expect_lines = []
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
@@ -632,8 +434,9 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
'recommended.'],
['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
]
- output_lines_with_header = print_difference_report(
- self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False)
+ json1, json2 = self.load_results()
+ output_lines_with_header = generate_difference_report(
+ json1, json2, utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
@@ -642,151 +445,25 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
- def test_json_diff_report_pretty_printing_aggregates_only(self):
- expect_lines = [
- ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
- ['BM_Two_pvalue',
- '0.6985',
- '0.6985',
- 'U',
- 'Test,',
- 'Repetitions:',
- '2',
- 'vs',
- '2.',
- 'WARNING:',
- 'Results',
- 'unreliable!',
- '9+',
- 'repetitions',
- 'recommended.'],
- ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
- ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
- ['short_pvalue',
- '0.7671',
- '0.1489',
- 'U',
- 'Test,',
- 'Repetitions:',
- '2',
- 'vs',
- '3.',
- 'WARNING:',
- 'Results',
- 'unreliable!',
- '9+',
- 'repetitions',
- 'recommended.'],
- ]
- output_lines_with_header = print_difference_report(
- self.json_diff_report, include_aggregates_only=True, utest=True, utest_alpha=0.05, use_color=False)
- output_lines = output_lines_with_header[2:]
- print("\n")
- print("\n".join(output_lines_with_header))
- self.assertEqual(len(output_lines), len(expect_lines))
- for i in range(0, len(output_lines)):
- parts = [x for x in output_lines[i].split(' ') if x]
- self.assertEqual(expect_lines[i], parts)
-
- def test_json_diff_report(self):
- expected_output = [
- {
- 'name': u'BM_One',
- 'measurements': [
- {'time': -0.1,
- 'cpu': 0.1,
- 'real_time': 10,
- 'real_time_other': 9,
- 'cpu_time': 100,
- 'cpu_time_other': 110}
- ],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': u'BM_Two',
- 'measurements': [
- {'time': 0.1111111111111111,
- 'cpu': -0.011111111111111112,
- 'real_time': 9,
- 'real_time_other': 10,
- 'cpu_time': 90,
- 'cpu_time_other': 89},
- {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8,
- 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
- ],
- 'time_unit': 'ns',
- 'utest': {
- 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387
- }
- },
- {
- 'name': u'short',
- 'measurements': [
- {'time': -0.125,
- 'cpu': -0.0625,
- 'real_time': 8,
- 'real_time_other': 7,
- 'cpu_time': 80,
- 'cpu_time_other': 75},
- {'time': -0.4325,
- 'cpu': -0.13506493506493514,
- 'real_time': 8,
- 'real_time_other': 4.54,
- 'cpu_time': 77,
- 'cpu_time_other': 66.6}
- ],
- 'time_unit': 'ns',
- 'utest': {
- 'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
- }
- },
- {
- 'name': u'medium',
- 'measurements': [
- {'time': -0.375,
- 'cpu': -0.3375,
- 'real_time': 8,
- 'real_time_other': 5,
- 'cpu_time': 80,
- 'cpu_time_other': 53}
- ],
- 'time_unit': 'ns',
- 'utest': {}
- }
- ]
- self.assertEqual(len(self.json_diff_report), len(expected_output))
- for out, expected in zip(
- self.json_diff_report, expected_output):
- self.assertEqual(out['name'], expected['name'])
- self.assertEqual(out['time_unit'], expected['time_unit'])
- assert_utest(self, out, expected)
- assert_measurements(self, out, expected)
-
class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- def load_results():
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput1 = os.path.join(testInputs, 'test3_run0.json')
- testOutput2 = os.path.join(testInputs, 'test3_run1.json')
- with open(testOutput1, 'r') as f:
- json1 = json.load(f)
- with open(testOutput2, 'r') as f:
- json2 = json.load(f)
- return json1, json2
-
- json1, json2 = load_results()
- cls.json_diff_report = get_difference_report(
- json1, json2, utest=True)
-
- def test_json_diff_report_pretty_printing(self):
+ def load_results(self):
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput1 = os.path.join(testInputs, 'test3_run0.json')
+ testOutput2 = os.path.join(testInputs, 'test3_run1.json')
+ with open(testOutput1, 'r') as f:
+ json1 = json.load(f)
+ with open(testOutput2, 'r') as f:
+ json2 = json.load(f)
+ return json1, json2
+
+ def test_utest(self):
+ expect_lines = []
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
@@ -823,10 +500,10 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
'9+',
'repetitions',
'recommended.'],
- ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53']
]
- output_lines_with_header = print_difference_report(
- self.json_diff_report,
+ json1, json2 = self.load_results()
+ output_lines_with_header = generate_difference_report(
+ json1, json2, display_aggregates_only=True,
utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
@@ -836,152 +513,6 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
- def test_json_diff_report(self):
- expected_output = [
- {
- 'name': u'BM_One',
- 'measurements': [
- {'time': -0.1,
- 'cpu': 0.1,
- 'real_time': 10,
- 'real_time_other': 9,
- 'cpu_time': 100,
- 'cpu_time_other': 110}
- ],
- 'time_unit': 'ns',
- 'utest': {}
- },
- {
- 'name': u'BM_Two',
- 'measurements': [
- {'time': 0.1111111111111111,
- 'cpu': -0.011111111111111112,
- 'real_time': 9,
- 'real_time_other': 10,
- 'cpu_time': 90,
- 'cpu_time_other': 89},
- {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8,
- 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
- ],
- 'time_unit': 'ns',
- 'utest': {
- 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387
- }
- },
- {
- 'name': u'short',
- 'measurements': [
- {'time': -0.125,
- 'cpu': -0.0625,
- 'real_time': 8,
- 'real_time_other': 7,
- 'cpu_time': 80,
- 'cpu_time_other': 75},
- {'time': -0.4325,
- 'cpu': -0.13506493506493514,
- 'real_time': 8,
- 'real_time_other': 4.54,
- 'cpu_time': 77,
- 'cpu_time_other': 66.6}
- ],
- 'time_unit': 'ns',
- 'utest': {
- 'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
- }
- },
- {
- 'name': u'medium',
- 'measurements': [
- {'real_time_other': 5,
- 'cpu_time': 80,
- 'time': -0.375,
- 'real_time': 8,
- 'cpu_time_other': 53,
- 'cpu': -0.3375
- }
- ],
- 'utest': {},
- 'time_unit': u'ns',
- 'aggregate_name': ''
- }
- ]
- self.assertEqual(len(self.json_diff_report), len(expected_output))
- for out, expected in zip(
- self.json_diff_report, expected_output):
- self.assertEqual(out['name'], expected['name'])
- self.assertEqual(out['time_unit'], expected['time_unit'])
- assert_utest(self, out, expected)
- assert_measurements(self, out, expected)
-
-
-class TestReportSorting(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- def load_result():
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput = os.path.join(testInputs, 'test4_run.json')
- with open(testOutput, 'r') as f:
- json = json.load(f)
- return json
-
- cls.json = load_result()
-
- def test_json_diff_report_pretty_printing(self):
- import util
-
- expected_names = [
- "99 family 0 instance 0 repetition 0",
- "98 family 0 instance 0 repetition 1",
- "97 family 0 instance 0 aggregate",
- "96 family 0 instance 1 repetition 0",
- "95 family 0 instance 1 repetition 1",
- "94 family 0 instance 1 aggregate",
- "93 family 1 instance 0 repetition 0",
- "92 family 1 instance 0 repetition 1",
- "91 family 1 instance 0 aggregate",
- "90 family 1 instance 1 repetition 0",
- "89 family 1 instance 1 repetition 1",
- "88 family 1 instance 1 aggregate"
- ]
-
- for n in range(len(self.json['benchmarks']) ** 2):
- random.shuffle(self.json['benchmarks'])
- sorted_benchmarks = util.sort_benchmark_results(self.json)[
- 'benchmarks']
- self.assertEqual(len(expected_names), len(sorted_benchmarks))
- for out, expected in zip(sorted_benchmarks, expected_names):
- self.assertEqual(out['name'], expected)
-
-
-def assert_utest(unittest_instance, lhs, rhs):
- if lhs['utest']:
- unittest_instance.assertAlmostEqual(
- lhs['utest']['cpu_pvalue'],
- rhs['utest']['cpu_pvalue'])
- unittest_instance.assertAlmostEqual(
- lhs['utest']['time_pvalue'],
- rhs['utest']['time_pvalue'])
- unittest_instance.assertEqual(
- lhs['utest']['have_optimal_repetitions'],
- rhs['utest']['have_optimal_repetitions'])
- else:
- # lhs is empty. assert if rhs is not.
- unittest_instance.assertEqual(lhs['utest'], rhs['utest'])
-
-
-def assert_measurements(unittest_instance, lhs, rhs):
- for m1, m2 in zip(lhs['measurements'], rhs['measurements']):
- unittest_instance.assertEqual(m1['real_time'], m2['real_time'])
- unittest_instance.assertEqual(m1['cpu_time'], m2['cpu_time'])
- # m1['time'] and m1['cpu'] hold values which are being calculated,
- # and therefore we must use almost-equal pattern.
- unittest_instance.assertAlmostEqual(m1['time'], m2['time'], places=4)
- unittest_instance.assertAlmostEqual(m1['cpu'], m2['cpu'], places=4)
-
if __name__ == '__main__':
unittest.main()
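
As a side note on the statistics in print_utest() above: the significance check is SciPy's two-sided Mann-Whitney U test, applied separately to the real-time and CPU-time repetitions. A standalone sketch, with invented timing numbers purely for illustration:

from scipy.stats import mannwhitneyu

# Two sets of CPU timings (ns) from repeated runs; the values are made up.
baseline = [100.0, 102.0, 98.0, 101.0, 99.0]
contender = [110.0, 108.0, 111.0, 109.0, 112.0]

# Same call shape as in print_utest(): two-sided alternative, then the
# p-value is compared against utest_alpha to pick the output color.
pvalue = mannwhitneyu(baseline, contender, alternative='two-sided').pvalue
print('significant' if pvalue < 0.05 else 'not significant', pvalue)
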
diff --git a/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/util.py b/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/util.py
index 5d0012c0cb1..1f8e8e2c479 100644
--- a/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/util.py
+++ b/gnu/llvm/libcxx/utils/google-benchmark/tools/gbench/util.py
@@ -5,7 +5,6 @@ import os
import tempfile
import subprocess
import sys
-import functools
# Input file type enumeration
IT_Invalid = 0
@@ -120,23 +119,6 @@ def load_benchmark_results(fname):
return json.load(f)
-def sort_benchmark_results(result):
- benchmarks = result['benchmarks']
-
- # From inner key to the outer key!
- benchmarks = sorted(
- benchmarks, key=lambda benchmark: benchmark['repetition_index'] if 'repetition_index' in benchmark else -1)
- benchmarks = sorted(
- benchmarks, key=lambda benchmark: 1 if 'run_type' in benchmark and benchmark['run_type'] == "aggregate" else 0)
- benchmarks = sorted(
- benchmarks, key=lambda benchmark: benchmark['per_family_instance_index'] if 'per_family_instance_index' in benchmark else -1)
- benchmarks = sorted(
- benchmarks, key=lambda benchmark: benchmark['family_index'] if 'family_index' in benchmark else -1)
-
- result['benchmarks'] = benchmarks
- return result
-
-
def run_benchmark(exe_name, benchmark_flags):
"""
Run a benchmark specified by 'exe_name' with the specified
@@ -176,6 +158,7 @@ def run_or_load_benchmark(filename, benchmark_flags):
ftype = check_input_file(filename)
if ftype == IT_JSON:
return load_benchmark_results(filename)
- if ftype == IT_Executable:
+ elif ftype == IT_Executable:
return run_benchmark(filename, benchmark_flags)
- raise ValueError('Unknown file type %s' % ftype)
+ else:
+ assert False # This branch is unreachable
diff --git a/gnu/llvm/libcxx/utils/libcxx/test/config.py b/gnu/llvm/libcxx/utils/libcxx/test/config.py
index 3cd0db45496..3c9b3cd6f55 100644
--- a/gnu/llvm/libcxx/utils/libcxx/test/config.py
+++ b/gnu/llvm/libcxx/utils/libcxx/test/config.py
@@ -6,11 +6,11 @@
#
#===----------------------------------------------------------------------===##
-import copy
+import locale
import os
+import platform
import pkgutil
import pipes
-import platform
import re
import shlex
import shutil
@@ -18,11 +18,9 @@ import sys
from libcxx.compiler import CXXCompiler
from libcxx.test.target_info import make_target_info
+from libcxx.test.executor import *
+from libcxx.test.tracing import *
import libcxx.util
-import libcxx.test.features
-import libcxx.test.newconfig
-import libcxx.test.params
-import lit
def loadSiteConfig(lit_config, config, param_name, env_name):
# We haven't loaded the site specific configuration (the user is
@@ -73,7 +71,11 @@ class Configuration(object):
self.link_shared = self.get_lit_bool('enable_shared', default=True)
self.debug_build = self.get_lit_bool('debug_build', default=False)
self.exec_env = dict()
+ self.use_target = False
+ self.use_system_cxx_lib = False
self.use_clang_verify = False
+ self.long_tests = None
+ self.execute_external = False
def get_lit_conf(self, name, default=None):
val = self.lit_config.params.get(name, None)
@@ -109,56 +111,68 @@ class Configuration(object):
return check_value(val, env_var)
return check_value(conf_val, name)
+ def get_modules_enabled(self):
+ return self.get_lit_bool('enable_modules',
+ default=False,
+ env_var='LIBCXX_ENABLE_MODULES')
+
def make_static_lib_name(self, name):
"""Return the full filename for the specified library name"""
- if self.target_info.is_windows() and not self.target_info.is_mingw():
+ if self.target_info.is_windows():
assert name == 'c++' # Only allow libc++ to use this function for now.
return 'lib' + name + '.lib'
else:
return 'lib' + name + '.a'
def configure(self):
- self.target_info = make_target_info(self)
- self.executor = self.get_lit_conf('executor')
+ self.configure_target_info()
+ self.configure_executor()
+ self.configure_use_system_cxx_lib()
self.configure_cxx()
+ self.configure_triple()
+ self.configure_deployment()
self.configure_src_root()
self.configure_obj_root()
- self.cxx_stdlib_under_test = self.get_lit_conf('cxx_stdlib_under_test', 'libc++')
- self.cxx_library_root = self.get_lit_conf('cxx_library_root', self.libcxx_obj_root)
- self.abi_library_root = self.get_lit_conf('abi_library_root') or self.cxx_library_root
- self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root', self.cxx_library_root)
- self.abi_runtime_root = self.get_lit_conf('abi_runtime_root', self.abi_library_root)
+ self.configure_cxx_stdlib_under_test()
+ self.configure_cxx_library_root()
+ self.configure_use_clang_verify()
+ self.configure_use_thread_safety()
+ self.configure_execute_external()
+ self.configure_ccache()
self.configure_compile_flags()
+ self.configure_filesystem_compile_flags()
self.configure_link_flags()
self.configure_env()
+ self.configure_color_diagnostics()
+ self.configure_debug_mode()
+ self.configure_warnings()
+ self.configure_sanitizer()
self.configure_coverage()
+ self.configure_modules()
+ self.configure_coroutines()
self.configure_substitutions()
self.configure_features()
- libcxx.test.newconfig.configure(
- libcxx.test.params.DEFAULT_PARAMETERS,
- libcxx.test.features.DEFAULT_FEATURES,
- self.config,
- self.lit_config
- )
-
- self.lit_config.note("All available features: {}".format(self.config.available_features))
-
def print_config_info(self):
+ # Print the final compile and link flags.
+ self.lit_config.note('Using compiler: %s' % self.cxx.path)
+ self.lit_config.note('Using flags: %s' % self.cxx.flags)
if self.cxx.use_modules:
self.lit_config.note('Using modules flags: %s' %
self.cxx.modules_flags)
+ self.lit_config.note('Using compile flags: %s'
+ % self.cxx.compile_flags)
if len(self.cxx.warning_flags):
self.lit_config.note('Using warnings: %s' % self.cxx.warning_flags)
+ self.lit_config.note('Using link flags: %s' % self.cxx.link_flags)
+ # Print as list to prevent "set([...])" from being printed.
+ self.lit_config.note('Using available_features: %s' %
+ list(self.config.available_features))
show_env_vars = {}
for k,v in self.exec_env.items():
if k not in os.environ or os.environ[k] != v:
show_env_vars[k] = v
self.lit_config.note('Adding environment variables: %r' % show_env_vars)
- self.lit_config.note("Linking against the C++ Library at {}".format(self.cxx_library_root))
- self.lit_config.note("Running against the C++ Library at {}".format(self.cxx_runtime_root))
- self.lit_config.note("Linking against the ABI Library at {}".format(self.abi_library_root))
- self.lit_config.note("Running against the ABI Library at {}".format(self.abi_runtime_root))
sys.stderr.flush() # Force flushing to avoid broken output on Windows
def get_test_format(self):
@@ -166,14 +180,39 @@ class Configuration(object):
return LibcxxTestFormat(
self.cxx,
self.use_clang_verify,
+ self.execute_external,
self.executor,
exec_env=self.exec_env)
+ def configure_executor(self):
+ exec_str = self.get_lit_conf('executor', "None")
+ te = eval(exec_str)
+ if te:
+ self.lit_config.note("Using executor: %r" % exec_str)
+ if self.lit_config.useValgrind:
+ # We have no way of knowing where in the chain the
+ # ValgrindExecutor is supposed to go. It is likely
+ # that the user wants it at the end, but we have no
+ # way of getting at that easily.
+ self.lit_config.fatal("Cannot infer how to create a Valgrind "
+ " executor.")
+ else:
+ te = LocalExecutor()
+ if self.lit_config.useValgrind:
+ te = ValgrindExecutor(self.lit_config.valgrindArgs, te)
+
+ te.target_info = self.target_info
+
+ self.executor = te
+
+ def configure_target_info(self):
+ self.target_info = make_target_info(self)
+
def configure_cxx(self):
# Gather various compiler parameters.
cxx = self.get_lit_conf('cxx_under_test')
self.cxx_is_clang_cl = cxx is not None and \
- os.path.basename(cxx).startswith('clang-cl')
+ os.path.basename(cxx) == 'clang-cl.exe'
# If no specific cxx_under_test was given, attempt to infer it as
# clang++.
if cxx is None or self.cxx_is_clang_cl:
@@ -193,7 +232,21 @@ class Configuration(object):
'(e.g., --param=cxx_under_test=clang++)')
self.cxx = CXXCompiler(self, cxx) if not self.cxx_is_clang_cl else \
self._configure_clang_cl(cxx)
+ cxx_type = self.cxx.type
+ if cxx_type is not None:
+ assert self.cxx.version is not None
+ maj_v, min_v, patch_v = self.cxx.version
+ self.config.available_features.add(cxx_type)
+ self.config.available_features.add('%s-%s' % (cxx_type, maj_v))
+ self.config.available_features.add('%s-%s.%s' % (
+ cxx_type, maj_v, min_v))
+ self.config.available_features.add('%s-%s.%s.%s' % (
+ cxx_type, maj_v, min_v, patch_v))
self.cxx.compile_env = dict(os.environ)
+ # 'CCACHE_CPP2' prevents ccache from stripping comments while
+ # preprocessing. This is required to prevent stripping of '-verify'
+ # comments.
+ self.cxx.compile_env['CCACHE_CPP2'] = '1'
def _configure_clang_cl(self, clang_path):
def _split_env_var(var):
@@ -205,12 +258,25 @@ class Configuration(object):
assert self.cxx_is_clang_cl
flags = []
- compile_flags = []
+ compile_flags = _prefixed_env_list('INCLUDE', '-isystem')
link_flags = _prefixed_env_list('LIB', '-L')
+ for path in _split_env_var('LIB'):
+ self.add_path(self.exec_env, path)
return CXXCompiler(self, clang_path, flags=flags,
compile_flags=compile_flags,
link_flags=link_flags)
+ def _dump_macros_verbose(self, *args, **kwargs):
+ macros_or_error = self.cxx.dumpMacros(*args, **kwargs)
+ if isinstance(macros_or_error, tuple):
+ cmd, out, err, rc = macros_or_error
+ report = libcxx.util.makeReport(cmd, out, err, rc)
+ report += "Compiler failed unexpectedly when dumping macros!"
+ self.lit_config.fatal(report)
+ return None
+ assert isinstance(macros_or_error, dict)
+ return macros_or_error
+
def configure_src_root(self):
self.libcxx_src_root = self.get_lit_conf(
'libcxx_src_root', os.path.dirname(self.config.test_source_root))
@@ -231,8 +297,184 @@ class Configuration(object):
else:
self.libcxx_obj_root = self.project_obj_root
+ def configure_cxx_library_root(self):
+ self.cxx_library_root = self.get_lit_conf('cxx_library_root',
+ self.libcxx_obj_root)
+ self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root',
+ self.cxx_library_root)
+
+ def configure_use_system_cxx_lib(self):
+ # This test suite supports testing against either the system library or
+ # the locally built one; the former mode is useful for testing ABI
+ # compatibility between the current headers and a shipping dynamic
+ # library.
+ # Default to testing against the locally built libc++ library.
+ self.use_system_cxx_lib = self.get_lit_conf('use_system_cxx_lib')
+ if self.use_system_cxx_lib == 'true':
+ self.use_system_cxx_lib = True
+ elif self.use_system_cxx_lib == 'false':
+ self.use_system_cxx_lib = False
+ elif self.use_system_cxx_lib:
+ assert os.path.isdir(self.use_system_cxx_lib), "the specified use_system_cxx_lib parameter (%s) is not a valid directory" % self.use_system_cxx_lib
+ self.use_system_cxx_lib = os.path.abspath(self.use_system_cxx_lib)
+ self.lit_config.note(
+ "inferred use_system_cxx_lib as: %r" % self.use_system_cxx_lib)
+
+ def configure_cxx_stdlib_under_test(self):
+ self.cxx_stdlib_under_test = self.get_lit_conf(
+ 'cxx_stdlib_under_test', 'libc++')
+ if self.cxx_stdlib_under_test not in \
+ ['libc++', 'libstdc++', 'msvc', 'cxx_default']:
+ self.lit_config.fatal(
+ 'unsupported value for "cxx_stdlib_under_test": %s'
+ % self.cxx_stdlib_under_test)
+ self.config.available_features.add(self.cxx_stdlib_under_test)
+ if self.cxx_stdlib_under_test == 'libstdc++':
+ self.config.available_features.add('libstdc++')
+ # Manually enable the experimental and filesystem tests for libstdc++
+ # if the options aren't present.
+ # FIXME this is a hack.
+ if self.get_lit_conf('enable_experimental') is None:
+ self.config.enable_experimental = 'true'
+
+ def configure_use_clang_verify(self):
+ '''If set, run clang with -verify on failing tests.'''
+ self.use_clang_verify = self.get_lit_bool('use_clang_verify')
+ if self.use_clang_verify is None:
+ # NOTE: We do not test for the -verify flag directly because
+ # -verify will always exit with non-zero on an empty file.
+ self.use_clang_verify = self.cxx.isVerifySupported()
+ self.lit_config.note(
+ "inferred use_clang_verify as: %r" % self.use_clang_verify)
+ if self.use_clang_verify:
+ self.config.available_features.add('verify-support')
+
+ def configure_use_thread_safety(self):
+        '''If supported, enable clang's -Werror=thread-safety analysis.'''
+ has_thread_safety = self.cxx.hasCompileFlag('-Werror=thread-safety')
+ if has_thread_safety:
+ self.cxx.compile_flags += ['-Werror=thread-safety']
+ self.config.available_features.add('thread-safety')
+ self.lit_config.note("enabling thread-safety annotations")
+
+ def configure_execute_external(self):
+ # Choose between lit's internal shell pipeline runner and a real shell.
+ # If LIT_USE_INTERNAL_SHELL is in the environment, we use that as the
+ # default value. Otherwise we ask the target_info.
+ use_lit_shell_default = os.environ.get('LIT_USE_INTERNAL_SHELL')
+ if use_lit_shell_default is not None:
+ use_lit_shell_default = use_lit_shell_default != '0'
+ else:
+ use_lit_shell_default = self.target_info.use_lit_shell_default()
+ # Check for the command line parameter using the default value if it is
+ # not present.
+ use_lit_shell = self.get_lit_bool('use_lit_shell',
+ use_lit_shell_default)
+ self.execute_external = not use_lit_shell
+
+ def configure_ccache(self):
+ use_ccache_default = os.environ.get('LIBCXX_USE_CCACHE') is not None
+ use_ccache = self.get_lit_bool('use_ccache', use_ccache_default)
+ if use_ccache:
+ self.cxx.use_ccache = True
+ self.lit_config.note('enabling ccache')
+
+ def add_deployment_feature(self, feature):
+ (arch, name, version) = self.config.deployment
+ self.config.available_features.add('%s=%s-%s' % (feature, arch, name))
+ self.config.available_features.add('%s=%s' % (feature, name))
+ self.config.available_features.add('%s=%s%s' % (feature, name, version))
+
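add_deployment_feature() fans one feature name out into arch/name/version variants. A small sketch of the strings it produces, using made-up example deployment values:

# Example deployment values; the harness derives the real ones from the
# 'arch'/'platform' lit parameters or the SDK.
arch, name, version = 'x86_64', 'macosx', '10.14'
feature = 'availability'
print('%s=%s-%s' % (feature, arch, name))    # availability=x86_64-macosx
print('%s=%s' % (feature, name))             # availability=macosx
print('%s=%s%s' % (feature, name, version))  # availability=macosx10.14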
def configure_features(self):
+ additional_features = self.get_lit_conf('additional_features')
+ if additional_features:
+ for f in additional_features.split(','):
+ self.config.available_features.add(f.strip())
+ self.target_info.add_locale_features(self.config.available_features)
+
+ target_platform = self.target_info.platform()
+
+ # Write an "available feature" that combines the triple when
+ # use_system_cxx_lib is enabled. This is so that we can easily write
+ # XFAIL markers for tests that are known to fail with versions of
+        # libc++ that were shipped with a particular triple.
+ if self.use_system_cxx_lib:
+ self.config.available_features.add('with_system_cxx_lib')
+ self.config.available_features.add(
+ 'with_system_cxx_lib=%s' % self.config.target_triple)
+
+ # Add subcomponents individually.
+ target_components = self.config.target_triple.split('-')
+ for component in target_components:
+ self.config.available_features.add(
+ 'with_system_cxx_lib=%s' % component)
+
+ # Add available features for more generic versions of the target
+ # triple attached to with_system_cxx_lib.
+ if self.use_deployment:
+ self.add_deployment_feature('with_system_cxx_lib')
+
+ # Configure the availability feature. Availability is only enabled
+ # with libc++, because other standard libraries do not provide
+ # availability markup.
+ if self.use_deployment and self.cxx_stdlib_under_test == 'libc++':
+ self.config.available_features.add('availability')
+ self.add_deployment_feature('availability')
+
+ if self.target_info.is_darwin():
+ self.config.available_features.add('apple-darwin')
+
+        # Insert the platform name into the available features in lower case.
+ self.config.available_features.add(target_platform)
+
+        # Simulator testing can take a really long time for some of these tests,
+        # so add a feature that lets them be marked with 'REQUIRES: long_tests'.
+ self.long_tests = self.get_lit_bool('long_tests')
+ if self.long_tests is None:
+ # Default to running long tests.
+ self.long_tests = True
+ self.lit_config.note(
+ "inferred long_tests as: %r" % self.long_tests)
+
+ if self.long_tests:
+ self.config.available_features.add('long_tests')
+
+ if not self.get_lit_bool('enable_filesystem', default=True):
+ self.config.available_features.add('c++filesystem-disabled')
+ self.config.available_features.add('dylib-has-no-filesystem')
+
+
+ # Run a compile test for the -fsized-deallocation flag. This is needed
+ # in test/std/language.support/support.dynamic/new.delete
+ if self.cxx.hasCompileFlag('-fsized-deallocation'):
+ self.config.available_features.add('-fsized-deallocation')
+
+ if self.cxx.hasCompileFlag('-faligned-allocation'):
+ self.config.available_features.add('-faligned-allocation')
+ else:
+            # FIXME: remove this once more than just clang-4.0 supports
+            # C++17 aligned allocation.
+ self.config.available_features.add('no-aligned-allocation')
+
+ if self.cxx.hasCompileFlag('-fdelayed-template-parsing'):
+ self.config.available_features.add('fdelayed-template-parsing')
+
+ if self.get_lit_bool('has_libatomic', False):
+ self.config.available_features.add('libatomic')
+
+ macros = self._dump_macros_verbose()
+ if '__cpp_if_constexpr' not in macros:
+ self.config.available_features.add('libcpp-no-if-constexpr')
+
+ if '__cpp_structured_bindings' not in macros:
+ self.config.available_features.add('libcpp-no-structured-bindings')
+
+ if '__cpp_deduction_guides' not in macros or \
+ intMacroValue(macros['__cpp_deduction_guides']) < 201611:
+ self.config.available_features.add('libcpp-no-deduction-guides')
+
if self.target_info.is_windows():
+ self.config.available_features.add('windows')
if self.cxx_stdlib_under_test == 'libc++':
# LIBCXX-WINDOWS-FIXME is the feature name used to XFAIL the
# initial Windows failures until they can be properly diagnosed
@@ -241,19 +483,34 @@ class Configuration(object):
# using this feature. (Also see llvm.org/PR32730)
self.config.available_features.add('LIBCXX-WINDOWS-FIXME')
+ # Attempt to detect the glibc version by querying for __GLIBC__
+ # in 'features.h'.
+ macros = self.cxx.dumpMacros(flags=['-include', 'features.h'])
+ if isinstance(macros, dict) and '__GLIBC__' in macros:
+ maj_v, min_v = (macros['__GLIBC__'], macros['__GLIBC_MINOR__'])
+ self.config.available_features.add('glibc')
+ self.config.available_features.add('glibc-%s' % maj_v)
+ self.config.available_features.add('glibc-%s.%s' % (maj_v, min_v))
+
+ libcxx_gdb = self.get_lit_conf('libcxx_gdb')
+ if libcxx_gdb and 'NOTFOUND' not in libcxx_gdb:
+ self.config.available_features.add('libcxx_gdb')
+ self.cxx.libcxx_gdb = libcxx_gdb
+
+        # Support Objective-C++ only on macOS and only if the compiler supports it.
+ if self.target_info.platform() == "darwin" and \
+ self.target_info.is_host_macosx() and \
+ self.cxx.hasCompileFlag(["-x", "objective-c++", "-fobjc-arc"]):
+ self.config.available_features.add("objective-c++")
+
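configure_features() gates several features on feature-test macros such as __cpp_deduction_guides, comparing their values numerically via intMacroValue(). That helper is defined elsewhere in the harness; a sketch of its assumed behaviour:

def int_macro_value(token):
    # Feature-test macros are dumped as strings such as '201611L';
    # strip the integer suffix before comparing numerically.
    return int(token.rstrip('LlUu'))

assert int_macro_value('201611L') >= 201611
assert int_macro_value('201606') < 201611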
def configure_compile_flags(self):
self.configure_default_compile_flags()
# Configure extra flags
compile_flags_str = self.get_lit_conf('compile_flags', '')
self.cxx.compile_flags += shlex.split(compile_flags_str)
if self.target_info.is_windows():
+ # FIXME: Can we remove this?
self.cxx.compile_flags += ['-D_CRT_SECURE_NO_WARNINGS']
- # Don't warn about using common but nonstandard unprefixed functions
- # like chdir, fileno.
- self.cxx.compile_flags += ['-D_CRT_NONSTDC_NO_WARNINGS']
- # Build the tests in the same configuration as libcxx itself,
- # to avoid mismatches if linked statically.
- self.cxx.compile_flags += ['-D_CRT_STDIO_ISO_WIDE_SPECIFIERS']
# Required so that tests using min/max don't fail on Windows,
# and so that those tests don't have to be changed to tolerate
# this insanity.
@@ -263,10 +520,48 @@ class Configuration(object):
self.cxx.compile_flags += shlex.split(additional_flags)
def configure_default_compile_flags(self):
+        # Try to get the std version from the command line. Fall back to the
+        # default given in lit.site.cfg if it is not present. If no default is
+        # given either, infer the newest dialect supported by the compiler.
+ std = self.get_lit_conf('std')
+ if not std:
+ # Choose the newest possible language dialect if none is given.
+ possible_stds = ['c++2a', 'c++17', 'c++1z', 'c++14', 'c++11',
+ 'c++03']
+ if self.cxx.type == 'gcc':
+ maj_v, _, _ = self.cxx.version
+ maj_v = int(maj_v)
+ if maj_v < 7:
+ possible_stds.remove('c++1z')
+ possible_stds.remove('c++17')
+ # FIXME: How many C++14 tests actually fail under GCC 5 and 6?
+ # Should we XFAIL them individually instead?
+ if maj_v <= 6:
+ possible_stds.remove('c++14')
+ for s in possible_stds:
+ if self.cxx.hasCompileFlag('-std=%s' % s):
+ std = s
+ self.lit_config.note(
+ 'inferred language dialect as: %s' % std)
+ break
+ if not std:
+ self.lit_config.fatal(
+ 'Failed to infer a supported language dialect from one of %r'
+ % possible_stds)
+ self.cxx.compile_flags += ['-std={0}'.format(std)]
+ std_feature = std.replace('gnu++', 'c++')
+        std_feature = std_feature.replace('1z', '17')
+ self.config.available_features.add(std_feature)
# Configure include paths
self.configure_compile_flags_header_includes()
self.target_info.add_cxx_compile_flags(self.cxx.compile_flags)
- self.target_info.add_cxx_flags(self.cxx.flags)
+ # Configure feature flags.
+ self.configure_compile_flags_exceptions()
+ self.configure_compile_flags_rtti()
+ self.configure_compile_flags_abi_version()
+ enable_32bit = self.get_lit_bool('enable_32bit', False)
+ if enable_32bit:
+ self.cxx.flags += ['-m32']
# Use verbose output for better errors
self.cxx.flags += ['-v']
sysroot = self.get_lit_conf('sysroot')
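The dialect probe added in the hunk above walks from the newest candidate -std= flag to the oldest and keeps the first one the compiler accepts. A standalone sketch of the same idea, assuming a has_compile_flag() predicate equivalent to CXXCompiler.hasCompileFlag():

def infer_std(has_compile_flag, gcc_major=None):
    # Probe from newest to oldest, dropping dialects old GCCs mishandle.
    candidates = ['c++2a', 'c++17', 'c++1z', 'c++14', 'c++11', 'c++03']
    if gcc_major is not None and gcc_major < 7:
        candidates = [s for s in candidates if s not in ('c++17', 'c++1z')]
        if gcc_major <= 6:
            candidates.remove('c++14')
    for std in candidates:
        if has_compile_flag('-std=%s' % std):
            return std
    raise RuntimeError('no supported language dialect found')

# With a compiler that accepts dialects up to C++14:
print(infer_std(lambda f: f in ('-std=c++14', '-std=c++11', '-std=c++03')))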
@@ -281,6 +576,15 @@ class Configuration(object):
# being elided.
if self.target_info.is_windows() and self.debug_build:
self.cxx.compile_flags += ['-D_DEBUG']
+ if self.use_target:
+ if not self.cxx.addFlagIfSupported(
+ ['--target=' + self.config.target_triple]):
+ self.lit_config.warning('use_target is true but --target is '\
+ 'not supported by the compiler')
+ if self.use_deployment:
+ arch, name, version = self.config.deployment
+ self.cxx.flags += ['-arch', arch]
+ self.cxx.flags += ['-m' + name + '-version-min=' + version]
# Add includes for support headers used in the tests.
support_path = os.path.join(self.libcxx_src_root, 'test/support')
@@ -295,11 +599,15 @@ class Configuration(object):
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'test')]
self.config.available_features.add('parallel-algorithms')
+ # FIXME(EricWF): variant_size.pass.cpp requires a slightly larger
+ # template depth with older Clang versions.
+ self.cxx.addFlagIfSupported('-ftemplate-depth=270')
+
def configure_compile_flags_header_includes(self):
support_path = os.path.join(self.libcxx_src_root, 'test', 'support')
+ self.configure_config_site_header()
if self.cxx_stdlib_under_test != 'libstdc++' and \
- not self.target_info.is_windows() and \
- not self.target_info.is_zos():
+ not self.target_info.is_windows():
self.cxx.compile_flags += [
'-include', os.path.join(support_path, 'nasty_macros.h')]
if self.cxx_stdlib_under_test == 'msvc':
@@ -314,19 +622,16 @@ class Configuration(object):
'set_windows_crt_report_mode.h')
]
cxx_headers = self.get_lit_conf('cxx_headers')
- if cxx_headers is None and self.cxx_stdlib_under_test != 'libc++':
+ if cxx_headers == '' or (cxx_headers is None
+ and self.cxx_stdlib_under_test != 'libc++'):
self.lit_config.note('using the system cxx headers')
return
self.cxx.compile_flags += ['-nostdinc++']
+ if cxx_headers is None:
+ cxx_headers = os.path.join(self.libcxx_src_root, 'include')
if not os.path.isdir(cxx_headers):
- self.lit_config.fatal("cxx_headers='{}' is not a directory.".format(cxx_headers))
- (path, version) = os.path.split(cxx_headers)
- (path, cxx) = os.path.split(path)
- triple = self.get_lit_conf('target_triple', None)
- if triple is not None:
- cxx_target_headers = os.path.join(path, triple, cxx, version)
- if os.path.isdir(cxx_target_headers):
- self.cxx.compile_flags += ['-I' + cxx_target_headers]
+ self.lit_config.fatal("cxx_headers='%s' is not a directory."
+ % cxx_headers)
self.cxx.compile_flags += ['-I' + cxx_headers]
if self.libcxx_obj_root is not None:
cxxabi_headers = os.path.join(self.libcxx_obj_root, 'include',
@@ -334,6 +639,117 @@ class Configuration(object):
if os.path.isdir(cxxabi_headers):
self.cxx.compile_flags += ['-I' + cxxabi_headers]
+ def configure_config_site_header(self):
+ # Check for a possible __config_site in the build directory. We
+ # use this if it exists.
+ if self.libcxx_obj_root is None:
+ return
+ config_site_header = os.path.join(self.libcxx_obj_root, '__config_site')
+ if not os.path.isfile(config_site_header):
+ return
+ contained_macros = self.parse_config_site_and_add_features(
+ config_site_header)
+ self.lit_config.note('Using __config_site header %s with macros: %r'
+ % (config_site_header, contained_macros))
+ # FIXME: This must come after the call to
+ # 'parse_config_site_and_add_features(...)' in order for it to work.
+ self.cxx.compile_flags += ['-include', config_site_header]
+
+ def parse_config_site_and_add_features(self, header):
+ """ parse_config_site_and_add_features - Deduce and add the test
+        features that are implied by the #defines in the __config_site
+ header. Return a dictionary containing the macros found in the
+ '__config_site' header.
+ """
+ # Parse the macro contents of __config_site by dumping the macros
+ # using 'c++ -dM -E' and filtering the predefines.
+ predefines = self._dump_macros_verbose()
+ macros = self._dump_macros_verbose(header)
+ feature_macros_keys = set(macros.keys()) - set(predefines.keys())
+ feature_macros = {}
+ for k in feature_macros_keys:
+ feature_macros[k] = macros[k]
+ # We expect the header guard to be one of the definitions
+ assert '_LIBCPP_CONFIG_SITE' in feature_macros
+ del feature_macros['_LIBCPP_CONFIG_SITE']
+        # The __config_site header should be non-empty; otherwise CMake should
+        # never have emitted it.
+ assert len(feature_macros) > 0
+ # FIXME: This is a hack that should be fixed using module maps.
+ # If modules are enabled then we have to lift all of the definitions
+ # in __config_site onto the command line.
+ for m in feature_macros:
+ define = '-D%s' % m
+ if feature_macros[m]:
+ define += '=%s' % (feature_macros[m])
+ self.cxx.modules_flags += [define]
+ self.cxx.compile_flags += ['-Wno-macro-redefined']
+ # Transform each macro name into the feature name used in the tests.
+ # Ex. _LIBCPP_HAS_NO_THREADS -> libcpp-has-no-threads
+ for m in feature_macros:
+ if m == '_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS' or \
+ m == '_LIBCPP_HIDE_FROM_ABI_PER_TU_BY_DEFAULT':
+ continue
+ if m == '_LIBCPP_ABI_VERSION':
+ self.config.available_features.add('libcpp-abi-version-v%s'
+ % feature_macros[m])
+ continue
+ if m == '_LIBCPP_NO_VCRUNTIME':
+ self.config.available_features.add('libcpp-no-vcruntime')
+ continue
+ assert m.startswith('_LIBCPP_HAS_') or m.startswith('_LIBCPP_ABI_')
+ m = m.lower()[1:].replace('_', '-')
+ self.config.available_features.add(m)
+ return feature_macros
+
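parse_config_site_and_add_features() turns each remaining __config_site macro into a lit feature name. A sketch of that transform:

def macro_to_feature(macro):
    # e.g. '_LIBCPP_HAS_NO_THREADS' -> 'libcpp-has-no-threads'
    assert macro.startswith('_LIBCPP_')
    return macro.lower()[1:].replace('_', '-')

assert macro_to_feature('_LIBCPP_HAS_NO_THREADS') == 'libcpp-has-no-threads'
assert macro_to_feature('_LIBCPP_ABI_UNSTABLE') == 'libcpp-abi-unstable'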
+ def configure_compile_flags_exceptions(self):
+ enable_exceptions = self.get_lit_bool('enable_exceptions', True)
+ if not enable_exceptions:
+ self.config.available_features.add('libcpp-no-exceptions')
+ self.cxx.compile_flags += ['-fno-exceptions']
+
+ def configure_compile_flags_rtti(self):
+ enable_rtti = self.get_lit_bool('enable_rtti', True)
+ if not enable_rtti:
+ self.config.available_features.add('libcpp-no-rtti')
+ self.cxx.compile_flags += ['-fno-rtti', '-D_LIBCPP_NO_RTTI']
+
+ def configure_compile_flags_abi_version(self):
+ abi_version = self.get_lit_conf('abi_version', '').strip()
+ abi_unstable = self.get_lit_bool('abi_unstable')
+ # Only add the ABI version when it is non-default.
+ # FIXME(EricWF): Get the ABI version from the "__config_site".
+ if abi_version and abi_version != '1':
+ self.cxx.compile_flags += ['-D_LIBCPP_ABI_VERSION=' + abi_version]
+ if abi_unstable:
+ self.config.available_features.add('libcpp-abi-unstable')
+ self.cxx.compile_flags += ['-D_LIBCPP_ABI_UNSTABLE']
+
+ def configure_filesystem_compile_flags(self):
+ static_env = os.path.join(self.libcxx_src_root, 'test', 'std',
+ 'input.output', 'filesystems', 'Inputs', 'static_test_env')
+ static_env = os.path.realpath(static_env)
+ assert os.path.isdir(static_env)
+ self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_STATIC_TEST_ROOT="%s"' % static_env]
+
+ dynamic_env = os.path.join(self.config.test_exec_root,
+ 'filesystem', 'Output', 'dynamic_env')
+ dynamic_env = os.path.realpath(dynamic_env)
+ if not os.path.isdir(dynamic_env):
+ os.makedirs(dynamic_env)
+ self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT="%s"' % dynamic_env]
+ self.exec_env['LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT'] = ("%s" % dynamic_env)
+
+ dynamic_helper = os.path.join(self.libcxx_src_root, 'test', 'support',
+ 'filesystem_dynamic_test_helper.py')
+ assert os.path.isfile(dynamic_helper)
+
+ self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_DYNAMIC_TEST_HELPER="%s %s"'
+ % (sys.executable, dynamic_helper)]
+
+
def configure_link_flags(self):
# Configure library path
self.configure_link_flags_cxx_library_path()
@@ -341,17 +757,15 @@ class Configuration(object):
# Configure libraries
if self.cxx_stdlib_under_test == 'libc++':
- if self.target_info.is_mingw():
- self.cxx.link_flags += ['-nostdlib++']
- else:
- self.cxx.link_flags += ['-nodefaultlibs']
+ self.cxx.link_flags += ['-nodefaultlibs']
# FIXME: Handle MSVCRT as part of the ABI library handling.
- if self.target_info.is_windows() and not self.target_info.is_mingw():
+ if self.target_info.is_windows():
self.cxx.link_flags += ['-nostdlib']
self.configure_link_flags_cxx_library()
self.configure_link_flags_abi_library()
self.configure_extra_library_flags()
elif self.cxx_stdlib_under_test == 'libstdc++':
+ self.config.available_features.add('c++experimental')
self.cxx.link_flags += ['-lstdc++fs', '-lm', '-pthread']
elif self.cxx_stdlib_under_test == 'msvc':
# FIXME: Correctly setup debug/release flags here.
@@ -365,37 +779,50 @@ class Configuration(object):
self.cxx.link_flags += shlex.split(link_flags_str)
def configure_link_flags_cxx_library_path(self):
- if self.cxx_library_root:
- self.cxx.link_flags += ['-L' + self.cxx_library_root]
- if self.target_info.is_windows() and self.link_shared:
- self.add_path(self.cxx.compile_env, self.cxx_library_root)
- if self.cxx_runtime_root:
+ if not self.use_system_cxx_lib:
+ if self.cxx_library_root:
+ self.cxx.link_flags += ['-L' + self.cxx_library_root]
+ if self.target_info.is_windows() and self.link_shared:
+ self.add_path(self.cxx.compile_env, self.cxx_library_root)
+ if self.cxx_runtime_root:
+ if not self.target_info.is_windows():
+ self.cxx.link_flags += ['-Wl,-rpath,' +
+ self.cxx_runtime_root]
+ elif self.target_info.is_windows() and self.link_shared:
+ self.add_path(self.exec_env, self.cxx_runtime_root)
+ elif os.path.isdir(str(self.use_system_cxx_lib)):
+ self.cxx.link_flags += ['-L' + self.use_system_cxx_lib]
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' +
- self.cxx_runtime_root]
- elif self.target_info.is_windows() and self.link_shared:
- self.add_path(self.exec_env, self.cxx_runtime_root)
+ self.use_system_cxx_lib]
+ if self.target_info.is_windows() and self.link_shared:
+ self.add_path(self.cxx.compile_env, self.use_system_cxx_lib)
additional_flags = self.get_lit_conf('test_linker_flags')
if additional_flags:
self.cxx.link_flags += shlex.split(additional_flags)
def configure_link_flags_abi_library_path(self):
# Configure ABI library paths.
+ self.abi_library_root = self.get_lit_conf('abi_library_path')
if self.abi_library_root:
self.cxx.link_flags += ['-L' + self.abi_library_root]
- if self.abi_runtime_root:
if not self.target_info.is_windows():
- self.cxx.link_flags += ['-Wl,-rpath,' + self.abi_runtime_root]
+ self.cxx.link_flags += ['-Wl,-rpath,' + self.abi_library_root]
else:
- self.add_path(self.exec_env, self.abi_runtime_root)
+ self.add_path(self.exec_env, self.abi_library_root)
def configure_link_flags_cxx_library(self):
+ libcxx_experimental = self.get_lit_bool('enable_experimental', default=False)
+ if libcxx_experimental:
+ self.config.available_features.add('c++experimental')
+ self.cxx.link_flags += ['-lc++experimental']
if self.link_shared:
self.cxx.link_flags += ['-lc++']
else:
- if self.cxx_library_root:
+ cxx_library_root = self.get_lit_conf('cxx_library_root')
+ if cxx_library_root:
libname = self.make_static_lib_name('c++')
- abs_path = os.path.join(self.cxx_library_root, libname)
+ abs_path = os.path.join(cxx_library_root, libname)
assert os.path.exists(abs_path) and \
"static libc++ library does not exist"
self.cxx.link_flags += [abs_path]
@@ -418,9 +845,10 @@ class Configuration(object):
if libcxxabi_shared:
self.cxx.link_flags += ['-lc++abi']
else:
- if self.abi_library_root:
+ cxxabi_library_root = self.get_lit_conf('abi_library_path')
+ if cxxabi_library_root:
libname = self.make_static_lib_name('c++abi')
- abs_path = os.path.join(self.abi_library_root, libname)
+ abs_path = os.path.join(cxxabi_library_root, libname)
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++abi']
@@ -428,13 +856,8 @@ class Configuration(object):
self.cxx.link_flags += ['-lcxxrt']
elif cxx_abi == 'vcruntime':
debug_suffix = 'd' if self.debug_build else ''
- # This matches the set of libraries linked in the toplevel
- # libcxx CMakeLists.txt if building targeting msvc.
self.cxx.link_flags += ['-l%s%s' % (lib, debug_suffix) for lib in
- ['vcruntime', 'ucrt', 'msvcrt', 'msvcprt']]
- # The compiler normally links in oldnames.lib too, but we've
- # specified -nostdlib above, so we need to specify it manually.
- self.cxx.link_flags += ['-loldnames']
+ ['vcruntime', 'ucrt', 'msvcrt']]
elif cxx_abi == 'none' or cxx_abi == 'default':
if self.target_info.is_windows():
debug_suffix = 'd' if self.debug_build else ''
@@ -448,37 +871,338 @@ class Configuration(object):
self.cxx.link_flags += ['-lc++external_threads']
self.target_info.add_cxx_link_flags(self.cxx.link_flags)
+ def configure_color_diagnostics(self):
+ use_color = self.get_lit_conf('color_diagnostics')
+ if use_color is None:
+ use_color = os.environ.get('LIBCXX_COLOR_DIAGNOSTICS')
+ if use_color is None:
+ return
+ if use_color != '':
+ self.lit_config.fatal('Invalid value for color_diagnostics "%s".'
+ % use_color)
+ color_flag = '-fdiagnostics-color=always'
+ # Check if the compiler supports the color diagnostics flag. Issue a
+ # warning if it does not since color diagnostics have been requested.
+ if not self.cxx.hasCompileFlag(color_flag):
+ self.lit_config.warning(
+ 'color diagnostics have been requested but are not supported '
+ 'by the compiler')
+ else:
+ self.cxx.flags += [color_flag]
+
+ def configure_debug_mode(self):
+ debug_level = self.get_lit_conf('debug_level', None)
+ if not debug_level:
+ return
+ if debug_level not in ['0', '1']:
+ self.lit_config.fatal('Invalid value for debug_level "%s".'
+ % debug_level)
+ self.cxx.compile_flags += ['-D_LIBCPP_DEBUG=%s' % debug_level]
+
+ def configure_warnings(self):
+ # Turn on warnings by default for Clang based compilers when C++ >= 11
+ default_enable_warnings = self.cxx.type in ['clang', 'apple-clang'] \
+ and len(self.config.available_features.intersection(
+ ['c++11', 'c++14', 'c++17', 'c++2a'])) != 0
+ enable_warnings = self.get_lit_bool('enable_warnings',
+ default_enable_warnings)
+ self.cxx.useWarnings(enable_warnings)
+ self.cxx.warning_flags += [
+ '-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER',
+ '-Wall', '-Wextra', '-Werror'
+ ]
+ if self.cxx.hasWarningFlag('-Wuser-defined-warnings'):
+ self.cxx.warning_flags += ['-Wuser-defined-warnings']
+ self.config.available_features.add('diagnose-if-support')
+ self.cxx.addWarningFlagIfSupported('-Wshadow')
+ self.cxx.addWarningFlagIfSupported('-Wno-unused-command-line-argument')
+ self.cxx.addWarningFlagIfSupported('-Wno-attributes')
+ self.cxx.addWarningFlagIfSupported('-Wno-pessimizing-move')
+ self.cxx.addWarningFlagIfSupported('-Wno-c++11-extensions')
+ self.cxx.addWarningFlagIfSupported('-Wno-user-defined-literals')
+ self.cxx.addWarningFlagIfSupported('-Wno-noexcept-type')
+ self.cxx.addWarningFlagIfSupported('-Wno-aligned-allocation-unavailable')
+ # These warnings should be enabled in order to support the MSVC
+        # team using the test suite; they enable the warnings below and
+ # expect the test suite to be clean.
+ self.cxx.addWarningFlagIfSupported('-Wsign-compare')
+ self.cxx.addWarningFlagIfSupported('-Wunused-variable')
+ self.cxx.addWarningFlagIfSupported('-Wunused-parameter')
+ self.cxx.addWarningFlagIfSupported('-Wunreachable-code')
+ std = self.get_lit_conf('std', None)
+ if std in ['c++98', 'c++03']:
+ # The '#define static_assert' provided by libc++ in C++03 mode
+ # causes an unused local typedef whenever it is used.
+ self.cxx.addWarningFlagIfSupported('-Wno-unused-local-typedef')
+
+ def configure_sanitizer(self):
+ san = self.get_lit_conf('use_sanitizer', '').strip()
+ if san:
+ self.target_info.add_sanitizer_features(san, self.config.available_features)
+ # Search for llvm-symbolizer along the compiler path first
+ # and then along the PATH env variable.
+ symbolizer_search_paths = os.environ.get('PATH', '')
+ cxx_path = libcxx.util.which(self.cxx.path)
+ if cxx_path is not None:
+ symbolizer_search_paths = (
+ os.path.dirname(cxx_path) +
+ os.pathsep + symbolizer_search_paths)
+ llvm_symbolizer = libcxx.util.which('llvm-symbolizer',
+ symbolizer_search_paths)
+
+ def add_ubsan():
+ self.cxx.flags += ['-fsanitize=undefined',
+ '-fno-sanitize=float-divide-by-zero',
+ '-fno-sanitize-recover=all']
+ self.exec_env['UBSAN_OPTIONS'] = 'print_stacktrace=1'
+ self.config.available_features.add('ubsan')
+
+ # Setup the sanitizer compile flags
+ self.cxx.flags += ['-g', '-fno-omit-frame-pointer']
+ if san == 'Address' or san == 'Address;Undefined' or san == 'Undefined;Address':
+ self.cxx.flags += ['-fsanitize=address']
+ if llvm_symbolizer is not None:
+ self.exec_env['ASAN_SYMBOLIZER_PATH'] = llvm_symbolizer
+ # FIXME: Turn ODR violation back on after PR28391 is resolved
+ # https://bugs.llvm.org/show_bug.cgi?id=28391
+ self.exec_env['ASAN_OPTIONS'] = 'detect_odr_violation=0'
+ self.config.available_features.add('asan')
+ self.config.available_features.add('sanitizer-new-delete')
+ self.cxx.compile_flags += ['-O1']
+ if san == 'Address;Undefined' or san == 'Undefined;Address':
+ add_ubsan()
+ elif san == 'Memory' or san == 'MemoryWithOrigins':
+ self.cxx.flags += ['-fsanitize=memory']
+ if san == 'MemoryWithOrigins':
+ self.cxx.compile_flags += [
+ '-fsanitize-memory-track-origins']
+ if llvm_symbolizer is not None:
+ self.exec_env['MSAN_SYMBOLIZER_PATH'] = llvm_symbolizer
+ self.config.available_features.add('msan')
+ self.config.available_features.add('sanitizer-new-delete')
+ self.cxx.compile_flags += ['-O1']
+ elif san == 'Undefined':
+ add_ubsan()
+ self.cxx.compile_flags += ['-O2']
+ elif san == 'Thread':
+ self.cxx.flags += ['-fsanitize=thread']
+ self.config.available_features.add('tsan')
+ self.config.available_features.add('sanitizer-new-delete')
+ else:
+ self.lit_config.fatal('unsupported value for '
+ 'use_sanitizer: {0}'.format(san))
+ san_lib = self.get_lit_conf('sanitizer_library')
+ if san_lib:
+ self.cxx.link_flags += [
+ san_lib, '-Wl,-rpath,%s' % os.path.dirname(san_lib)]
+
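configure_sanitizer() looks for llvm-symbolizer next to the compiler before falling back to PATH. A sketch of that search order, assuming libcxx.util.which() behaves like shutil.which() with an explicit search path:

import os
import shutil

def find_symbolizer(cxx_path):
    # Prefer a symbolizer that ships alongside the compiler, then PATH.
    search = os.environ.get('PATH', '')
    resolved = shutil.which(cxx_path)
    if resolved is not None:
        search = os.path.dirname(resolved) + os.pathsep + search
    return shutil.which('llvm-symbolizer', path=search)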
def configure_coverage(self):
self.generate_coverage = self.get_lit_bool('generate_coverage', False)
if self.generate_coverage:
self.cxx.flags += ['-g', '--coverage']
self.cxx.compile_flags += ['-O0']
- def quote(self, s):
- if platform.system() == 'Windows':
- return lit.TestRunner.quote_windows_command([s])
- return pipes.quote(s)
+ def configure_coroutines(self):
+ if self.cxx.hasCompileFlag('-fcoroutines-ts'):
+ macros = self._dump_macros_verbose(flags=['-fcoroutines-ts'])
+ if '__cpp_coroutines' not in macros:
+ self.lit_config.warning('-fcoroutines-ts is supported but '
+ '__cpp_coroutines is not defined')
+ # Consider coroutines supported only when the feature test macro
+ # reflects a recent value.
+ if intMacroValue(macros['__cpp_coroutines']) >= 201703:
+ self.config.available_features.add('fcoroutines-ts')
+
+ def configure_modules(self):
+ modules_flags = ['-fmodules']
+ if not self.target_info.is_darwin():
+ modules_flags += ['-Xclang', '-fmodules-local-submodule-visibility']
+ supports_modules = self.cxx.hasCompileFlag(modules_flags)
+ enable_modules = self.get_modules_enabled()
+ if enable_modules and not supports_modules:
+ self.lit_config.fatal(
+ '-fmodules is enabled but not supported by the compiler')
+ if not supports_modules:
+ return
+ self.config.available_features.add('modules-support')
+ module_cache = os.path.join(self.config.test_exec_root,
+ 'modules.cache')
+ module_cache = os.path.realpath(module_cache)
+ if os.path.isdir(module_cache):
+ shutil.rmtree(module_cache)
+ os.makedirs(module_cache)
+ self.cxx.modules_flags += modules_flags + \
+ ['-fmodules-cache-path=' + module_cache]
+ if enable_modules:
+ self.config.available_features.add('-fmodules')
+ self.cxx.useModules()
def configure_substitutions(self):
- sub = self.config.substitutions
- sub.append(('%{cxx}', self.quote(self.cxx.path)))
- flags = self.cxx.flags + (self.cxx.modules_flags if self.cxx.use_modules else [])
- compile_flags = self.cxx.compile_flags + (self.cxx.warning_flags if self.cxx.use_warnings else [])
- sub.append(('%{flags}', ' '.join(map(self.quote, flags))))
- sub.append(('%{compile_flags}', ' '.join(map(self.quote, compile_flags))))
- sub.append(('%{link_flags}', ' '.join(map(self.quote, self.cxx.link_flags))))
+ tool_env = ''
+ if self.target_info.is_darwin():
+ # Do not pass DYLD_LIBRARY_PATH to the compiler, linker, etc. as
+ # these tools are not meant to exercise the just-built libraries.
+ tool_env += 'DYLD_LIBRARY_PATH="" '
+ sub = self.config.substitutions
+ cxx_path = tool_env + pipes.quote(self.cxx.path)
+ # Configure compiler substitutions
+ sub.append(('%cxx', cxx_path))
+ sub.append(('%libcxx_src_root', self.libcxx_src_root))
+ # Configure flags substitutions
+ flags_str = ' '.join([pipes.quote(f) for f in self.cxx.flags])
+ compile_flags_str = ' '.join([pipes.quote(f) for f in self.cxx.compile_flags])
+ link_flags_str = ' '.join([pipes.quote(f) for f in self.cxx.link_flags])
+ all_flags = '%s %s %s' % (flags_str, compile_flags_str, link_flags_str)
+ sub.append(('%flags', flags_str))
+ sub.append(('%compile_flags', compile_flags_str))
+ sub.append(('%link_flags', link_flags_str))
+ sub.append(('%all_flags', all_flags))
+ if self.cxx.isVerifySupported():
+ verify_str = ' ' + ' '.join(self.cxx.verify_flags) + ' '
+ sub.append(('%verify', verify_str))
+ # Add compile and link shortcuts
+ compile_str = (cxx_path + ' -o %t.o %s -c ' + flags_str
+ + ' ' + compile_flags_str)
+ link_str = (cxx_path + ' -o %t.exe %t.o ' + flags_str + ' '
+ + link_flags_str)
+ assert type(link_str) is str
+ build_str = cxx_path + ' -o %t.exe %s ' + all_flags
+ if self.cxx.use_modules:
+ sub.append(('%compile_module', compile_str))
+ sub.append(('%build_module', build_str))
+ elif self.cxx.modules_flags is not None:
+ modules_str = ' '.join(self.cxx.modules_flags) + ' '
+ sub.append(('%compile_module', compile_str + ' ' + modules_str))
+ sub.append(('%build_module', build_str + ' ' + modules_str))
+ sub.append(('%compile', compile_str))
+ sub.append(('%link', link_str))
+ sub.append(('%build', build_str))
+ # Configure exec prefix substitutions.
+ # Configure run env substitution.
codesign_ident = self.get_lit_conf('llvm_codesign_identity', '')
- env_vars = ' '.join('%s=%s' % (k, self.quote(v)) for (k, v) in self.exec_env.items())
- exec_args = [
- '--execdir %T',
- '--codesign_identity "{}"'.format(codesign_ident),
- '--env {}'.format(env_vars)
- ]
- sub.append(('%{exec}', '{} {} -- '.format(self.executor, ' '.join(exec_args))))
+ run_py = os.path.join(self.libcxx_src_root, 'utils', 'run.py')
+ run_str = '%s %s "%s" %%t.exe' % (pipes.quote(sys.executable), \
+ pipes.quote(run_py), codesign_ident)
+ sub.append(('%run', run_str))
+ # Configure not program substitutions
+ not_py = os.path.join(self.libcxx_src_root, 'utils', 'not.py')
+ not_str = '%s %s ' % (pipes.quote(sys.executable), pipes.quote(not_py))
+ sub.append(('not ', not_str))
+ if self.get_lit_conf('libcxx_gdb'):
+ sub.append(('%libcxx_gdb', self.get_lit_conf('libcxx_gdb')))
+
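The substitutions registered above are (pattern, replacement) pairs that lit expands inside RUN: lines. A rough model of that expansion, with hypothetical values (lit's actual engine is more involved):

substitutions = [('%cxx', '/usr/bin/c++'), ('%flags', '-v'),
                 ('%run', 'python run.py "" %t.exe')]
run_line = '%cxx %flags -o %t.exe %s && %run'
for pattern, replacement in substitutions:
    run_line = run_line.replace(pattern, replacement)
print(run_line)
# /usr/bin/c++ -v -o %t.exe %s && python run.py "" %t.exe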
+ def can_use_deployment(self):
+ # Check if the host is on an Apple platform using clang.
+ if not self.target_info.is_darwin():
+ return False
+ if not self.target_info.is_host_macosx():
+ return False
+ if not self.cxx.type.endswith('clang'):
+ return False
+ return True
+
+ def configure_triple(self):
+ # Get or infer the target triple.
+ target_triple = self.get_lit_conf('target_triple')
+ self.use_target = self.get_lit_bool('use_target', False)
+        if self.use_target and not target_triple:
+ self.lit_config.warning('use_target is true but no triple is specified')
+
+ # Use deployment if possible.
+ self.use_deployment = not self.use_target and self.can_use_deployment()
+ if self.use_deployment:
+ return
+
+ # Save the triple (and warn on Apple platforms).
+ self.config.target_triple = target_triple
+ if self.use_target and 'apple' in target_triple:
+ self.lit_config.warning('consider using arch and platform instead'
+ ' of target_triple on Apple platforms')
+
+ # If no target triple was given, try to infer it from the compiler
+ # under test.
+ if not self.config.target_triple:
+ target_triple = self.cxx.getTriple()
+ # Drop sub-major version components from the triple, because the
+ # current XFAIL handling expects exact matches for feature checks.
+ # Example: x86_64-apple-darwin14.0.0 -> x86_64-apple-darwin14
+            # The 5th group handles triples with more than three parts
+            # (e.g. x86_64-pc-linux-gnu).
+ target_triple = re.sub(r'([^-]+)-([^-]+)-([^.]+)([^-]*)(.*)',
+ r'\1-\2-\3\5', target_triple)
+            # linux-gnu is needed in the triple to properly identify Linux
+            # targets that use glibc. Handle redhat and opensuse triples as
+            # special cases and append the missing `-gnu` portion.
+ if (target_triple.endswith('redhat-linux') or
+ target_triple.endswith('suse-linux')):
+ target_triple += '-gnu'
+ self.config.target_triple = target_triple
+ self.lit_config.note(
+ "inferred target_triple as: %r" % self.config.target_triple)
+
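configure_triple() shortens an inferred triple so XFAIL feature checks can match it exactly. A sketch of the regex used above, with the two cases from the comments spelled out:

import re

def shorten_triple(triple):
    # Drop the sub-major Darwin version; leave 4-part triples intact.
    return re.sub(r'([^-]+)-([^-]+)-([^.]+)([^-]*)(.*)', r'\1-\2-\3\5', triple)

assert shorten_triple('x86_64-apple-darwin14.0.0') == 'x86_64-apple-darwin14'
assert shorten_triple('x86_64-pc-linux-gnu') == 'x86_64-pc-linux-gnu'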
+ def configure_deployment(self):
+        assert self.use_deployment is not None
+        assert self.use_target is not None
+ if not self.use_deployment:
+ # Warn about ignored parameters.
+ if self.get_lit_conf('arch'):
+ self.lit_config.warning('ignoring arch, using target_triple')
+ if self.get_lit_conf('platform'):
+ self.lit_config.warning('ignoring platform, using target_triple')
+ return
+
+ assert not self.use_target
+ assert self.target_info.is_host_macosx()
+
+ # Always specify deployment explicitly on Apple platforms, since
+ # otherwise a platform is picked up from the SDK. If the SDK version
+ # doesn't match the system version, tests that use the system library
+ # may fail spuriously.
+ arch = self.get_lit_conf('arch')
+ if not arch:
+ arch = self.cxx.getTriple().split('-', 1)[0]
+ self.lit_config.note("inferred arch as: %r" % arch)
+
+ inferred_platform, name, version = self.target_info.get_platform()
+ if inferred_platform:
+ self.lit_config.note("inferred platform as: %r" % (name + version))
+ self.config.deployment = (arch, name, version)
+
+ # Set the target triple for use by lit.
+ self.config.target_triple = arch + '-apple-' + name + version
+ self.lit_config.note(
+ "computed target_triple as: %r" % self.config.target_triple)
+
+ # If we're testing a system libc++ as opposed to the upstream LLVM one,
+ # take the version of the system libc++ into account to compute which
+ # features are enabled/disabled. Otherwise, disable availability markup,
+ # which is not relevant for non-shipped flavors of libc++.
+ if self.use_system_cxx_lib:
+ # Dylib support for shared_mutex was added in macosx10.12.
+ if name == 'macosx' and version in ('10.%s' % v for v in range(7, 12)):
+ self.config.available_features.add('dylib-has-no-shared_mutex')
+ self.lit_config.note("shared_mutex is not supported by the deployment target")
+ # Throwing bad_optional_access, bad_variant_access and bad_any_cast is
+ # supported starting in macosx10.14.
+ if name == 'macosx' and version in ('10.%s' % v for v in range(7, 14)):
+ self.config.available_features.add('dylib-has-no-bad_optional_access')
+ self.lit_config.note("throwing bad_optional_access is not supported by the deployment target")
+
+ self.config.available_features.add('dylib-has-no-bad_variant_access')
+ self.lit_config.note("throwing bad_variant_access is not supported by the deployment target")
+
+ self.config.available_features.add('dylib-has-no-bad_any_cast')
+ self.lit_config.note("throwing bad_any_cast is not supported by the deployment target")
+            # Filesystem is supported on Apple platforms starting with macosx10.15.
+ if name == 'macosx' and version in ('10.%s' % v for v in range(7, 15)):
+ self.config.available_features.add('dylib-has-no-filesystem')
+ self.lit_config.note("the deployment target does not support <filesystem>")
+ else:
+ self.cxx.flags += ['-D_LIBCPP_DISABLE_AVAILABILITY']
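The deployment gates above test the version against a generated range of '10.N' strings. A small sketch of that check, using an example deployment:

name, version = 'macosx', '10.11'   # example deployment values
if name == 'macosx' and version in ('10.%s' % v for v in range(7, 12)):
    # 10.11 predates the 10.12 dylib that first shipped shared_mutex.
    print('dylib-has-no-shared_mutex')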
def configure_env(self):
- self.config.environment = dict(os.environ)
+ self.target_info.configure_env(self.exec_env)
def add_path(self, dest_env, new_path):
self.target_info.add_path(dest_env, new_path)
diff --git a/gnu/llvm/libcxx/utils/libcxx/test/target_info.py b/gnu/llvm/libcxx/utils/libcxx/test/target_info.py
index 198d2eebe97..fa57a2c7485 100644
--- a/gnu/llvm/libcxx/utils/libcxx/test/target_info.py
+++ b/gnu/llvm/libcxx/utils/libcxx/test/target_info.py
@@ -7,7 +7,7 @@
#===----------------------------------------------------------------------===//
import importlib
-import lit.util
+import locale
import os
import platform
import re
@@ -19,21 +19,26 @@ from libcxx.util import executeCommand
class DefaultTargetInfo(object):
def __init__(self, full_config):
self.full_config = full_config
- self.executor = None
+
+ def platform(self):
+ return sys.platform.lower().strip()
def is_windows(self):
- return False
+ return self.platform() == 'win32'
- def is_zos(self):
- return False
+ def is_darwin(self):
+ return self.platform() == 'darwin'
- def is_mingw(self):
- return False
+ def add_locale_features(self, features):
+ self.full_config.lit_config.warning(
+ "No locales entry for target_system: %s" % self.platform())
- def add_cxx_flags(self, flags): pass
def add_cxx_compile_flags(self, flags): pass
def add_cxx_link_flags(self, flags): pass
+ def configure_env(self, env): pass
def allow_cxxabi_link(self): return True
+ def add_sanitizer_features(self, sanitizer_type, features): pass
+ def use_lit_shell_default(self): return False
def add_path(self, dest_env, new_path):
if not new_path:
@@ -46,12 +51,100 @@ class DefaultTargetInfo(object):
dest_env['PATH'])
+def test_locale(loc):
+ assert loc is not None
+ default_locale = locale.setlocale(locale.LC_ALL)
+ try:
+ locale.setlocale(locale.LC_ALL, loc)
+ return True
+ except locale.Error:
+ return False
+ finally:
+ locale.setlocale(locale.LC_ALL, default_locale)
+
+
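A quick usage note for the test_locale() probe defined above (results depend on which locales the host has installed):

# Assumes test_locale() from the hunk above; availability is host-specific.
print(test_locale('C'))            # True on essentially every system
print(test_locale('xx_XX.BOGUS'))  # False: setlocale() raises locale.Error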
+def add_common_locales(features, lit_config, is_windows=False):
+ # A list of locales needed by the test-suite.
+ # The list uses the canonical name for the locale used in the test-suite
+    # TODO: On Linux, ISO8859 locale names *may* need to be hyphenated.
+ locales = [
+ ('en_US.UTF-8', 'English_United States.1252'),
+ ('fr_FR.UTF-8', 'French_France.1252'),
+ ('ru_RU.UTF-8', 'Russian_Russia.1251'),
+ ('zh_CN.UTF-8', 'Chinese_China.936'),
+ ('fr_CA.ISO8859-1', 'French_Canada.1252'),
+ ('cs_CZ.ISO8859-2', 'Czech_Czech Republic.1250')
+ ]
+ for loc_id, windows_loc_name in locales:
+ loc_name = windows_loc_name if is_windows else loc_id
+ if test_locale(loc_name):
+ features.add('locale.{0}'.format(loc_id))
+ else:
+ lit_config.warning('The locale {0} is not supported by '
+ 'your platform. Some tests will be '
+ 'unsupported.'.format(loc_name))
+
+
class DarwinLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(DarwinLocalTI, self).__init__(full_config)
- def add_cxx_flags(self, flags):
- out, err, exit_code = executeCommand(['xcrun', '--show-sdk-path'])
+ def is_host_macosx(self):
+ name = subprocess.check_output(['sw_vers', '-productName']).strip()
+ return name == "Mac OS X"
+
+ def get_macosx_version(self):
+ assert self.is_host_macosx()
+ version = subprocess.check_output(
+ ['sw_vers', '-productVersion']).strip()
+ version = re.sub(r'([0-9]+\.[0-9]+)(\..*)?', r'\1', version)
+ return version
+
+ def get_sdk_version(self, name):
+ assert self.is_host_macosx()
+ cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
+ try:
+ out = subprocess.check_output(cmd).strip()
+        except OSError:
+            out = None  # fall through to the fatal() call below
+
+ if not out:
+ self.full_config.lit_config.fatal(
+ "cannot infer sdk version with: %r" % cmd)
+
+ return re.sub(r'.*/[^0-9]+([0-9.]+)\.sdk', r'\1', out)
+
+ def get_platform(self):
+ platform = self.full_config.get_lit_conf('platform')
+ if platform:
+ platform = re.sub(r'([^0-9]+)([0-9\.]*)', r'\1-\2', platform)
+ name, version = tuple(platform.split('-', 1))
+ else:
+ name = 'macosx'
+ version = None
+
+ if version:
+ return (False, name, version)
+
+ # Infer the version, either from the SDK or the system itself. For
+ # macosx, ignore the SDK version; what matters is what's at
+ # /usr/lib/libc++.dylib.
+ if name == 'macosx':
+ version = self.get_macosx_version()
+ else:
+ version = self.get_sdk_version(name)
+ return (True, name, version)
+
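get_platform() splits a 'platform' value such as 'macosx10.13' into a name and version with a regex. A sketch of that split:

import re

def split_platform(value):
    # e.g. 'macosx10.13' -> ('macosx', '10.13'); 'macosx' -> ('macosx', '')
    value = re.sub(r'([^0-9]+)([0-9\.]*)', r'\1-\2', value)
    return tuple(value.split('-', 1))

assert split_platform('macosx10.13') == ('macosx', '10.13')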
+ def add_locale_features(self, features):
+ add_common_locales(features, self.full_config.lit_config)
+
+ def add_cxx_compile_flags(self, flags):
+ if self.full_config.use_deployment:
+ _, name, _ = self.full_config.config.deployment
+ cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
+ else:
+ cmd = ['xcrun', '--show-sdk-path']
+ out, err, exit_code = executeCommand(cmd)
if exit_code != 0:
self.full_config.lit_config.warning("Could not determine macOS SDK path! stderr was " + err)
if exit_code == 0 and out:
@@ -63,6 +156,21 @@ class DarwinLocalTI(DefaultTargetInfo):
def add_cxx_link_flags(self, flags):
flags += ['-lSystem']
+ def configure_env(self, env):
+ library_paths = []
+ # Configure the library path for libc++
+ if self.full_config.cxx_runtime_root:
+ library_paths += [self.full_config.cxx_runtime_root]
+ elif self.full_config.use_system_cxx_lib:
+ if (os.path.isdir(str(self.full_config.use_system_cxx_lib))):
+ library_paths += [self.full_config.use_system_cxx_lib]
+
+ # Configure the abi library path
+ if self.full_config.abi_library_root:
+ library_paths += [self.full_config.abi_library_root]
+ if library_paths:
+ env['DYLD_LIBRARY_PATH'] = ':'.join(library_paths)
+
def allow_cxxabi_link(self):
# Don't link libc++abi explicitly on OS X because the symbols
# should be available in libc++ directly.
@@ -73,6 +181,9 @@ class FreeBSDLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(FreeBSDLocalTI, self).__init__(full_config)
+ def add_locale_features(self, features):
+ add_common_locales(features, self.full_config.lit_config)
+
def add_cxx_link_flags(self, flags):
flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lcxxrt']
@@ -81,6 +192,9 @@ class NetBSDLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(NetBSDLocalTI, self).__init__(full_config)
+ def add_locale_features(self, features):
+ add_common_locales(features, self.full_config.lit_config)
+
def add_cxx_link_flags(self, flags):
flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lc++abi',
'-lunwind']
@@ -90,6 +204,43 @@ class LinuxLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(LinuxLocalTI, self).__init__(full_config)
+ def platform(self):
+ return 'linux'
+
+ def _distribution(self):
+ try:
+            # platform.linux_distribution() was removed in Python 3.8.
+            # However, this function is only used to detect SLES 11,
+            # an old distribution whose system Python predates 3.8.
+ return platform.linux_distribution()
+ except AttributeError:
+ return '', '', ''
+
+ def platform_name(self):
+ name, _, _ = self._distribution()
+        # Some distros have spaces, e.g. 'SUSE Linux Enterprise Server',
+        # but lit feature names can't contain spaces.
+ name = name.lower().strip().replace(' ', '-')
+ return name # Permitted to be None
+
+ def platform_ver(self):
+ _, ver, _ = self._distribution()
+ ver = ver.lower().strip().replace(' ', '-')
+ return ver # Permitted to be None.
+
+ def add_locale_features(self, features):
+ add_common_locales(features, self.full_config.lit_config)
+ # Some linux distributions have different locale data than others.
+ # Insert the distributions name and name-version into the available
+ # features to allow tests to XFAIL on them.
+ name = self.platform_name()
+ ver = self.platform_ver()
+ if name:
+ features.add(name)
+ if name and ver:
+ features.add('%s-%s' % (name, ver))
+
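The Linux add_locale_features() also exposes the distribution as lit features. A sketch of the names it produces, using the SLES example from the comments:

# Example: what platform.linux_distribution() used to report on SLES 11.
name, ver = 'SUSE Linux Enterprise Server', '11'
name = name.lower().strip().replace(' ', '-')
ver = ver.lower().strip().replace(' ', '-')
print(name)                   # suse-linux-enterprise-server
print('%s-%s' % (name, ver))  # suse-linux-enterprise-server-11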
def add_cxx_compile_flags(self, flags):
flags += ['-D__STDC_FORMAT_MACROS',
'-D__STDC_LIMIT_MACROS',
@@ -117,8 +268,8 @@ class LinuxLocalTI(DefaultTargetInfo):
flags += [builtins_lib]
else:
flags += ['-lgcc']
- has_libatomic = self.full_config.get_lit_bool('has_libatomic', False)
- if has_libatomic:
+ use_libatomic = self.full_config.get_lit_bool('use_libatomic', False)
+ if use_libatomic:
flags += ['-latomic']
san = self.full_config.get_lit_conf('use_sanitizer', '').strip()
if san:
@@ -127,30 +278,20 @@ class LinuxLocalTI(DefaultTargetInfo):
# clang/lib/Driver/Tools.cpp
flags += ['-lpthread', '-lrt', '-lm', '-ldl']
-class LinuxRemoteTI(LinuxLocalTI):
- def __init__(self, full_config):
- super(LinuxRemoteTI, self).__init__(full_config)
class WindowsLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(WindowsLocalTI, self).__init__(full_config)
- def is_windows(self):
- return True
-
-class ZOSLocalTI(DefaultTargetInfo):
- def __init__(self, full_config):
- super(ZOSLocalTI, self).__init__(full_config)
+ def add_locale_features(self, features):
+ add_common_locales(features, self.full_config.lit_config,
+ is_windows=True)
- def is_zos(self):
+ def use_lit_shell_default(self):
+ # Default to the internal shell on Windows, as bash on Windows is
+ # usually very slow.
return True
-class MingwLocalTI(WindowsLocalTI):
- def __init__(self, full_config):
- super(MingwLocalTI, self).__init__(full_config)
-
- def is_mingw(self):
- return True
def make_target_info(full_config):
default = "libcxx.test.target_info.LocalTI"
@@ -167,5 +308,4 @@ def make_target_info(full_config):
if target_system == 'NetBSD': return NetBSDLocalTI(full_config)
if target_system == 'Linux': return LinuxLocalTI(full_config)
if target_system == 'Windows': return WindowsLocalTI(full_config)
- if target_system == 'OS/390': return ZOSLocalTI(full_config)
return DefaultTargetInfo(full_config)
diff --git a/gnu/llvm/libcxx/utils/libcxx/util.py b/gnu/llvm/libcxx/utils/libcxx/util.py
index 8c93f392ed3..2fd95232abb 100644
--- a/gnu/llvm/libcxx/utils/libcxx/util.py
+++ b/gnu/llvm/libcxx/utils/libcxx/util.py
@@ -286,16 +286,3 @@ def executeCommandVerbose(cmd, *args, **kwargs):
report += "\n\nFailed!"
sys.stderr.write('%s\n' % report)
return out, err, exitCode
-
-
-def executeCommandOrDie(cmd, *args, **kwargs):
- """
- Execute a command and print its output on failure.
- """
- out, err, exitCode = executeCommand(cmd, *args, **kwargs)
- if exitCode != 0:
- report = makeReport(cmd, out, err, exitCode)
- report += "\n\nFailed!"
- sys.stderr.write('%s\n' % report)
- sys.exit(exitCode)
- return out, err, exitCode
diff --git a/gnu/llvm/libcxx/utils/merge_archives.py b/gnu/llvm/libcxx/utils/merge_archives.py
index 2fcb474d3d5..4c31854d2b7 100755
--- a/gnu/llvm/libcxx/utils/merge_archives.py
+++ b/gnu/llvm/libcxx/utils/merge_archives.py
@@ -93,7 +93,7 @@ def main():
parser.add_argument(
'-L', dest='search_paths',
help='Paths to search for the libraries along', action='append',
- nargs=1, default=[])
+ nargs=1)
parser.add_argument(
'--ar', dest='ar_exe', required=False,
help='The ar executable to use, finds \'ar\' in the path if not given',