author     Patrick Wildt <patrick@cvs.openbsd.org>    2021-01-02 17:10:12 +0000
committer  Patrick Wildt <patrick@cvs.openbsd.org>    2021-01-02 17:10:12 +0000
commit     ac63a3868d0079ed93660b89b2c6e6dbf67aea16 (patch)
tree       7f055322abddaf10c9dbd7cbeb91c8f83d78e301 /gnu/llvm
parent     becf90779831a234f9200480d6009ecb39ebb192 (diff)
Import compiler-rt 11.0.0 release.
ok kettenis@
Diffstat (limited to 'gnu/llvm')
357 files changed, 10123 insertions, 3776 deletions
diff --git a/gnu/llvm/compiler-rt/CMakeLists.txt b/gnu/llvm/compiler-rt/CMakeLists.txt
index 8d768a404f2..fa62814b635 100644
--- a/gnu/llvm/compiler-rt/CMakeLists.txt
+++ b/gnu/llvm/compiler-rt/CMakeLists.txt
@@ -81,12 +81,34 @@ if (COMPILER_RT_STANDALONE_BUILD)
     set_target_properties(intrinsics_gen PROPERTIES FOLDER "Compiler-RT Misc")
   endif()
 
-  # Find Python interpreter.
-  include(FindPythonInterp)
-  if(NOT PYTHONINTERP_FOUND)
-    message(FATAL_ERROR "
-      Unable to find Python interpreter required testing. Please install Python
-      or specify the PYTHON_EXECUTABLE CMake variable.")
+  if(CMAKE_VERSION VERSION_LESS 3.12)
+    # Find Python interpreter.
+    include(FindPythonInterp)
+    if(NOT PYTHONINTERP_FOUND)
+      message(FATAL_ERROR "
+        Unable to find Python interpreter required for testing. Please install
+        Python or specify the PYTHON_EXECUTABLE CMake variable.")
+    endif()
+
+    add_executable(Python3::Interpreter IMPORTED)
+    set_target_properties(Python3::Interpreter PROPERTIES
+      IMPORTED_LOCATION ${PYTHON_EXECUTABLE})
+    set(Python3_EXECUTABLE ${PYTHON_EXECUTABLE})
+  else()
+    find_package(Python3 COMPONENTS Interpreter)
+    if(NOT Python3_Interpreter_FOUND)
+      message(WARNING "Python3 not found, using python2 as a fallback")
+      find_package(Python2 COMPONENTS Interpreter REQUIRED)
+      if(Python2_VERSION VERSION_LESS 2.7)
+        message(SEND_ERROR "Python 2.7 or newer is required")
+      endif()
+
+      # Treat python2 as python3
+      add_executable(Python3::Interpreter IMPORTED)
+      set_target_properties(Python3::Interpreter PROPERTIES
+        IMPORTED_LOCATION ${Python2_EXECUTABLE})
+      set(Python3_EXECUTABLE ${Python2_EXECUTABLE})
+    endif()
   endif()
 
   # Ensure that fat libraries are built correctly on Darwin
@@ -281,12 +303,28 @@ if(NOT COMPILER_RT_HAS_FVISIBILITY_HIDDEN_FLAG)
 endif()
 
 append_list_if(COMPILER_RT_HAS_FNO_LTO_FLAG -fno-lto SANITIZER_COMMON_CFLAGS)
 
+# By default do not instrument or use profdata for compiler-rt.
+if(NOT COMPILER_RT_ENABLE_PGO)
+  if(LLVM_PROFDATA_FILE AND COMPILER_RT_HAS_FNO_PROFILE_INSTR_USE_FLAG)
+    list(APPEND SANITIZER_COMMON_CFLAGS "-fno-profile-instr-use")
+  endif()
+  if(LLVM_BUILD_INSTRUMENTED MATCHES IR AND COMPILER_RT_HAS_FNO_PROFILE_GENERATE_FLAG)
+    list(APPEND SANITIZER_COMMON_CFLAGS "-fno-profile-generate")
+  elseif(LLVM_BUILD_INSTRUMENTED AND COMPILER_RT_HAS_FNO_PROFILE_INSTR_GENERATE_FLAG)
+    list(APPEND SANITIZER_COMMON_CFLAGS "-fno-profile-instr-generate")
+  endif()
+endif()
+
 # The following is a workaround for powerpc64le. This is the only architecture
 # that requires -fno-function-sections to work properly. If lacking, the ASan
 # Linux test function-sections-are-bad.cpp fails with the following error:
 # 'undefined symbol: __sanitizer_unaligned_load32'.
 if(DEFINED TARGET_powerpc64le_CFLAGS)
-  append_list_if(COMPILER_RT_HAS_FNO_FUNCTION_SECTIONS_FLAG -fno-function-sections TARGET_powerpc64le_CFLAGS)
+  if(CMAKE_CXX_COMPILER_ID MATCHES "XL")
+    append("-qnofuncsect" TARGET_powerpc64le_CFLAGS)
+  else()
+    append_list_if(COMPILER_RT_HAS_FNO_FUNCTION_SECTIONS_FLAG -fno-function-sections TARGET_powerpc64le_CFLAGS)
+  endif()
 endif()
 
 # The following is a workaround for s390x.  This avoids creation of "partial
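The version-gated interpreter lookup above is self-contained enough to try in a scratch project; a minimal sketch (a standalone CMakeLists.txt, names as in the hunk):

    # Sketch: version-gated Python lookup, mirroring the hunk above.
    cmake_minimum_required(VERSION 3.4.3)
    project(python-lookup-demo NONE)

    if(CMAKE_VERSION VERSION_LESS 3.12)
      # Legacy module: sets PYTHON_EXECUTABLE, re-exported under the modern
      # Python3_EXECUTABLE name so later code has a single spelling.
      include(FindPythonInterp)
      set(Python3_EXECUTABLE ${PYTHON_EXECUTABLE})
    else()
      # Modern module: sets Python3_EXECUTABLE and Python3::Interpreter.
      find_package(Python3 COMPONENTS Interpreter)
    endif()

    message(STATUS "Python for tests: ${Python3_EXECUTABLE}")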
@@ -397,7 +435,6 @@ append_list_if(COMPILER_RT_HAS_NODEFAULTLIBS_FLAG -nodefaultlibs SANITIZER_COMMO
 append_list_if(COMPILER_RT_HAS_Z_TEXT -Wl,-z,text SANITIZER_COMMON_LINK_FLAGS)
 
 if (COMPILER_RT_USE_BUILTINS_LIBRARY)
-  list(APPEND SANITIZER_COMMON_LINK_LIBS ${COMPILER_RT_BUILTINS_LIBRARY})
   string(REPLACE "-Wl,-z,defs" "" CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS}")
 else()
   if (ANDROID)
@@ -498,8 +535,6 @@ else()
     set(COMPILER_RT_LLD_PATH ${LLVM_MAIN_SRC_DIR}/../lld)
     if(EXISTS ${COMPILER_RT_LLD_PATH}/ AND LLVM_TOOL_LLD_BUILD)
       set(COMPILER_RT_HAS_LLD TRUE)
-    else()
-      set(COMPILER_RT_HAS_LLD ${COMPILER_RT_HAS_FUSE_LD_LLD_FLAG})
     endif()
   endif()
 endif()
 pythonize_bool(COMPILER_RT_HAS_LLD)
@@ -514,6 +549,8 @@ if(COMPILER_RT_INCLUDE_TESTS)
   # The user can still choose to have the check targets *use* a different lit
   # by specifying -DLLVM_EXTERNAL_LIT, but we generate it regardless.
   if (EXISTS ${LLVM_MAIN_SRC_DIR}/utils/llvm-lit)
+    # Needed for lit support in standalone builds.
+    include(AddLLVM)
     add_subdirectory(${LLVM_MAIN_SRC_DIR}/utils/llvm-lit ${CMAKE_CURRENT_BINARY_DIR}/llvm-lit)
   elseif(NOT EXISTS ${LLVM_EXTERNAL_LIT})
     message(WARNING "Could not find LLVM source directory and LLVM_EXTERNAL_LIT does not"
diff --git a/gnu/llvm/compiler-rt/cmake/Modules/AddCompilerRT.cmake b/gnu/llvm/compiler-rt/cmake/Modules/AddCompilerRT.cmake
index 35a48c6af29..dab55707338 100644
--- a/gnu/llvm/compiler-rt/cmake/Modules/AddCompilerRT.cmake
+++ b/gnu/llvm/compiler-rt/cmake/Modules/AddCompilerRT.cmake
@@ -1,5 +1,6 @@
 include(ExternalProject)
 include(CompilerRTUtils)
+include(HandleCompilerRT)
 
 function(set_target_output_directories target output_dir)
   # For RUNTIME_OUTPUT_DIRECTORY variable, Multi-configuration generators
@@ -162,6 +163,19 @@ function(add_compiler_rt_runtime name type)
     set(NO_LTO_FLAGS "")
   endif()
 
+  # By default do not instrument or use profdata for compiler-rt.
+  set(NO_PGO_FLAGS "")
+  if(NOT COMPILER_RT_ENABLE_PGO)
+    if(LLVM_PROFDATA_FILE AND COMPILER_RT_HAS_FNO_PROFILE_INSTR_USE_FLAG)
+      list(APPEND NO_PGO_FLAGS "-fno-profile-instr-use")
+    endif()
+    if(LLVM_BUILD_INSTRUMENTED MATCHES IR AND COMPILER_RT_HAS_FNO_PROFILE_GENERATE_FLAG)
+      list(APPEND NO_PGO_FLAGS "-fno-profile-generate")
+    elseif(LLVM_BUILD_INSTRUMENTED AND COMPILER_RT_HAS_FNO_PROFILE_INSTR_GENERATE_FLAG)
+      list(APPEND NO_PGO_FLAGS "-fno-profile-instr-generate")
+    endif()
+  endif()
+
   list(LENGTH LIB_SOURCES LIB_SOURCES_LENGTH)
   if (${LIB_SOURCES_LENGTH} GREATER 0)
     # Add headers to LIB_SOURCES for IDEs. It doesn't make sense to
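The NO_PGO_FLAGS block follows the usual probe-then-append idiom; a minimal sketch of that idiom, using one of the COMPILER_RT_HAS_* probes this diff adds to config-ix.cmake further down:

    include(CheckCXXCompilerFlag)

    # Probe once; the result is cached in the named variable.
    check_cxx_compiler_flag(-fno-profile-generate
                            COMPILER_RT_HAS_FNO_PROFILE_GENERATE_FLAG)

    # Append only when the host build is instrumented and the flag exists,
    # so the runtimes themselves are never PGO-instrumented.
    set(NO_PGO_FLAGS "")
    if(LLVM_BUILD_INSTRUMENTED AND COMPILER_RT_HAS_FNO_PROFILE_GENERATE_FLAG)
      list(APPEND NO_PGO_FLAGS "-fno-profile-generate")
    endif()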
@@ -190,7 +204,7 @@ function(add_compiler_rt_runtime name type)
       list_intersect(LIB_ARCHS_${libname} DARWIN_${os}_ARCHS LIB_ARCHS)
       if(LIB_ARCHS_${libname})
         list(APPEND libnames ${libname})
-        set(extra_cflags_${libname} ${DARWIN_${os}_CFLAGS} ${NO_LTO_FLAGS} ${LIB_CFLAGS})
+        set(extra_cflags_${libname} ${DARWIN_${os}_CFLAGS} ${NO_LTO_FLAGS} ${NO_PGO_FLAGS} ${LIB_CFLAGS})
         set(output_name_${libname} ${libname}${COMPILER_RT_OS_SUFFIX})
         set(sources_${libname} ${LIB_SOURCES})
         format_object_libs(sources_${libname} ${os} ${LIB_OBJECT_LIBS})
@@ -220,10 +234,18 @@ function(add_compiler_rt_runtime name type)
           set_output_name(output_name_${libname} ${name} ${arch})
         endif()
       endif()
+      if(COMPILER_RT_USE_BUILTINS_LIBRARY AND NOT type STREQUAL "OBJECT" AND
+         NOT name STREQUAL "clang_rt.builtins")
+        get_compiler_rt_target(${arch} target)
+        find_compiler_rt_library(builtins ${target} builtins_${libname})
+        if(builtins_${libname} STREQUAL "NOTFOUND")
+          message(FATAL_ERROR "Cannot find builtins library for the target architecture")
+        endif()
+      endif()
       set(sources_${libname} ${LIB_SOURCES})
       format_object_libs(sources_${libname} ${arch} ${LIB_OBJECT_LIBS})
       set(libnames ${libnames} ${libname})
-      set(extra_cflags_${libname} ${TARGET_${arch}_CFLAGS} ${NO_LTO_FLAGS} ${LIB_CFLAGS})
+      set(extra_cflags_${libname} ${TARGET_${arch}_CFLAGS} ${NO_LTO_FLAGS} ${NO_PGO_FLAGS} ${LIB_CFLAGS})
       get_compiler_rt_output_dir(${arch} output_dir_${libname})
       get_compiler_rt_install_dir(${arch} install_dir_${libname})
     endforeach()
@@ -313,6 +335,9 @@ function(add_compiler_rt_runtime name type)
     if(LIB_LINK_LIBS)
       target_link_libraries(${libname} PRIVATE ${LIB_LINK_LIBS})
     endif()
+    if(builtins_${libname})
+      target_link_libraries(${libname} PRIVATE ${builtins_${libname}})
+    endif()
     if(${type} STREQUAL "SHARED")
       if(COMMAND llvm_setup_rpath)
         llvm_setup_rpath(${libname})
@@ -461,7 +486,13 @@ function(add_compiler_rt_test test_suite test_name arch)
   # trump. With MSVC we can't do that because CMake is set up to run link.exe
   # when linking, not the compiler. Here, we hack it to use the compiler
   # because we want to use -fsanitize flags.
-  if(NOT MSVC)
+
+  # Only add CMAKE_EXE_LINKER_FLAGS in a standalone build. Otherwise
+  # CMAKE_EXE_LINKER_FLAGS contains flags meant for the compiler that builds
+  # Clang/LLVM, which COMPILER_RT_TEST_COMPILER may not support. E.g. if the
+  # build compiler uses the lld linker and we build clang with the default ld
+  # linker, the clang under test will complain about lld options such as
+  # --color-diagnostics.
+  if(NOT MSVC AND COMPILER_RT_STANDALONE_BUILD)
     set(TEST_LINK_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${TEST_LINK_FLAGS}")
     separate_arguments(TEST_LINK_FLAGS)
   endif()
diff --git a/gnu/llvm/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake b/gnu/llvm/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake
index 9f501a61c4b..be8d7e733c7 100644
--- a/gnu/llvm/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake
+++ b/gnu/llvm/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake
@@ -44,8 +44,11 @@ endfunction()
 
 function(find_darwin_sdk_version var sdk_name)
-  # We deliberately don't cache the result here because
-  # CMake's caching causes too many problems.
+  if (DARWIN_${sdk_name}_OVERRIDE_SDK_VERSION)
+    message(WARNING "Overriding ${sdk_name} SDK version to ${DARWIN_${sdk_name}_OVERRIDE_SDK_VERSION}")
+    set(${var} "${DARWIN_${sdk_name}_OVERRIDE_SDK_VERSION}" PARENT_SCOPE)
+    return()
+  endif()
   set(result_process 1)
   if(NOT DARWIN_PREFER_PUBLIC_SDK)
     # Let's first try the internal SDK, otherwise use the public SDK.
@@ -166,25 +169,46 @@ function(darwin_test_archs os valid_archs)
     CACHE STRING "List of valid architectures for platform ${os}." FORCE)
 endfunction()
 
-# This function checks the host cpusubtype to see if it is post-haswell. Haswell
-# and later machines can run x86_64h binaries. Haswell is cpusubtype 8.
+# This function checks the host cputype/cpusubtype to filter the supported
+# architectures for the host OS. This is used to determine which tests are
+# available for the host.
 function(darwin_filter_host_archs input output)
   list_intersect(tmp_var DARWIN_osx_ARCHS ${input})
   execute_process(
-    COMMAND sysctl hw.cpusubtype
-    OUTPUT_VARIABLE SUBTYPE)
-
-  string(REGEX MATCH "hw.cpusubtype: ([0-9]*)"
-         SUBTYPE_MATCHED "${SUBTYPE}")
-  set(HASWELL_SUPPORTED Off)
-  if(SUBTYPE_MATCHED)
-    if(${CMAKE_MATCH_1} GREATER 7)
-      set(HASWELL_SUPPORTED On)
+    COMMAND sysctl hw.cputype
+    OUTPUT_VARIABLE CPUTYPE)
+  string(REGEX MATCH "hw.cputype: ([0-9]*)"
+         CPUTYPE_MATCHED "${CPUTYPE}")
+  set(ARM_HOST Off)
+  if(CPUTYPE_MATCHED)
+    # ARM cputype is (0x01000000 | 12) and X86(_64) is always 7.
+    if(${CMAKE_MATCH_1} GREATER 11)
+      set(ARM_HOST On)
     endif()
   endif()
-  if(NOT HASWELL_SUPPORTED)
-    list(REMOVE_ITEM tmp_var x86_64h)
+
+  if(ARM_HOST)
+    list(REMOVE_ITEM tmp_var i386)
+  else()
+    list(REMOVE_ITEM tmp_var arm64)
+    list(REMOVE_ITEM tmp_var arm64e)
+    execute_process(
+      COMMAND sysctl hw.cpusubtype
+      OUTPUT_VARIABLE SUBTYPE)
+    string(REGEX MATCH "hw.cpusubtype: ([0-9]*)"
+           SUBTYPE_MATCHED "${SUBTYPE}")
+
+    set(HASWELL_SUPPORTED Off)
+    if(SUBTYPE_MATCHED)
+      if(${CMAKE_MATCH_1} GREATER 7)
+        set(HASWELL_SUPPORTED On)
+      endif()
+    endif()
+    if(NOT HASWELL_SUPPORTED)
+      list(REMOVE_ITEM tmp_var x86_64h)
+    endif()
   endif()
+
   set(${output} ${tmp_var} PARENT_SCOPE)
 endfunction()
@@ -320,6 +344,38 @@ function(darwin_lipo_libs name)
   endif()
 endfunction()
 
+# Filter the list of builtin sources for Darwin, then delegate to the generic
+# filtering.
+#
+# `exclude_or_include` must be one of:
+#  - EXCLUDE: remove every item whose name (w/o extension) matches a name in
+#    `excluded_list`.
+#  - INCLUDE: keep only items whose name (w/o extension) matches something
+#    in `excluded_list`.
+function(darwin_filter_builtin_sources output_var name exclude_or_include excluded_list)
+  if(exclude_or_include STREQUAL "EXCLUDE")
+    set(filter_action GREATER)
+    set(filter_value -1)
+  elseif(exclude_or_include STREQUAL "INCLUDE")
+    set(filter_action LESS)
+    set(filter_value 0)
+  else()
+    message(FATAL_ERROR "darwin_filter_builtin_sources called without EXCLUDE|INCLUDE")
+  endif()
+
+  set(intermediate ${ARGN})
+  foreach(_file ${intermediate})
+    get_filename_component(_name_we ${_file} NAME_WE)
+    list(FIND ${excluded_list} ${_name_we} _found)
+    if(_found ${filter_action} ${filter_value})
+      list(REMOVE_ITEM intermediate ${_file})
+    endif()
+  endforeach()
+
+  filter_builtin_sources(intermediate ${name})
+  set(${output_var} ${intermediate} PARENT_SCOPE)
+endfunction()
+
 # Generates builtin libraries for all operating systems specified in ARGN. Each
 # OS library is constructed by lipo-ing together single-architecture libraries.
 macro(darwin_add_builtin_libraries)
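A sketch of how the new darwin_filter_builtin_sources() is called; the source and exclusion lists here are hypothetical, chosen only to show the EXCLUDE semantics:

    # Hypothetical inputs, for illustration only.
    set(x86_64_osx_EXCLUDED_BUILTINS atomic clear_cache)
    set(x86_64_SOURCES atomic.c clear_cache.c divdc3.c x86_64/floatdidf.c)

    # atomic.c and clear_cache.c are dropped because their basenames appear
    # in the exclusion list; the survivors then go through the generic
    # arch-specific/generic de-duplication in filter_builtin_sources.
    darwin_filter_builtin_sources(filtered_sources
      osx_x86_64
      EXCLUDE x86_64_osx_EXCLUDED_BUILTINS
      ${x86_64_SOURCES})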
@@ -342,7 +398,8 @@ macro(darwin_add_builtin_libraries)
                               ARCH ${arch}
                               MIN_VERSION ${DARWIN_${os}_BUILTIN_MIN_VER})
 
-      filter_builtin_sources(filtered_sources
+      darwin_filter_builtin_sources(filtered_sources
+        ${os}_${arch}
         EXCLUDE ${arch}_${os}_EXCLUDED_BUILTINS
         ${${arch}_SOURCES})
 
@@ -364,7 +421,8 @@ macro(darwin_add_builtin_libraries)
                               OS ${os}
                               ARCH ${arch})
 
-      filter_builtin_sources(filtered_sources
+      darwin_filter_builtin_sources(filtered_sources
+        cc_kext_${os}_${arch}
         EXCLUDE ${arch}_${os}_EXCLUDED_BUILTINS
         ${${arch}_SOURCES})
 
@@ -460,7 +518,8 @@ macro(darwin_add_embedded_builtin_libraries)
       set(x86_64_FUNCTIONS ${common_FUNCTIONS})
 
       foreach(arch ${DARWIN_macho_embedded_ARCHS})
-        filter_builtin_sources(${arch}_filtered_sources
+        darwin_filter_builtin_sources(${arch}_filtered_sources
+          macho_embedded_${arch}
           INCLUDE ${arch}_FUNCTIONS
           ${${arch}_SOURCES})
         if(NOT ${arch}_filtered_sources)
diff --git a/gnu/llvm/compiler-rt/cmake/Modules/CompilerRTUtils.cmake b/gnu/llvm/compiler-rt/cmake/Modules/CompilerRTUtils.cmake
index 6e672b1e181..99b9f0e4af4 100644
--- a/gnu/llvm/compiler-rt/cmake/Modules/CompilerRTUtils.cmake
+++ b/gnu/llvm/compiler-rt/cmake/Modules/CompilerRTUtils.cmake
@@ -166,6 +166,7 @@ macro(detect_target_arch)
   check_symbol_exists(__sparcv9 "" __SPARCV9)
   check_symbol_exists(__wasm32__ "" __WEBASSEMBLY32)
   check_symbol_exists(__wasm64__ "" __WEBASSEMBLY64)
+  check_symbol_exists(__ve__ "" __VE)
   if(__ARM)
     add_default_target_arch(arm)
   elseif(__AARCH64)
@@ -200,6 +201,8 @@ macro(detect_target_arch)
     add_default_target_arch(wasm32)
   elseif(__WEBASSEMBLY64)
     add_default_target_arch(wasm64)
+  elseif(__VE)
+    add_default_target_arch(ve)
   endif()
 endmacro()
 
@@ -248,6 +251,8 @@ macro(load_llvm_config)
       string(REGEX REPLACE "[ \t]*[\r\n]+[ \t]*" ";" CONFIG_OUTPUT ${CONFIG_OUTPUT})
       list(GET CONFIG_OUTPUT 0 LDFLAGS)
       list(GET CONFIG_OUTPUT 1 LIBLIST)
+      file(TO_CMAKE_PATH "${LDFLAGS}" LDFLAGS)
+      file(TO_CMAKE_PATH "${LIBLIST}" LIBLIST)
      set(LLVM_XRAY_LDFLAGS ${LDFLAGS} CACHE STRING "Linker flags for LLVMXRay library")
      set(LLVM_XRAY_LIBLIST ${LIBLIST} CACHE STRING "Library list for LLVMXRay")
      set(COMPILER_RT_HAS_LLVMXRAY TRUE)
@@ -261,13 +266,15 @@ macro(load_llvm_config)
       ERROR_QUIET)
     if (HAD_ERROR)
       message(WARNING "llvm-config finding testingsupport failed with status ${HAD_ERROR}")
-    else()
+    elseif(COMPILER_RT_INCLUDE_TESTS)
       string(REGEX REPLACE "[ \t]*[\r\n]+[ \t]*" ";" CONFIG_OUTPUT ${CONFIG_OUTPUT})
       list(GET CONFIG_OUTPUT 0 LDFLAGS)
       list(GET CONFIG_OUTPUT 1 LIBLIST)
       if (LIBLIST STREQUAL "")
         message(WARNING "testingsupport library not installed, some tests will be skipped")
       else()
+        file(TO_CMAKE_PATH "${LDFLAGS}" LDFLAGS)
+        file(TO_CMAKE_PATH "${LIBLIST}" LIBLIST)
         set(LLVM_TESTINGSUPPORT_LDFLAGS ${LDFLAGS} CACHE STRING "Linker flags for LLVMTestingSupport library")
         set(LLVM_TESTINGSUPPORT_LIBLIST ${LIBLIST} CACHE STRING "Library list for LLVMTestingSupport")
         set(COMPILER_RT_HAS_LLVMTESTINGSUPPORT TRUE)
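check_symbol_exists() with an empty header list only sees the compiler's predefined macros, which is what lets detect_target_arch classify the target; a trimmed sketch of the probe added above:

    include(CheckSymbolExists)

    # With no headers given, only the compiler's built-in defines are
    # visible, so this effectively asks "is the target NEC VE?".
    check_symbol_exists(__ve__ "" __VE)
    if(__VE)
      message(STATUS "Configuring builtins for the ve architecture")
    endif()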
@@ -325,33 +332,30 @@ macro(construct_compiler_rt_default_triple)
   endif()
 endmacro()
 
-# Filter out generic versions of routines that are re-implemented in
-# architecture specific manner. This prevents multiple definitions of the
-# same symbols, making the symbol selection non-deterministic.
-function(filter_builtin_sources output_var exclude_or_include excluded_list)
-  if(exclude_or_include STREQUAL "EXCLUDE")
-    set(filter_action GREATER)
-    set(filter_value -1)
-  elseif(exclude_or_include STREQUAL "INCLUDE")
-    set(filter_action LESS)
-    set(filter_value 0)
-  else()
-    message(FATAL_ERROR "filter_builtin_sources called without EXCLUDE|INCLUDE")
-  endif()
-
-  set(intermediate ${ARGN})
-  foreach (_file ${intermediate})
-    get_filename_component(_name_we ${_file} NAME_WE)
-    list(FIND ${excluded_list} ${_name_we} _found)
-    if(_found ${filter_action} ${filter_value})
-      list(REMOVE_ITEM intermediate ${_file})
-    elseif(${_file} MATCHES ".*/.*\\.S" OR ${_file} MATCHES ".*/.*\\.c")
+# Filter out generic versions of routines that are re-implemented in an
+# architecture specific manner. This prevents multiple definitions of the same
+# symbols, making the symbol selection non-deterministic.
+#
+# We follow the convention that a source file that exists in a sub-directory
+# (e.g. `ppc/divtc3.c`) is architecture-specific and that if a generic
+# implementation exists it will be a top-level source file with the same name
+# modulo the file extension (e.g. `divtc3.c`).
+function(filter_builtin_sources inout_var name)
+  set(intermediate ${${inout_var}})
+  foreach(_file ${intermediate})
+    get_filename_component(_file_dir ${_file} DIRECTORY)
+    if (NOT "${_file_dir}" STREQUAL "")
+      # Architecture specific file. If a generic version exists, print a notice
+      # and ensure that it is removed from the file list.
       get_filename_component(_name ${_file} NAME)
-      string(REPLACE ".S" ".c" _cname "${_name}")
-      list(REMOVE_ITEM intermediate ${_cname})
-    endif ()
-  endforeach ()
-  set(${output_var} ${intermediate} PARENT_SCOPE)
+      string(REGEX REPLACE "\\.S$" ".c" _cname "${_name}")
+      if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/${_cname}")
+        message(STATUS "For ${name} builtins preferring ${_file} to ${_cname}")
+        list(REMOVE_ITEM intermediate ${_cname})
+      endif()
+    endif()
+  endforeach()
+  set(${inout_var} ${intermediate} PARENT_SCOPE)
 endfunction()
 
 function(get_compiler_rt_target arch variable)
@@ -361,7 +365,7 @@ function(get_compiler_rt_target arch variable)
     # Use exact spelling when building only for the target specified to CMake.
     set(target "${COMPILER_RT_DEFAULT_TARGET_TRIPLE}")
   elseif(ANDROID AND ${arch} STREQUAL "i386")
-    set(target "i686${COMPILER_RT_OS_SUFFIX}${triple_suffix}")
+    set(target "i686${triple_suffix}")
   else()
     set(target "${arch}${triple_suffix}")
   endif()
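A sketch of the directory convention the rewritten filter_builtin_sources() encodes; the file names are hypothetical and assumed to exist under CMAKE_CURRENT_SOURCE_DIR:

    # Hypothetical: both a generic and a ppc-specific divtc3 are listed.
    set(BUILTIN_SOURCES divtc3.c ppc/divtc3.c floatditf.c)

    # After filtering, the generic divtc3.c is dropped in favour of
    # ppc/divtc3.c (a "For ppc builtins preferring ..." status line is
    # printed); floatditf.c has no arch-specific override and stays.
    filter_builtin_sources(BUILTIN_SOURCES ppc)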
diff --git a/gnu/llvm/compiler-rt/cmake/Modules/HandleCompilerRT.cmake b/gnu/llvm/compiler-rt/cmake/Modules/HandleCompilerRT.cmake
index 61b7792789e..ac9e0871489 100644
--- a/gnu/llvm/compiler-rt/cmake/Modules/HandleCompilerRT.cmake
+++ b/gnu/llvm/compiler-rt/cmake/Modules/HandleCompilerRT.cmake
@@ -1,24 +1,65 @@
-function(find_compiler_rt_library name variable)
-  set(CLANG_COMMAND ${CMAKE_CXX_COMPILER} ${SANITIZER_COMMON_CFLAGS}
-      "--rtlib=compiler-rt" "--print-libgcc-file-name")
-  if (CMAKE_CXX_COMPILER_ID MATCHES Clang AND CMAKE_CXX_COMPILER_TARGET)
-    list(APPEND CLANG_COMMAND "--target=${CMAKE_CXX_COMPILER_TARGET}")
+# Check if a compiler-rt library file path exists.
+# If found, cache the path in:
+#   COMPILER_RT_LIBRARY-<name>-<target>
+# If err_flag is true OR the path is not found, emit a message and set:
+#   COMPILER_RT_LIBRARY-<name>-<target> to NOTFOUND
+function(cache_compiler_rt_library err_flag name target library_file)
+  if(err_flag OR NOT EXISTS "${library_file}")
+    message(STATUS "Failed to find compiler-rt ${name} library for ${target}")
+    set(COMPILER_RT_LIBRARY-${name}-${target} "NOTFOUND" CACHE INTERNAL
+        "compiler-rt ${name} library for ${target}")
+  else()
+    message(STATUS "Found compiler-rt ${name} library: ${library_file}")
+    set(COMPILER_RT_LIBRARY-${name}-${target} "${library_file}" CACHE INTERNAL
+        "compiler-rt ${name} library for ${target}")
+  endif()
+endfunction()
+
+# Find the path to the compiler-rt library `name` (e.g. "builtins") for
+# the specified `target` (e.g. "x86_64-linux") and return it in `variable`.
+# This calls cache_compiler_rt_library, which caches the path to speed up
+# repeated invocations with the same `name` and `target`.
+function(find_compiler_rt_library name target variable)
+  if(NOT CMAKE_CXX_COMPILER_ID MATCHES Clang)
+    set(${variable} "NOTFOUND" PARENT_SCOPE)
+    return()
+  endif()
+  if (NOT target AND CMAKE_CXX_COMPILER_TARGET)
+    set(target "${CMAKE_CXX_COMPILER_TARGET}")
   endif()
-  get_property(SANITIZER_CXX_FLAGS CACHE CMAKE_CXX_FLAGS PROPERTY VALUE)
-  string(REPLACE " " ";" SANITIZER_CXX_FLAGS "${SANITIZER_CXX_FLAGS}")
-  list(APPEND CLANG_COMMAND ${SANITIZER_CXX_FLAGS})
-  execute_process(
+  if(NOT DEFINED COMPILER_RT_LIBRARY-builtins-${target})
+    # If the cache variable is not defined, invoke clang and then
+    # set it with cache_compiler_rt_library.
+    set(CLANG_COMMAND ${CMAKE_CXX_COMPILER} ${SANITIZER_COMMON_FLAGS}
+        "--rtlib=compiler-rt" "-print-libgcc-file-name")
+    if(target)
+      list(APPEND CLANG_COMMAND "--target=${target}")
+    endif()
+    get_property(SANITIZER_CXX_FLAGS CACHE CMAKE_CXX_FLAGS PROPERTY VALUE)
+    string(REPLACE " " ";" SANITIZER_CXX_FLAGS "${SANITIZER_CXX_FLAGS}")
+    list(APPEND CLANG_COMMAND ${SANITIZER_CXX_FLAGS})
+    execute_process(
       COMMAND ${CLANG_COMMAND}
       RESULT_VARIABLE HAD_ERROR
       OUTPUT_VARIABLE LIBRARY_FILE
-  )
-  string(STRIP "${LIBRARY_FILE}" LIBRARY_FILE)
-  file(TO_CMAKE_PATH "${LIBRARY_FILE}" LIBRARY_FILE)
-  string(REPLACE "builtins" "${name}" LIBRARY_FILE "${LIBRARY_FILE}")
-  if (NOT HAD_ERROR AND EXISTS "${LIBRARY_FILE}")
-    message(STATUS "Found compiler-rt ${name} library: ${LIBRARY_FILE}")
-    set(${variable} "${LIBRARY_FILE}" PARENT_SCOPE)
-  else()
-    message(STATUS "Failed to find compiler-rt ${name} library")
+    )
+    string(STRIP "${LIBRARY_FILE}" LIBRARY_FILE)
+    file(TO_CMAKE_PATH "${LIBRARY_FILE}" LIBRARY_FILE)
+    cache_compiler_rt_library(${HAD_ERROR}
+      builtins "${target}" "${LIBRARY_FILE}")
+  endif()
+  if(NOT COMPILER_RT_LIBRARY-builtins-${target})
+    set(${variable} "NOTFOUND" PARENT_SCOPE)
+    return()
+  endif()
+  if(NOT DEFINED COMPILER_RT_LIBRARY-${name}-${target})
+    # clang gives only the builtins library path. Other library paths are
+    # obtained by substituting "builtins" with ${name} in the builtins
+    # path and then checking if the resultant path exists. The result of
+    # this check is also cached by cache_compiler_rt_library.
+    set(LIBRARY_FILE "${COMPILER_RT_LIBRARY-builtins-${target}}")
+    string(REPLACE "builtins" "${name}" LIBRARY_FILE "${LIBRARY_FILE}")
+    cache_compiler_rt_library(FALSE "${name}" "${target}" "${LIBRARY_FILE}")
   endif()
+  set(${variable} "${COMPILER_RT_LIBRARY-${name}-${target}}" PARENT_SCOPE)
 endfunction()
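A sketch of how the new two-argument lookup is intended to be used (the target triple here is illustrative); the second call reuses the cached builtins path rather than invoking clang again:

    # First call runs clang with -print-libgcc-file-name and caches the result
    # in COMPILER_RT_LIBRARY-builtins-x86_64-unknown-linux-gnu.
    find_compiler_rt_library(builtins "x86_64-unknown-linux-gnu" BUILTINS_LIB)

    # Later lookups for other runtimes substitute the library name into the
    # cached builtins path and verify the file exists.
    find_compiler_rt_library(profile "x86_64-unknown-linux-gnu" PROFILE_LIB)

    if(NOT BUILTINS_LIB STREQUAL "NOTFOUND")
      message(STATUS "builtins library: ${BUILTINS_LIB}")
    endif()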
diff --git a/gnu/llvm/compiler-rt/cmake/Modules/SanitizerUtils.cmake b/gnu/llvm/compiler-rt/cmake/Modules/SanitizerUtils.cmake
index 699b03ae6a1..6c8651df3b3 100644
--- a/gnu/llvm/compiler-rt/cmake/Modules/SanitizerUtils.cmake
+++ b/gnu/llvm/compiler-rt/cmake/Modules/SanitizerUtils.cmake
@@ -34,7 +34,7 @@ macro(add_sanitizer_rt_symbols name)
       list(APPEND extra_args "--extra" ${arg})
     endforeach()
     add_custom_command(OUTPUT ${stamp}
-      COMMAND ${PYTHON_EXECUTABLE}
+      COMMAND ${Python3_EXECUTABLE}
         ${SANITIZER_GEN_DYNAMIC_LIST} ${extra_args} $<TARGET_FILE:${target_name}>
         --nm-executable "${SANITIZER_NM}" -o $<TARGET_FILE:${target_name}>.syms
       COMMAND ${CMAKE_COMMAND} -E touch ${stamp}
@@ -84,7 +84,7 @@ macro(add_sanitizer_rt_version_list name)
     list(APPEND args "$<TARGET_FILE:${arg}>")
   endforeach()
   add_custom_command(OUTPUT ${vers}
-    COMMAND ${PYTHON_EXECUTABLE}
+    COMMAND ${Python3_EXECUTABLE}
      ${SANITIZER_GEN_DYNAMIC_LIST} --version-list ${args}
      --nm-executable "${SANITIZER_NM}" -o ${vers}
    DEPENDS ${SANITIZER_GEN_DYNAMIC_LIST} ${ARG_EXTRA} ${ARG_LIBS}
@@ -100,7 +100,7 @@ endmacro()
 if(CMAKE_HOST_UNIX AND NOT OS_NAME MATCHES "OpenBSD")
   add_custom_target(SanitizerLintCheck
     COMMAND env LLVM_CHECKOUT=${LLVM_MAIN_SRC_DIR} SILENT=1 TMPDIR=
-      PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}
+      PYTHON_EXECUTABLE=${Python3_EXECUTABLE}
       COMPILER_RT=${COMPILER_RT_SOURCE_DIR}
       ${SANITIZER_LINT_SCRIPT}
     DEPENDS ${SANITIZER_LINT_SCRIPT}
diff --git a/gnu/llvm/compiler-rt/cmake/base-config-ix.cmake b/gnu/llvm/compiler-rt/cmake/base-config-ix.cmake
index b4b87aa5307..964dd598f10 100644
--- a/gnu/llvm/compiler-rt/cmake/base-config-ix.cmake
+++ b/gnu/llvm/compiler-rt/cmake/base-config-ix.cmake
@@ -5,6 +5,7 @@
 
 include(CheckIncludeFile)
 include(CheckCXXSourceCompiles)
+include(TestBigEndian)
 
 check_include_file(unwind.h HAVE_UNWIND_H)
 
@@ -191,7 +192,7 @@ macro(test_targets)
       # Strip out -nodefaultlibs when calling TEST_BIG_ENDIAN. Configuration
       # will fail with this option when building with a sanitizer.
       cmake_push_check_state()
-      string(REPLACE "-nodefaultlibs" "" CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
+      string(REPLACE "-nodefaultlibs" "" CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
       TEST_BIG_ENDIAN(HOST_IS_BIG_ENDIAN)
       cmake_pop_check_state()
 
@@ -236,6 +237,8 @@ macro(test_targets)
         test_target_arch(wasm32 "" "--target=wasm32-unknown-unknown")
       elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "wasm64")
         test_target_arch(wasm64 "" "--target=wasm64-unknown-unknown")
+      elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "ve")
+        test_target_arch(ve "__ve__" "--target=ve-unknown-none")
       endif()
       set(COMPILER_RT_OS_SUFFIX "")
     endif()
diff --git a/gnu/llvm/compiler-rt/cmake/builtin-config-ix.cmake b/gnu/llvm/compiler-rt/cmake/builtin-config-ix.cmake
index cd8e6fa9c9a..8de901513be 100644
--- a/gnu/llvm/compiler-rt/cmake/builtin-config-ix.cmake
+++ b/gnu/llvm/compiler-rt/cmake/builtin-config-ix.cmake
@@ -37,15 +37,19 @@ set(SPARC sparc)
 set(SPARCV9 sparcv9)
 set(WASM32 wasm32)
 set(WASM64 wasm64)
+set(VE ve)
 
 if(APPLE)
-  set(ARM64 arm64)
+  set(ARM64 arm64 arm64e)
   set(ARM32 armv7 armv7k armv7s)
   set(X86_64 x86_64 x86_64h)
 endif()
 
-set(ALL_BUILTIN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64}
-    ${HEXAGON} ${MIPS32} ${MIPS64} ${PPC64} ${RISCV32} ${RISCV64} ${SPARC} ${SPARCV9} ${WASM32} ${WASM64})
+set(ALL_BUILTIN_SUPPORTED_ARCH
+  ${X86} ${X86_64} ${ARM32} ${ARM64}
+  ${HEXAGON} ${MIPS32} ${MIPS64} ${PPC64}
+  ${RISCV32} ${RISCV64} ${SPARC} ${SPARCV9}
+  ${WASM32} ${WASM64} ${VE})
 
 include(CompilerRTUtils)
 include(CompilerRTDarwinUtils)
@@ -60,11 +64,34 @@ if(APPLE)
   find_darwin_sdk_dir(DARWIN_tvossim_SYSROOT appletvsimulator)
   find_darwin_sdk_dir(DARWIN_tvos_SYSROOT appletvos)
 
+  # Get the supported architectures from SDKSettings.
+  function(sdk_has_arch_support sdk_path os arch has_support)
+    execute_process(COMMAND
+        /usr/libexec/PlistBuddy -c "Print :SupportedTargets:${os}:Archs" ${sdk_path}/SDKSettings.plist
+      OUTPUT_VARIABLE SDK_SUPPORTED_ARCHS
+      RESULT_VARIABLE PLIST_ERROR)
+    if (PLIST_ERROR EQUAL 0 AND
+        SDK_SUPPORTED_ARCHS MATCHES " ${arch}\n")
+      message(STATUS "Found ${arch} support in ${sdk_path}/SDKSettings.plist")
+      set("${has_support}" On PARENT_SCOPE)
+    else()
+      message(STATUS "No ${arch} support in ${sdk_path}/SDKSettings.plist")
+      set("${has_support}" Off PARENT_SCOPE)
+    endif()
+  endfunction()
+
   set(DARWIN_EMBEDDED_PLATFORMS)
   set(DARWIN_osx_BUILTIN_MIN_VER 10.5)
   set(DARWIN_osx_BUILTIN_MIN_VER_FLAG
       -mmacosx-version-min=${DARWIN_osx_BUILTIN_MIN_VER})
   set(DARWIN_osx_BUILTIN_ALL_POSSIBLE_ARCHS ${X86} ${X86_64})
+  # Add support for arm64 macOS if available in the SDK.
+  foreach(arch ${ARM64})
+    sdk_has_arch_support(${DARWIN_osx_SYSROOT} macosx ${arch} MACOS_ARM_SUPPORT)
+    if (MACOS_ARM_SUPPORT)
+      list(APPEND DARWIN_osx_BUILTIN_ALL_POSSIBLE_ARCHS ${arch})
+    endif()
+  endforeach(arch)
 
   if(COMPILER_RT_ENABLE_IOS)
     list(APPEND DARWIN_EMBEDDED_PLATFORMS ios)
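A sketch of calling sdk_has_arch_support() directly; the SDK path is illustrative, and real callers pass the xcrun-discovered DARWIN_osx_SYSROOT as in the loop above:

    # Hypothetical SDK path; in the build, find_darwin_sdk_dir supplies it.
    set(OSX_SDK "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk")

    sdk_has_arch_support(${OSX_SDK} macosx arm64 HAS_ARM64)
    if(HAS_ARM64)
      list(APPEND DARWIN_osx_BUILTIN_ALL_POSSIBLE_ARCHS arm64)
    endif()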
diff --git a/gnu/llvm/compiler-rt/cmake/config-ix.cmake b/gnu/llvm/compiler-rt/cmake/config-ix.cmake
index 3aad08e8896..2edc1dabd90 100644
--- a/gnu/llvm/compiler-rt/cmake/config-ix.cmake
+++ b/gnu/llvm/compiler-rt/cmake/config-ix.cmake
@@ -1,6 +1,7 @@
 include(CMakePushCheckState)
 include(CheckCCompilerFlag)
 include(CheckCXXCompilerFlag)
+include(CheckIncludeFiles)
 include(CheckLibraryExists)
 include(CheckSymbolExists)
 include(TestBigEndian)
@@ -15,7 +16,7 @@ endfunction()
 check_library_exists(c fopen "" COMPILER_RT_HAS_LIBC)
 if (COMPILER_RT_USE_BUILTINS_LIBRARY)
   include(HandleCompilerRT)
-  find_compiler_rt_library(builtins COMPILER_RT_BUILTINS_LIBRARY)
+  find_compiler_rt_library(builtins "" COMPILER_RT_BUILTINS_LIBRARY)
 else()
   if (ANDROID)
     check_library_exists(gcc __gcc_personality_v0 "" COMPILER_RT_HAS_GCC_LIB)
@@ -70,6 +71,9 @@ check_cxx_compiler_flag("-Werror -fno-function-sections" COMPILER_RT_HAS_FNO_FUN
 check_cxx_compiler_flag(-std=c++14 COMPILER_RT_HAS_STD_CXX14_FLAG)
 check_cxx_compiler_flag(-ftls-model=initial-exec COMPILER_RT_HAS_FTLS_MODEL_INITIAL_EXEC)
 check_cxx_compiler_flag(-fno-lto COMPILER_RT_HAS_FNO_LTO_FLAG)
+check_cxx_compiler_flag(-fno-profile-generate COMPILER_RT_HAS_FNO_PROFILE_GENERATE_FLAG)
+check_cxx_compiler_flag(-fno-profile-instr-generate COMPILER_RT_HAS_FNO_PROFILE_INSTR_GENERATE_FLAG)
+check_cxx_compiler_flag(-fno-profile-instr-use COMPILER_RT_HAS_FNO_PROFILE_INSTR_USE_FLAG)
 check_cxx_compiler_flag("-Werror -msse3" COMPILER_RT_HAS_MSSE3_FLAG)
 check_cxx_compiler_flag("-Werror -msse4.2" COMPILER_RT_HAS_MSSE4_2_FLAG)
 check_cxx_compiler_flag(--sysroot=. COMPILER_RT_HAS_SYSROOT_FLAG)
@@ -116,6 +120,9 @@ check_cxx_compiler_flag(/wd4800 COMPILER_RT_HAS_WD4800_FLAG)
 # Symbols.
 check_symbol_exists(__func__ "" COMPILER_RT_HAS_FUNC_SYMBOL)
 
+# Includes.
+check_include_files("sys/auxv.h" COMPILER_RT_HAS_AUXV)
+
 # Libraries.
 check_library_exists(dl dlopen "" COMPILER_RT_HAS_LIBDL)
 check_library_exists(rt shm_open "" COMPILER_RT_HAS_LIBRT)
@@ -224,6 +231,18 @@ function(get_test_cflags_for_apple_platform platform arch cflags_out)
   set(${cflags_out} "${test_cflags_str}" PARENT_SCOPE)
 endfunction()
 
+function(get_capitalized_apple_platform platform platform_capitalized)
+  # TODO(dliew): Remove uses of this function. It exists to preserve needlessly complex
+  # directory naming conventions used by the Sanitizer lit test suites.
+  is_valid_apple_platform("${platform}" is_valid_platform)
+  if (NOT is_valid_platform)
+    message(FATAL_ERROR "\"${platform}\" is not a valid apple platform")
+  endif()
+  string(TOUPPER "${platform}" platform_upper)
+  string(REGEX REPLACE "OSSIM$" "OSSim" platform_upper_capitalized "${platform_upper}")
+  set(${platform_capitalized} "${platform_upper_capitalized}" PARENT_SCOPE)
+endfunction()
+
 function(is_valid_apple_platform platform is_valid_out)
   set(is_valid FALSE)
   if ("${platform}" STREQUAL "")
@@ -284,9 +303,9 @@ set(ALL_GWP_ASAN_SUPPORTED_ARCH ${X86} ${X86_64})
 if(APPLE)
   set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64})
 else()
-  set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64} ${ARM32} ${PPC64})
+  set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64} ${ARM32} ${PPC64} ${S390X})
 endif()
-set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64})
+set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64} ${S390X})
 set(ALL_HWASAN_SUPPORTED_ARCH ${X86_64} ${ARM64})
 set(ALL_PROFILE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${PPC64} ${MIPS32} ${MIPS64}
     ${S390X} ${SPARC} ${SPARCV9})
@@ -326,33 +345,47 @@ if(APPLE)
 
   if(COMPILER_RT_ENABLE_IOS)
     list(APPEND DARWIN_EMBEDDED_PLATFORMS ios)
+    set(DARWIN_ios_MIN_VER 9.0)
     set(DARWIN_ios_MIN_VER_FLAG -miphoneos-version-min)
     set(DARWIN_ios_SANITIZER_MIN_VER_FLAG
-        ${DARWIN_ios_MIN_VER_FLAG}=9.0)
+        ${DARWIN_ios_MIN_VER_FLAG}=${DARWIN_ios_MIN_VER})
+    set(DARWIN_iossim_MIN_VER_FLAG -mios-simulator-version-min)
+    set(DARWIN_iossim_SANITIZER_MIN_VER_FLAG
+        ${DARWIN_iossim_MIN_VER_FLAG}=${DARWIN_ios_MIN_VER})
   endif()
   if(COMPILER_RT_ENABLE_WATCHOS)
     list(APPEND DARWIN_EMBEDDED_PLATFORMS watchos)
+    set(DARWIN_watchos_MIN_VER 2.0)
     set(DARWIN_watchos_MIN_VER_FLAG -mwatchos-version-min)
     set(DARWIN_watchos_SANITIZER_MIN_VER_FLAG
-        ${DARWIN_watchos_MIN_VER_FLAG}=2.0)
+        ${DARWIN_watchos_MIN_VER_FLAG}=${DARWIN_watchos_MIN_VER})
+    set(DARWIN_watchossim_MIN_VER_FLAG -mwatchos-simulator-version-min)
+    set(DARWIN_watchossim_SANITIZER_MIN_VER_FLAG
+        ${DARWIN_watchossim_MIN_VER_FLAG}=${DARWIN_watchos_MIN_VER})
   endif()
   if(COMPILER_RT_ENABLE_TVOS)
     list(APPEND DARWIN_EMBEDDED_PLATFORMS tvos)
+    set(DARWIN_tvos_MIN_VER 9.0)
     set(DARWIN_tvos_MIN_VER_FLAG -mtvos-version-min)
     set(DARWIN_tvos_SANITIZER_MIN_VER_FLAG
-        ${DARWIN_tvos_MIN_VER_FLAG}=9.0)
+        ${DARWIN_tvos_MIN_VER_FLAG}=${DARWIN_tvos_MIN_VER})
+    set(DARWIN_tvossim_MIN_VER_FLAG -mtvos-simulator-version-min)
+    set(DARWIN_tvossim_SANITIZER_MIN_VER_FLAG
+        ${DARWIN_tvossim_MIN_VER_FLAG}=${DARWIN_tvos_MIN_VER})
   endif()
 
   set(SANITIZER_COMMON_SUPPORTED_OS osx)
   set(PROFILE_SUPPORTED_OS osx)
   set(TSAN_SUPPORTED_OS osx)
   set(XRAY_SUPPORTED_OS osx)
+  set(FUZZER_SUPPORTED_OS osx)
 
   # Note: In order to target x86_64h on OS X the minimum deployment target must
   # be 10.8 or higher.
   set(DEFAULT_SANITIZER_MIN_OSX_VERSION 10.10)
+  set(DARWIN_osx_MIN_VER_FLAG "-mmacosx-version-min")
   if(NOT SANITIZER_MIN_OSX_VERSION)
-    string(REGEX MATCH "-mmacosx-version-min=([.0-9]+)"
+    string(REGEX MATCH "${DARWIN_osx_MIN_VER_FLAG}=([.0-9]+)"
            MACOSX_VERSION_MIN_FLAG "${CMAKE_CXX_FLAGS}")
     if(MACOSX_VERSION_MIN_FLAG)
       set(SANITIZER_MIN_OSX_VERSION "${CMAKE_MATCH_1}")
@@ -386,10 +419,10 @@ if(APPLE)
 
   set(DARWIN_osx_CFLAGS
       ${DARWIN_COMMON_CFLAGS}
-      -mmacosx-version-min=${SANITIZER_MIN_OSX_VERSION})
+      ${DARWIN_osx_MIN_VER_FLAG}=${SANITIZER_MIN_OSX_VERSION})
   set(DARWIN_osx_LINK_FLAGS
       ${DARWIN_COMMON_LINK_FLAGS}
-      -mmacosx-version-min=${SANITIZER_MIN_OSX_VERSION})
+      ${DARWIN_osx_MIN_VER_FLAG}=${SANITIZER_MIN_OSX_VERSION})
 
   if(DARWIN_osx_SYSROOT)
     list(APPEND DARWIN_osx_CFLAGS -isysroot ${DARWIN_osx_SYSROOT})
@@ -414,11 +447,11 @@ if(APPLE)
       if(DARWIN_${platform}sim_SYSROOT)
         set(DARWIN_${platform}sim_CFLAGS
             ${DARWIN_COMMON_CFLAGS}
-            ${DARWIN_${platform}_SANITIZER_MIN_VER_FLAG}
+            ${DARWIN_${platform}sim_SANITIZER_MIN_VER_FLAG}
             -isysroot ${DARWIN_${platform}sim_SYSROOT})
         set(DARWIN_${platform}sim_LINK_FLAGS
             ${DARWIN_COMMON_LINK_FLAGS}
-            ${DARWIN_${platform}_SANITIZER_MIN_VER_FLAG}
+            ${DARWIN_${platform}sim_SANITIZER_MIN_VER_FLAG}
            -isysroot ${DARWIN_${platform}sim_SYSROOT})
 
         set(DARWIN_${platform}sim_SKIP_CC_KEXT On)
@@ -430,6 +463,7 @@ if(APPLE)
         list(APPEND SANITIZER_COMMON_SUPPORTED_OS ${platform}sim)
         list(APPEND PROFILE_SUPPORTED_OS ${platform}sim)
         list(APPEND TSAN_SUPPORTED_OS ${platform}sim)
+        list(APPEND FUZZER_SUPPORTED_OS ${platform}sim)
       endif()
       foreach(arch ${DARWIN_${platform}sim_ARCHS})
         list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch})
@@ -459,6 +493,7 @@ if(APPLE)
       if(DARWIN_${platform}_TSAN_ARCHS)
         list(APPEND TSAN_SUPPORTED_OS ${platform})
       endif()
+      list(APPEND FUZZER_SUPPORTED_OS ${platform})
     endif()
     foreach(arch ${DARWIN_${platform}_ARCHS})
       list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch})
@@ -468,6 +503,10 @@ if(APPLE)
     endforeach()
   endif()
 
+  # Explicitly disable unsupported Sanitizer configurations.
+  list(REMOVE_ITEM FUZZER_SUPPORTED_OS "watchos")
+  list(REMOVE_ITEM FUZZER_SUPPORTED_OS "watchossim")
+
   # for list_intersect
   include(CompilerRTUtils)
 
@@ -611,7 +650,7 @@ endif()
 
 # TODO: Add builtins support.
-if (CRT_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux")
+if (CRT_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux" AND NOT LLVM_USE_SANITIZER)
   set(COMPILER_RT_HAS_CRT TRUE)
 else()
   set(COMPILER_RT_HAS_CRT FALSE)
@@ -625,7 +664,7 @@ else()
 endif()
 
 if (COMPILER_RT_HAS_SANITIZER_COMMON AND LSAN_SUPPORTED_ARCH AND
-    OS_NAME MATCHES "Darwin|Linux|NetBSD")
+    OS_NAME MATCHES "Darwin|Linux|NetBSD|Fuchsia")
   set(COMPILER_RT_HAS_LSAN TRUE)
 else()
   set(COMPILER_RT_HAS_LSAN FALSE)
@@ -687,7 +726,8 @@ else()
 endif()
 
 #TODO(kostyak): add back Android & Fuchsia when the code settles a bit.
-if (SCUDO_STANDALONE_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux")
+if (SCUDO_STANDALONE_SUPPORTED_ARCH AND OS_NAME MATCHES "Linux" AND
+    COMPILER_RT_HAS_AUXV)
   set(COMPILER_RT_HAS_SCUDO_STANDALONE TRUE)
 else()
   set(COMPILER_RT_HAS_SCUDO_STANDALONE FALSE)
diff --git a/gnu/llvm/compiler-rt/include/fuzzer/FuzzedDataProvider.h b/gnu/llvm/compiler-rt/include/fuzzer/FuzzedDataProvider.h
index 3e069eba69b..83bcd0134a7 100644
--- a/gnu/llvm/compiler-rt/include/fuzzer/FuzzedDataProvider.h
+++ b/gnu/llvm/compiler-rt/include/fuzzer/FuzzedDataProvider.h
@@ -34,272 +34,354 @@ class FuzzedDataProvider {
       : data_ptr_(data), remaining_bytes_(size) {}
   ~FuzzedDataProvider() = default;
 
-  // Returns a std::vector containing |num_bytes| of input data. If fewer than
-  // |num_bytes| of data remain, returns a shorter std::vector containing all
-  // of the data that's left. Can be used with any byte sized type, such as
-  // char, unsigned char, uint8_t, etc.
-  template <typename T> std::vector<T> ConsumeBytes(size_t num_bytes) {
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    return ConsumeBytes<T>(num_bytes, num_bytes);
-  }
-
-  // Similar to |ConsumeBytes|, but also appends the terminator value at the end
-  // of the resulting vector. Useful, when a mutable null-terminated C-string is
-  // needed, for example. But that is a rare case. Better avoid it, if possible,
-  // and prefer using |ConsumeBytes| or |ConsumeBytesAsString| methods.
+  // See the implementation below (after the class definition) for more verbose
+  // comments for each of the methods.
+
+  // Methods returning std::vector of bytes. These are the most popular choice
+  // when splitting fuzzing input into pieces, as every piece is put into a
+  // separate buffer (i.e. ASan would catch any under-/overflow) and the memory
+  // will be released automatically.
+  template <typename T> std::vector<T> ConsumeBytes(size_t num_bytes);
   template <typename T>
-  std::vector<T> ConsumeBytesWithTerminator(size_t num_bytes,
-                                            T terminator = 0) {
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    std::vector<T> result = ConsumeBytes<T>(num_bytes + 1, num_bytes);
-    result.back() = terminator;
-    return result;
-  }
-
-  // Returns a std::string containing |num_bytes| of input data. Using this and
-  // |.c_str()| on the resulting string is the best way to get an immutable
-  // null-terminated C string. If fewer than |num_bytes| of data remain, returns
-  // a shorter std::string containing all of the data that's left.
-  std::string ConsumeBytesAsString(size_t num_bytes) {
-    static_assert(sizeof(std::string::value_type) == sizeof(uint8_t),
-                  "ConsumeBytesAsString cannot convert the data to a string.");
-
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    std::string result(
-        reinterpret_cast<const std::string::value_type *>(data_ptr_),
-        num_bytes);
-    Advance(num_bytes);
-    return result;
-  }
+  std::vector<T> ConsumeBytesWithTerminator(size_t num_bytes, T terminator = 0);
+  template <typename T> std::vector<T> ConsumeRemainingBytes();
 
-  // Returns a number in the range [min, max] by consuming bytes from the
-  // input data. The value might not be uniformly distributed in the given
-  // range. If there's no input data left, always returns |min|. |min| must
-  // be less than or equal to |max|.
-  template <typename T> T ConsumeIntegralInRange(T min, T max) {
-    static_assert(std::is_integral<T>::value, "An integral type is required.");
-    static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type.");
+  // Methods returning strings. Use only when you need a std::string or a null
+  // terminated C-string. Otherwise, prefer the methods returning std::vector.
+  std::string ConsumeBytesAsString(size_t num_bytes);
+  std::string ConsumeRandomLengthString(size_t max_length);
+  std::string ConsumeRandomLengthString();
+  std::string ConsumeRemainingBytesAsString();
 
-    if (min > max)
-      abort();
+  // Methods returning integer values.
+  template <typename T> T ConsumeIntegral();
+  template <typename T> T ConsumeIntegralInRange(T min, T max);
 
-    // Use the biggest type possible to hold the range and the result.
-    uint64_t range = static_cast<uint64_t>(max) - min;
-    uint64_t result = 0;
-    size_t offset = 0;
-
-    while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 &&
-           remaining_bytes_ != 0) {
-      // Pull bytes off the end of the seed data. Experimentally, this seems to
-      // allow the fuzzer to more easily explore the input space. This makes
-      // sense, since it works by modifying inputs that caused new code to run,
-      // and this data is often used to encode length of data read by
-      // |ConsumeBytes|. Separating out read lengths makes it easier modify the
-      // contents of the data that is actually read.
-      --remaining_bytes_;
-      result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_];
-      offset += CHAR_BIT;
-    }
+  // Methods returning floating point values.
+  template <typename T> T ConsumeFloatingPoint();
+  template <typename T> T ConsumeFloatingPointInRange(T min, T max);
 
-    // Avoid division by 0, in case |range + 1| results in overflow.
-    if (range != std::numeric_limits<decltype(range)>::max())
-      result = result % (range + 1);
+  // 0 <= return value <= 1.
+  template <typename T> T ConsumeProbability();
 
-    return static_cast<T>(min + result);
-  }
+  bool ConsumeBool();
 
-  // Returns a std::string of length from 0 to |max_length|. When it runs out of
-  // input data, returns what remains of the input. Designed to be more stable
-  // with respect to a fuzzer inserting characters than just picking a random
-  // length and then consuming that many bytes with |ConsumeBytes|.
-  std::string ConsumeRandomLengthString(size_t max_length) {
-    // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\"
-    // followed by anything else to the end of the string. As a result of this
-    // logic, a fuzzer can insert characters into the string, and the string
-    // will be lengthened to include those new characters, resulting in a more
-    // stable fuzzer than picking the length of a string independently from
-    // picking its contents.
-    std::string result;
-
-    // Reserve the anticipated capaticity to prevent several reallocations.
-    result.reserve(std::min(max_length, remaining_bytes_));
-    for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) {
-      char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
-      Advance(1);
-      if (next == '\\' && remaining_bytes_ != 0) {
-        next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
-        Advance(1);
-        if (next != '\\')
-          break;
-      }
-      result += next;
-    }
+  // Returns a value chosen from the given enum.
+  template <typename T> T ConsumeEnum();
 
-    result.shrink_to_fit();
-    return result;
-  }
+  // Returns a value from the given array.
+  template <typename T, size_t size> T PickValueInArray(const T (&array)[size]);
+  template <typename T> T PickValueInArray(std::initializer_list<const T> list);
 
-  // Returns a std::vector containing all remaining bytes of the input data.
-  template <typename T> std::vector<T> ConsumeRemainingBytes() {
-    return ConsumeBytes<T>(remaining_bytes_);
-  }
+  // Writes data to the given destination and returns number of bytes written.
+  size_t ConsumeData(void *destination, size_t num_bytes);
 
-  // Returns a std::string containing all remaining bytes of the input data.
-  // Prefer using |ConsumeRemainingBytes| unless you actually need a std::string
-  // object.
-  std::string ConsumeRemainingBytesAsString() {
-    return ConsumeBytesAsString(remaining_bytes_);
-  }
+  // Reports the remaining bytes available for fuzzed input.
+  size_t remaining_bytes() { return remaining_bytes_; }
 
-  // Returns a number in the range [Type's min, Type's max]. The value might
-  // not be uniformly distributed in the given range. If there's no input data
-  // left, always returns |min|.
-  template <typename T> T ConsumeIntegral() {
-    return ConsumeIntegralInRange(std::numeric_limits<T>::min(),
-                                  std::numeric_limits<T>::max());
-  }
+ private:
+  FuzzedDataProvider(const FuzzedDataProvider &) = delete;
+  FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete;
 
-  // Reads one byte and returns a bool, or false when no data remains.
-  bool ConsumeBool() { return 1 & ConsumeIntegral<uint8_t>(); }
+  void CopyAndAdvance(void *destination, size_t num_bytes);
 
-  // Returns a copy of the value selected from the given fixed-size |array|.
-  template <typename T, size_t size>
-  T PickValueInArray(const T (&array)[size]) {
-    static_assert(size > 0, "The array must be non empty.");
-    return array[ConsumeIntegralInRange<size_t>(0, size - 1)];
-  }
+  void Advance(size_t num_bytes);
 
   template <typename T>
-  T PickValueInArray(std::initializer_list<const T> list) {
-    // TODO(Dor1s): switch to static_assert once C++14 is allowed.
-    if (!list.size())
-      abort();
-
-    return *(list.begin() + ConsumeIntegralInRange<size_t>(0, list.size() - 1));
-  }
-
-  // Returns an enum value. The enum must start at 0 and be contiguous. It must
-  // also contain |kMaxValue| aliased to its largest (inclusive) value. Such as:
-  // enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue };
-  template <typename T> T ConsumeEnum() {
-    static_assert(std::is_enum<T>::value, "|T| must be an enum type.");
-    return static_cast<T>(ConsumeIntegralInRange<uint32_t>(
-        0, static_cast<uint32_t>(T::kMaxValue)));
-  }
+  std::vector<T> ConsumeBytes(size_t size, size_t num_bytes);
 
-  // Returns a floating point number in the range [0.0, 1.0]. If there's no
-  // input data left, always returns 0.
-  template <typename T> T ConsumeProbability() {
-    static_assert(std::is_floating_point<T>::value,
-                  "A floating point type is required.");
+  template <typename TS, typename TU> TS ConvertUnsignedToSigned(TU value);
 
-    // Use different integral types for different floating point types in order
-    // to provide better density of the resulting values.
-    using IntegralType =
-        typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t,
-                                  uint64_t>::type;
+  const uint8_t *data_ptr_;
+  size_t remaining_bytes_;
+};
 
-    T result = static_cast<T>(ConsumeIntegral<IntegralType>());
-    result /= static_cast<T>(std::numeric_limits<IntegralType>::max());
-    return result;
+// Returns a std::vector containing |num_bytes| of input data. If fewer than
+// |num_bytes| of data remain, returns a shorter std::vector containing all
+// of the data that's left. Can be used with any byte sized type, such as
+// char, unsigned char, uint8_t, etc.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytes(size_t num_bytes) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  return ConsumeBytes<T>(num_bytes, num_bytes);
+}
+
+// Similar to |ConsumeBytes|, but also appends the terminator value at the end
+// of the resulting vector. Useful, when a mutable null-terminated C-string is
+// needed, for example. But that is a rare case. Better avoid it, if possible,
+// and prefer using |ConsumeBytes| or |ConsumeBytesAsString| methods.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytesWithTerminator(size_t num_bytes,
+                                                              T terminator) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  std::vector<T> result = ConsumeBytes<T>(num_bytes + 1, num_bytes);
+  result.back() = terminator;
+  return result;
+}
+
+// Returns a std::vector containing all remaining bytes of the input data.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeRemainingBytes() {
+  return ConsumeBytes<T>(remaining_bytes_);
+}
+
+// Returns a std::string containing |num_bytes| of input data. Using this and
+// |.c_str()| on the resulting string is the best way to get an immutable
+// null-terminated C string. If fewer than |num_bytes| of data remain, returns
+// a shorter std::string containing all of the data that's left.
+inline std::string FuzzedDataProvider::ConsumeBytesAsString(size_t num_bytes) {
+  static_assert(sizeof(std::string::value_type) == sizeof(uint8_t),
+                "ConsumeBytesAsString cannot convert the data to a string.");
+
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  std::string result(
+      reinterpret_cast<const std::string::value_type *>(data_ptr_), num_bytes);
+  Advance(num_bytes);
+  return result;
+}
+
+// Returns a std::string of length from 0 to |max_length|. When it runs out of
+// input data, returns what remains of the input. Designed to be more stable
+// with respect to a fuzzer inserting characters than just picking a random
+// length and then consuming that many bytes with |ConsumeBytes|.
+inline std::string
+FuzzedDataProvider::ConsumeRandomLengthString(size_t max_length) {
+  // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\"
+  // followed by anything else to the end of the string. As a result of this
+  // logic, a fuzzer can insert characters into the string, and the string
+  // will be lengthened to include those new characters, resulting in a more
+  // stable fuzzer than picking the length of a string independently from
+  // picking its contents.
+  std::string result;
+
+  // Reserve the anticipated capacity to prevent several reallocations.
+  result.reserve(std::min(max_length, remaining_bytes_));
+  for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) {
+    char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
+    Advance(1);
+    if (next == '\\' && remaining_bytes_ != 0) {
+      next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
+      Advance(1);
+      if (next != '\\')
+        break;
+    }
+    result += next;
+  }
+
+  result.shrink_to_fit();
+  return result;
+}
+
+// Returns a std::string of length from 0 to |remaining_bytes_|.
+inline std::string FuzzedDataProvider::ConsumeRandomLengthString() {
+  return ConsumeRandomLengthString(remaining_bytes_);
+}
+
+// Returns a std::string containing all remaining bytes of the input data.
+// Prefer using |ConsumeRemainingBytes| unless you actually need a std::string
+// object.
+inline std::string FuzzedDataProvider::ConsumeRemainingBytesAsString() {
+  return ConsumeBytesAsString(remaining_bytes_);
+}
+
+// Returns a number in the range [Type's min, Type's max]. The value might
+// not be uniformly distributed in the given range. If there's no input data
+// left, always returns |min|.
+template <typename T> T FuzzedDataProvider::ConsumeIntegral() {
+  return ConsumeIntegralInRange(std::numeric_limits<T>::min(),
+                                std::numeric_limits<T>::max());
+}
+
+// Returns a number in the range [min, max] by consuming bytes from the
+// input data. The value might not be uniformly distributed in the given
+// range. If there's no input data left, always returns |min|. |min| must
+// be less than or equal to |max|.
+template <typename T>
+T FuzzedDataProvider::ConsumeIntegralInRange(T min, T max) {
+  static_assert(std::is_integral<T>::value, "An integral type is required.");
+  static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type.");
+
+  if (min > max)
+    abort();
+
+  // Use the biggest type possible to hold the range and the result.
+  uint64_t range = static_cast<uint64_t>(max) - min;
+  uint64_t result = 0;
+  size_t offset = 0;
+
+  while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 &&
+         remaining_bytes_ != 0) {
+    // Pull bytes off the end of the seed data. Experimentally, this seems to
+    // allow the fuzzer to more easily explore the input space. This makes
+    // sense, since it works by modifying inputs that caused new code to run,
+    // and this data is often used to encode length of data read by
+    // |ConsumeBytes|. Separating out read lengths makes it easier to modify
+    // the contents of the data that is actually read.
+    --remaining_bytes_;
+    result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_];
+    offset += CHAR_BIT;
+  }
+
+  // Avoid division by 0, in case |range + 1| results in overflow.
+  if (range != std::numeric_limits<decltype(range)>::max())
+    result = result % (range + 1);
+
+  return static_cast<T>(min + result);
+}
+
+// Returns a floating point value in the range [Type's lowest, Type's max] by
+// consuming bytes from the input data. If there's no input data left, always
+// returns approximately 0.
+template <typename T> T FuzzedDataProvider::ConsumeFloatingPoint() {
+  return ConsumeFloatingPointInRange<T>(std::numeric_limits<T>::lowest(),
+                                        std::numeric_limits<T>::max());
+}
+
+// Returns a floating point value in the given range by consuming bytes from
+// the input data. If there's no input data left, returns |min|. Note that
+// |min| must be less than or equal to |max|.
+template <typename T>
+T FuzzedDataProvider::ConsumeFloatingPointInRange(T min, T max) {
+  if (min > max)
+    abort();
+
+  T range = .0;
+  T result = min;
+  constexpr T zero(.0);
+  if (max > zero && min < zero && max > min + std::numeric_limits<T>::max()) {
+    // The diff |max - min| would overflow the given floating point type. Use
+    // half of the diff as the range and consume a bool to decide whether
+    // the result is in the first or the second part of the diff.
+    range = (max / 2.0) - (min / 2.0);
+    if (ConsumeBool()) {
+      result += range;
+    }
+  } else {
+    range = max - min;
+  }
-    return result + range * ConsumeProbability<T>();
-  }
-
-  // Reports the remaining bytes available for fuzzed input.
-  size_t remaining_bytes() { return remaining_bytes_; }
-
- private:
-  FuzzedDataProvider(const FuzzedDataProvider &) = delete;
-  FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete;
-
-  void Advance(size_t num_bytes) {
-    if (num_bytes > remaining_bytes_)
+
+  return result + range * ConsumeProbability<T>();
+}
+
+// Returns a floating point number in the range [0.0, 1.0]. If there's no
+// input data left, always returns 0.
+template <typename T> T FuzzedDataProvider::ConsumeProbability() {
+  static_assert(std::is_floating_point<T>::value,
+                "A floating point type is required.");
+
+  // Use different integral types for different floating point types in order
+  // to provide better density of the resulting values.
+  using IntegralType =
+      typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t,
+                                uint64_t>::type;
+
+  T result = static_cast<T>(ConsumeIntegral<IntegralType>());
+  result /= static_cast<T>(std::numeric_limits<IntegralType>::max());
+  return result;
+}
+
+// Reads one byte and returns a bool, or false when no data remains.
+inline bool FuzzedDataProvider::ConsumeBool() {
+  return 1 & ConsumeIntegral<uint8_t>();
+}
+
+// Returns an enum value. The enum must start at 0 and be contiguous. It must
+// also contain |kMaxValue| aliased to its largest (inclusive) value. Such as:
+// enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue };
+template <typename T> T FuzzedDataProvider::ConsumeEnum() {
+  static_assert(std::is_enum<T>::value, "|T| must be an enum type.");
+  return static_cast<T>(
+      ConsumeIntegralInRange<uint32_t>(0, static_cast<uint32_t>(T::kMaxValue)));
+}
+
+// Returns a copy of the value selected from the given fixed-size |array|.
+template <typename T, size_t size>
+T FuzzedDataProvider::PickValueInArray(const T (&array)[size]) {
+  static_assert(size > 0, "The array must be non empty.");
+  return array[ConsumeIntegralInRange<size_t>(0, size - 1)];
+}
+
+template <typename T>
+T FuzzedDataProvider::PickValueInArray(std::initializer_list<const T> list) {
+  // TODO(Dor1s): switch to static_assert once C++14 is allowed.
+  if (!list.size())
+    abort();
+
+  return *(list.begin() + ConsumeIntegralInRange<size_t>(0, list.size() - 1));
+}
+
+// Writes |num_bytes| of input data to the given destination pointer. If there
+// is not enough data left, writes all remaining bytes. Return value is the
+// number of bytes written.
+// In general, it's better to avoid using this function, but it may be useful
+// in cases when it's necessary to fill a certain buffer or object with
+// fuzzing data.
+inline size_t FuzzedDataProvider::ConsumeData(void *destination,
+                                              size_t num_bytes) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  CopyAndAdvance(destination, num_bytes);
+  return num_bytes;
+}
+
+// Private methods.
+inline void FuzzedDataProvider::CopyAndAdvance(void *destination,
+                                               size_t num_bytes) {
+  std::memcpy(destination, data_ptr_, num_bytes);
+  Advance(num_bytes);
+}
+
+inline void FuzzedDataProvider::Advance(size_t num_bytes) {
+  if (num_bytes > remaining_bytes_)
+    abort();
+
+  data_ptr_ += num_bytes;
+  remaining_bytes_ -= num_bytes;
+}
+
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytes(size_t size, size_t num_bytes) {
+  static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type.");
+
+  // The point of using the size-based constructor below is to increase the
+  // odds of having a vector object with capacity being equal to the length.
+  // That part is always implementation specific, but at least both libc++ and
+  // libstdc++ allocate the requested number of bytes in that constructor,
+  // which seems to be a natural choice for other implementations as well.
+  // To increase the odds even more, we also call |shrink_to_fit| below.
+  std::vector<T> result(size);
+  if (size == 0) {
+    if (num_bytes != 0)
       abort();
-
-    data_ptr_ += num_bytes;
-    remaining_bytes_ -= num_bytes;
-  }
-
-  template <typename T>
-  std::vector<T> ConsumeBytes(size_t size, size_t num_bytes_to_consume) {
-    static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type.");
-
-    // The point of using the size-based constructor below is to increase the
-    // odds of having a vector object with capacity being equal to the length.
-    // That part is always implementation specific, but at least both libc++ and
-    // libstdc++ allocate the requested number of bytes in that constructor,
-    // which seems to be a natural choice for other implementations as well.
-    // To increase the odds even more, we also call |shrink_to_fit| below.
-    std::vector<T> result(size);
-    if (size == 0) {
-      if (num_bytes_to_consume != 0)
-        abort();
-      return result;
-    }
-
-    std::memcpy(result.data(), data_ptr_, num_bytes_to_consume);
-    Advance(num_bytes_to_consume);
-
-    // Even though |shrink_to_fit| is also implementation specific, we expect it
-    // to provide an additional assurance in case vector's constructor allocated
-    // a buffer which is larger than the actual amount of data we put inside it.
-    result.shrink_to_fit();
     return result;
   }
 
-  template <typename TS, typename TU> TS ConvertUnsignedToSigned(TU value) {
-    static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types.");
-    static_assert(!std::numeric_limits<TU>::is_signed,
-                  "Source type must be unsigned.");
-
-    // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream.
-    if (std::numeric_limits<TS>::is_modulo)
-      return static_cast<TS>(value);
-
-    // Avoid using implementation-defined unsigned to signer conversions.
-    // To learn more, see https://stackoverflow.com/questions/13150449.
-    if (value <= std::numeric_limits<TS>::max()) {
-      return static_cast<TS>(value);
-    } else {
-      constexpr auto TS_min = std::numeric_limits<TS>::min();
-      return TS_min + static_cast<char>(value - TS_min);
-    }
+  CopyAndAdvance(result.data(), num_bytes);
+
+  // Even though |shrink_to_fit| is also implementation specific, we expect it
+  // to provide an additional assurance in case vector's constructor allocated
+  // a buffer which is larger than the actual amount of data we put inside it.
+  result.shrink_to_fit();
+  return result;
+}
+
+template <typename TS, typename TU>
+TS FuzzedDataProvider::ConvertUnsignedToSigned(TU value) {
+  static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types.");
+  static_assert(!std::numeric_limits<TU>::is_signed,
+                "Source type must be unsigned.");
+
+  // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream.
+  if (std::numeric_limits<TS>::is_modulo)
+    return static_cast<TS>(value);
+
+  // Avoid using implementation-defined unsigned to signed conversions.
+  // To learn more, see https://stackoverflow.com/questions/13150449.
+  if (value <= std::numeric_limits<TS>::max()) {
+    return static_cast<TS>(value);
+  } else {
+    constexpr auto TS_min = std::numeric_limits<TS>::min();
+    return TS_min + static_cast<char>(value - TS_min);
+  }
-
-  const uint8_t *data_ptr_;
-  size_t remaining_bytes_;
-};
+}
 
 #endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
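With the definitions now out-of-line, usage from a fuzz target is unchanged; a minimal sketch of a libFuzzer target exercising the API declared above:

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <vector>

    #include <fuzzer/FuzzedDataProvider.h>

    extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
      FuzzedDataProvider provider(data, size);

      // Integral/bool consumption trims bytes off the *end* of the input.
      bool flag = provider.ConsumeBool();
      uint32_t count = provider.ConsumeIntegralInRange<uint32_t>(0, 64);

      // String/vector consumption reads from the front.
      std::string name = provider.ConsumeRandomLengthString(16);
      std::vector<uint8_t> payload = provider.ConsumeBytes<uint8_t>(count);

      // Whatever is left can be drained in one go.
      std::vector<uint8_t> rest = provider.ConsumeRemainingBytes<uint8_t>();

      // ... feed flag/name/payload into the API under test ...
      (void)flag; (void)name; (void)payload; (void)rest;
      return 0;
    }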
+ result.shrink_to_fit(); + return result; +} + +template <typename TS, typename TU> +TS FuzzedDataProvider::ConvertUnsignedToSigned(TU value) { + static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types."); + static_assert(!std::numeric_limits<TU>::is_signed, + "Source type must be unsigned."); + + // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream. + if (std::numeric_limits<TS>::is_modulo) + return static_cast<TS>(value); + + // Avoid using implementation-defined unsigned to signed conversions. + // To learn more, see https://stackoverflow.com/questions/13150449. + if (value <= std::numeric_limits<TS>::max()) { + return static_cast<TS>(value); + } else { + constexpr auto TS_min = std::numeric_limits<TS>::min(); + return TS_min + static_cast<char>(value - TS_min); } - - const uint8_t *data_ptr_; - size_t remaining_bytes_; -}; +} #endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ diff --git a/gnu/llvm/compiler-rt/include/profile/InstrProfData.inc b/gnu/llvm/compiler-rt/include/profile/InstrProfData.inc index 99f41d8fef0..a6913527e67 100644 --- a/gnu/llvm/compiler-rt/include/profile/InstrProfData.inc +++ b/gnu/llvm/compiler-rt/include/profile/InstrProfData.inc @@ -198,6 +198,14 @@ VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize, "last") #undef VALUE_PROF_KIND /* VALUE_PROF_KIND end */ +#undef COVMAP_V2_OR_V3 +#ifdef COVMAP_V2 +#define COVMAP_V2_OR_V3 +#endif +#ifdef COVMAP_V3 +#define COVMAP_V2_OR_V3 +#endif + /* COVMAP_FUNC_RECORD start */ /* Definition of member fields of the function record structure in coverage * map. @@ -214,16 +222,30 @@ COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \ COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \ llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \ NameValue.size())) -#else +#endif +#ifdef COVMAP_V2_OR_V3 COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \ - llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \ - llvm::IndexedInstrProf::ComputeHash(NameValue))) + llvm::ConstantInt::get( \ + llvm::Type::getInt64Ty(Ctx), NameHash)) #endif COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \ - llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\ - CoverageMapping.size())) + llvm::ConstantInt::get( \ + llvm::Type::getInt32Ty(Ctx), CoverageMapping.size())) COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \ - llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), FuncHash)) + llvm::ConstantInt::get( \ + llvm::Type::getInt64Ty(Ctx), FuncHash)) +#ifdef COVMAP_V3 +COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FilenamesRef, \ + llvm::ConstantInt::get( \ + llvm::Type::getInt64Ty(Ctx), FilenamesRef)) +COVMAP_FUNC_RECORD(const char, \ + llvm::ArrayType::get(llvm::Type::getInt8Ty(Ctx), \ + CoverageMapping.size()), \ + CoverageMapping, + llvm::ConstantDataArray::getRaw( \ + CoverageMapping, CoverageMapping.size(), \ + llvm::Type::getInt8Ty(Ctx))) +#endif #undef COVMAP_FUNC_RECORD /* COVMAP_FUNC_RECORD end. 
*/ @@ -236,7 +258,7 @@ COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \ #define INSTR_PROF_DATA_DEFINED #endif COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \ - llvm::ConstantInt::get(Int32Ty, FunctionRecords.size())) + llvm::ConstantInt::get(Int32Ty, NRecords)) COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \ llvm::ConstantInt::get(Int32Ty, FilenamesSize)) COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \ @@ -267,6 +289,9 @@ INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \ INSTR_PROF_SECT_ENTRY(IPSK_covmap, \ INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \ INSTR_PROF_COVMAP_COFF, "__LLVM_COV,") +INSTR_PROF_SECT_ENTRY(IPSK_covfun, \ + INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON), \ + INSTR_PROF_COVFUN_COFF, "__LLVM_COV,") INSTR_PROF_SECT_ENTRY(IPSK_orderfile, \ INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON), \ INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COFF), "__DATA,") @@ -632,9 +657,9 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure, /* Raw profile format version (start from 1). */ #define INSTR_PROF_RAW_VERSION 5 /* Indexed profile format version (start from 1). */ -#define INSTR_PROF_INDEX_VERSION 5 -/* Coverage mapping format vresion (start from 0). */ -#define INSTR_PROF_COVMAP_VERSION 2 +#define INSTR_PROF_INDEX_VERSION 6 +/* Coverage mapping format version (start from 0). */ +#define INSTR_PROF_COVMAP_VERSION 3 /* Profile version is always of type uint64_t. Reserve the upper 8 bits in the * version for other variants of profile. We set the lowest bit of the upper 8 @@ -661,6 +686,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure, #define INSTR_PROF_VALS_COMMON __llvm_prf_vals #define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds #define INSTR_PROF_COVMAP_COMMON __llvm_covmap +#define INSTR_PROF_COVFUN_COMMON __llvm_covfun #define INSTR_PROF_ORDERFILE_COMMON __llvm_orderfile /* Windows section names. Because these section names contain dollar characters, * they must be quoted. @@ -671,6 +697,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure, #define INSTR_PROF_VALS_COFF ".lprfv$M" #define INSTR_PROF_VNODES_COFF ".lprfnd$M" #define INSTR_PROF_COVMAP_COFF ".lcovmap$M" +#define INSTR_PROF_COVFUN_COFF ".lcovfun$M" #define INSTR_PROF_ORDERFILE_COFF ".lorderfile$M" #ifdef _WIN32 @@ -685,6 +712,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure, /* Value profile nodes section. */ #define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF #define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF +#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_COVFUN_COFF #define INSTR_PROF_ORDERFILE_SECT_NAME INSTR_PROF_ORDERFILE_COFF #else /* Runtime section names and name strings. */ @@ -698,6 +726,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure, /* Value profile nodes section. */ #define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON) #define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON) +#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON) /* Order file instrumentation. 
*/ #define INSTR_PROF_ORDERFILE_SECT_NAME \ INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON) @@ -752,3 +781,5 @@ typedef struct InstrProfValueData { #else #undef INSTR_PROF_DATA_DEFINED #endif + +#undef COVMAP_V2_OR_V3 diff --git a/gnu/llvm/compiler-rt/include/sanitizer/linux_syscall_hooks.h b/gnu/llvm/compiler-rt/include/sanitizer/linux_syscall_hooks.h index a1794b71af5..56eae3d40f9 100644 --- a/gnu/llvm/compiler-rt/include/sanitizer/linux_syscall_hooks.h +++ b/gnu/llvm/compiler-rt/include/sanitizer/linux_syscall_hooks.h @@ -1845,6 +1845,10 @@ #define __sanitizer_syscall_post_rt_sigaction(res, signum, act, oldact, sz) \ __sanitizer_syscall_post_impl_rt_sigaction(res, (long)signum, (long)act, \ (long)oldact, (long)sz) +#define __sanitizer_syscall_pre_sigaltstack(ss, oss) \ + __sanitizer_syscall_pre_impl_sigaltstack((long)ss, (long)oss) +#define __sanitizer_syscall_post_sigaltstack(res, ss, oss) \ + __sanitizer_syscall_post_impl_sigaltstack(res, (long)ss, (long)oss) // And now a few syscalls we don't handle yet. #define __sanitizer_syscall_pre_afs_syscall(...) @@ -1912,7 +1916,6 @@ #define __sanitizer_syscall_pre_setreuid32(...) #define __sanitizer_syscall_pre_set_thread_area(...) #define __sanitizer_syscall_pre_setuid32(...) -#define __sanitizer_syscall_pre_sigaltstack(...) #define __sanitizer_syscall_pre_sigreturn(...) #define __sanitizer_syscall_pre_sigsuspend(...) #define __sanitizer_syscall_pre_stty(...) @@ -1992,7 +1995,6 @@ #define __sanitizer_syscall_post_setreuid32(res, ...) #define __sanitizer_syscall_post_set_thread_area(res, ...) #define __sanitizer_syscall_post_setuid32(res, ...) -#define __sanitizer_syscall_post_sigaltstack(res, ...) #define __sanitizer_syscall_post_sigreturn(res, ...) #define __sanitizer_syscall_post_sigsuspend(res, ...) #define __sanitizer_syscall_post_stty(res, ...) @@ -3075,6 +3077,8 @@ void __sanitizer_syscall_pre_impl_rt_sigaction(long signum, long act, long oldact, long sz); void __sanitizer_syscall_post_impl_rt_sigaction(long res, long signum, long act, long oldact, long sz); +void __sanitizer_syscall_pre_impl_sigaltstack(long ss, long oss); +void __sanitizer_syscall_post_impl_sigaltstack(long res, long ss, long oss); #ifdef __cplusplus } // extern "C" #endif diff --git a/gnu/llvm/compiler-rt/include/sanitizer/tsan_interface.h b/gnu/llvm/compiler-rt/include/sanitizer/tsan_interface.h index 011b23350ca..96b8ad58541 100644 --- a/gnu/llvm/compiler-rt/include/sanitizer/tsan_interface.h +++ b/gnu/llvm/compiler-rt/include/sanitizer/tsan_interface.h @@ -38,34 +38,34 @@ void __tsan_release(void *addr); // Mutex has static storage duration and no-op constructor and destructor. // This effectively makes tsan ignore destroy annotation. -const unsigned __tsan_mutex_linker_init = 1 << 0; +static const unsigned __tsan_mutex_linker_init = 1 << 0; // Mutex is write reentrant. -const unsigned __tsan_mutex_write_reentrant = 1 << 1; +static const unsigned __tsan_mutex_write_reentrant = 1 << 1; // Mutex is read reentrant. -const unsigned __tsan_mutex_read_reentrant = 1 << 2; +static const unsigned __tsan_mutex_read_reentrant = 1 << 2; // Mutex does not have static storage duration, and must not be used after // its destructor runs. The opposite of __tsan_mutex_linker_init. // If this flag is passed to __tsan_mutex_destroy, then the destruction // is ignored unless this flag was previously set on the mutex. -const unsigned __tsan_mutex_not_static = 1 << 8; +static const unsigned __tsan_mutex_not_static = 1 << 8; // Mutex operation flags: // Denotes read lock operation. 
-const unsigned __tsan_mutex_read_lock = 1 << 3; +static const unsigned __tsan_mutex_read_lock = 1 << 3; // Denotes try lock operation. -const unsigned __tsan_mutex_try_lock = 1 << 4; +static const unsigned __tsan_mutex_try_lock = 1 << 4; // Denotes that a try lock operation has failed to acquire the mutex. -const unsigned __tsan_mutex_try_lock_failed = 1 << 5; +static const unsigned __tsan_mutex_try_lock_failed = 1 << 5; // Denotes that the lock operation acquires multiple recursion levels. // Number of levels is passed in recursion parameter. // This is useful for annotation of e.g. Java builtin monitors, // for which wait operation releases all recursive acquisitions of the mutex. -const unsigned __tsan_mutex_recursive_lock = 1 << 6; +static const unsigned __tsan_mutex_recursive_lock = 1 << 6; // Denotes that the unlock operation releases all recursion levels. // Number of released levels is returned and later must be passed to // the corresponding __tsan_mutex_post_lock annotation. -const unsigned __tsan_mutex_recursive_unlock = 1 << 7; +static const unsigned __tsan_mutex_recursive_unlock = 1 << 7; // Annotate creation of a mutex. // Supported flags: mutex creation flags. @@ -152,7 +152,7 @@ void __tsan_set_fiber_name(void *fiber, const char *name); // Flags for __tsan_switch_to_fiber: // Do not establish a happens-before relation between fibers -const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0; +static const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0; #ifdef __cplusplus } // extern "C" diff --git a/gnu/llvm/compiler-rt/lib/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/CMakeLists.txt index 39082aa06d4..2020ee32d4f 100644 --- a/gnu/llvm/compiler-rt/lib/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/CMakeLists.txt @@ -59,3 +59,9 @@ endif() if(COMPILER_RT_BUILD_LIBFUZZER) compiler_rt_build_runtime(fuzzer) endif() + +# It doesn't normally make sense to build runtimes when a sanitizer is enabled, +# so we don't add_subdirectory the runtimes in that case. However, the opposite +# is true for fuzzers that exercise parts of the runtime. So we add the fuzzer +# directories explicitly here. +add_subdirectory(scudo/standalone/fuzz) diff --git a/gnu/llvm/compiler-rt/lib/asan/asan_allocator.cpp b/gnu/llvm/compiler-rt/lib/asan/asan_allocator.cpp index 65c51fbafdd..126d26d0823 100644 --- a/gnu/llvm/compiler-rt/lib/asan/asan_allocator.cpp +++ b/gnu/llvm/compiler-rt/lib/asan/asan_allocator.cpp @@ -1037,8 +1037,19 @@ uptr PointsIntoChunk(void* p) { return 0; } +// Debug code. Delete once issue #1193 is chased down. 
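To make the mutex annotation flags above concrete, a hand-rolled lock might drive this interface roughly as follows; a minimal sketch, assuming a TSan-enabled build (-fsanitize=thread), in which only the __tsan_mutex_* names come from this header and SpinLock is invented for the example:

  #include <sanitizer/tsan_interface.h>
  #include <atomic>

  class SpinLock {
   public:
    SpinLock() { __tsan_mutex_create(this, __tsan_mutex_not_static); }
    ~SpinLock() { __tsan_mutex_destroy(this, __tsan_mutex_not_static); }
    void Lock() {
      __tsan_mutex_pre_lock(this, 0);
      while (locked_.exchange(true, std::memory_order_acquire)) {
      }
      __tsan_mutex_post_lock(this, 0, /*recursion=*/0);
    }
    void Unlock() {
      __tsan_mutex_pre_unlock(this, 0);
      locked_.store(false, std::memory_order_release);
      __tsan_mutex_post_unlock(this, 0);
    }

   private:
    std::atomic<bool> locked_{false};
  };

Passing __tsan_mutex_try_lock (and __tsan_mutex_try_lock_failed on failure) to the pre/post pair would mark a failed TryLock attempt instead.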
+extern "C" SANITIZER_WEAK_ATTRIBUTE const char *__lsan_current_stage; + uptr GetUserBegin(uptr chunk) { __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); + if (!m) + Printf( + "ASAN is about to crash with a CHECK failure.\n" + "The ASAN developers are trying to chase down this bug,\n" + "so if you've encountered this bug please let us know.\n" + "See also: https://github.com/google/sanitizers/issues/1193\n" + "chunk: %p caller %p __lsan_current_stage %s\n", + chunk, GET_CALLER_PC(), __lsan_current_stage); CHECK(m); return m->Beg(); } diff --git a/gnu/llvm/compiler-rt/lib/asan/asan_fuchsia.cpp b/gnu/llvm/compiler-rt/lib/asan/asan_fuchsia.cpp index f8b2d5f2697..64f6dcbcefe 100644 --- a/gnu/llvm/compiler-rt/lib/asan/asan_fuchsia.cpp +++ b/gnu/llvm/compiler-rt/lib/asan/asan_fuchsia.cpp @@ -62,6 +62,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { UNIMPLEMENTED(); } +bool PlatformUnpoisonStacks() { return false; } + // We can use a plain thread_local variable for TSD. static thread_local void *per_thread; diff --git a/gnu/llvm/compiler-rt/lib/asan/asan_internal.h b/gnu/llvm/compiler-rt/lib/asan/asan_internal.h index 72a4c3f22ff..d4bfe996b66 100644 --- a/gnu/llvm/compiler-rt/lib/asan/asan_internal.h +++ b/gnu/llvm/compiler-rt/lib/asan/asan_internal.h @@ -83,6 +83,16 @@ void *AsanDoesNotSupportStaticLinkage(); void AsanCheckDynamicRTPrereqs(); void AsanCheckIncompatibleRT(); +// Unpoisons platform-specific stacks. +// Returns true if all stacks have been unpoisoned. +bool PlatformUnpoisonStacks(); + +// asan_rtl.cpp +// Unpoison a region containing a stack. +// Performs a sanity check and warns if the bounds don't look right. +// The warning contains the type string to identify the stack type. +void UnpoisonStack(uptr bottom, uptr top, const char *type); + // asan_thread.cpp AsanThread *CreateMainThread(); diff --git a/gnu/llvm/compiler-rt/lib/asan/asan_posix.cpp b/gnu/llvm/compiler-rt/lib/asan/asan_posix.cpp index 920d216624a..d7f19d84654 100644 --- a/gnu/llvm/compiler-rt/lib/asan/asan_posix.cpp +++ b/gnu/llvm/compiler-rt/lib/asan/asan_posix.cpp @@ -17,6 +17,7 @@ #include "asan_internal.h" #include "asan_interceptors.h" #include "asan_mapping.h" +#include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" #include "sanitizer_common/sanitizer_libc.h" @@ -24,6 +25,7 @@ #include "sanitizer_common/sanitizer_procmaps.h" #include <pthread.h> +#include <signal.h> #include <stdlib.h> #include <sys/time.h> #include <sys/resource.h> @@ -37,6 +39,32 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { ReportDeadlySignal(sig); } +bool PlatformUnpoisonStacks() { + stack_t signal_stack; + CHECK_EQ(0, sigaltstack(nullptr, &signal_stack)); + uptr sigalt_bottom = (uptr)signal_stack.ss_sp; + uptr sigalt_top = (uptr)((char *)signal_stack.ss_sp + signal_stack.ss_size); + // If we're executing on the signal alternate stack AND the Linux flag + // SS_AUTODISARM was used, then we cannot get the signal alternate stack + // bounds from sigaltstack -- sigaltstack's output looks just as if no + // alternate stack has ever been set up. + // We're always unpoisoning the signal alternate stack to support jumping + // between the default stack and signal alternate stack. + if (signal_stack.ss_flags != SS_DISABLE) + UnpoisonStack(sigalt_bottom, sigalt_top, "sigalt"); + + if (signal_stack.ss_flags != SS_ONSTACK) + return false; + + // Since we're on the signal alternate stack, we cannot find the DEFAULT + stack bottom using a local variable.
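(Aside, for readers unfamiliar with sigaltstack(2): passing a null new-stack pointer turns the call into a pure query, roughly

  stack_t ss;
  if (sigaltstack(/*ss=*/nullptr, /*old_ss=*/&ss) == 0) {
    // ss.ss_flags is SS_ONSTACK while running on the alternate stack,
    // SS_DISABLE if none is installed.
  }

and under Linux's SS_AUTODISARM the configuration is cleared for the duration of the handler, which is exactly the blind spot the comment above describes.)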
+ uptr default_bottom, tls_addr, tls_size, stack_size; + GetThreadStackAndTls(/*main=*/false, &default_bottom, &stack_size, &tls_addr, + &tls_size); + UnpoisonStack(default_bottom, default_bottom + stack_size, "default"); + return true; +} + // ---------------------- TSD ---------------- {{{1 #if SANITIZER_NETBSD && !ASAN_DYNAMIC diff --git a/gnu/llvm/compiler-rt/lib/asan/asan_report.cpp b/gnu/llvm/compiler-rt/lib/asan/asan_report.cpp index 2e6ce436d03..99e8678aa78 100644 --- a/gnu/llvm/compiler-rt/lib/asan/asan_report.cpp +++ b/gnu/llvm/compiler-rt/lib/asan/asan_report.cpp @@ -160,6 +160,9 @@ class ScopedInErrorReport { BlockingMutexLock l(&error_message_buf_mutex); internal_memcpy(buffer_copy.data(), error_message_buffer, kErrorMessageBufferSize); + // Clear error_message_buffer so that if we find other errors + // we don't re-log this error. + error_message_buffer_pos = 0; } LogFullErrorReport(buffer_copy.data()); diff --git a/gnu/llvm/compiler-rt/lib/asan/asan_rtems.cpp b/gnu/llvm/compiler-rt/lib/asan/asan_rtems.cpp index ecd568c5981..2e5b2f0a3b2 100644 --- a/gnu/llvm/compiler-rt/lib/asan/asan_rtems.cpp +++ b/gnu/llvm/compiler-rt/lib/asan/asan_rtems.cpp @@ -64,6 +64,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { UNIMPLEMENTED(); } +bool PlatformUnpoisonStacks() { return false; } + void EarlyInit() { // Provide early initialization of shadow memory so that // instrumented code running before full initialization will not diff --git a/gnu/llvm/compiler-rt/lib/asan/asan_rtl.cpp b/gnu/llvm/compiler-rt/lib/asan/asan_rtl.cpp index 594d7752eea..463bfa02f9f 100644 --- a/gnu/llvm/compiler-rt/lib/asan/asan_rtl.cpp +++ b/gnu/llvm/compiler-rt/lib/asan/asan_rtl.cpp @@ -551,22 +551,33 @@ class AsanInitializer { static AsanInitializer asan_initializer; #endif // ASAN_DYNAMIC -} // namespace __asan - -// ---------------------- Interface ---------------- {{{1 -using namespace __asan; - -void NOINLINE __asan_handle_no_return() { - if (asan_init_is_running) +void UnpoisonStack(uptr bottom, uptr top, const char *type) { + static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M + if (top - bottom > kMaxExpectedCleanupSize) { + static bool reported_warning = false; + if (reported_warning) + return; + reported_warning = true; + Report( + "WARNING: ASan is ignoring requested __asan_handle_no_return: " + "stack type: %s top: %p; bottom %p; size: %p (%zd)\n" + "False positive error reports may follow\n" + "For details see " + "https://github.com/google/sanitizers/issues/189\n", + type, top, bottom, top - bottom, top - bottom); return; + } + PoisonShadow(bottom, top - bottom, 0); +} - int local_stack; - AsanThread *curr_thread = GetCurrentThread(); - uptr PageSize = GetPageSizeCached(); - uptr top, bottom; - if (curr_thread) { +static void UnpoisonDefaultStack() { + uptr bottom, top; + + if (AsanThread *curr_thread = GetCurrentThread()) { + int local_stack; + const uptr page_size = GetPageSizeCached(); top = curr_thread->stack_top(); - bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1); + bottom = ((uptr)&local_stack - page_size) & ~(page_size - 1); } else if (SANITIZER_RTEMS) { // Give up on RTEMS.
return; @@ -578,25 +589,31 @@ void NOINLINE __asan_handle_no_return() { &tls_size); top = bottom + stack_size; } - static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M - if (top - bottom > kMaxExpectedCleanupSize) { - static bool reported_warning = false; - if (reported_warning) - return; - reported_warning = true; - Report("WARNING: ASan is ignoring requested __asan_handle_no_return: " - "stack top: %p; bottom %p; size: %p (%zd)\n" - "False positive error reports may follow\n" - "For details see " - "https://github.com/google/sanitizers/issues/189\n", - top, bottom, top - bottom, top - bottom); - return; - } - PoisonShadow(bottom, top - bottom, 0); + + UnpoisonStack(bottom, top, "default"); +} + +static void UnpoisonFakeStack() { + AsanThread *curr_thread = GetCurrentThread(); if (curr_thread && curr_thread->has_fake_stack()) curr_thread->fake_stack()->HandleNoReturn(); } +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; + +void NOINLINE __asan_handle_no_return() { + if (asan_init_is_running) + return; + + if (!PlatformUnpoisonStacks()) + UnpoisonDefaultStack(); + + UnpoisonFakeStack(); +} + extern "C" void *__asan_extra_spill_area() { AsanThread *t = GetCurrentThread(); CHECK(t); diff --git a/gnu/llvm/compiler-rt/lib/asan/asan_thread.cpp b/gnu/llvm/compiler-rt/lib/asan/asan_thread.cpp index 6734d9a1668..f0df8bd4b37 100644 --- a/gnu/llvm/compiler-rt/lib/asan/asan_thread.cpp +++ b/gnu/llvm/compiler-rt/lib/asan/asan_thread.cpp @@ -480,6 +480,8 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, return true; } +void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {} + void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, void *arg) { __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); diff --git a/gnu/llvm/compiler-rt/lib/asan/asan_win.cpp b/gnu/llvm/compiler-rt/lib/asan/asan_win.cpp index 417892aaedd..03feddbe86b 100644 --- a/gnu/llvm/compiler-rt/lib/asan/asan_win.cpp +++ b/gnu/llvm/compiler-rt/lib/asan/asan_win.cpp @@ -268,6 +268,8 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) { void AsanOnDeadlySignal(int, void *siginfo, void *context) { UNIMPLEMENTED(); } +bool PlatformUnpoisonStacks() { return false; } + #if SANITIZER_WINDOWS64 // Exception handler for dealing with shadow memory. static LONG CALLBACK diff --git a/gnu/llvm/compiler-rt/lib/asan/scripts/asan_symbolize.py b/gnu/llvm/compiler-rt/lib/asan/scripts/asan_symbolize.py index a196c075b03..d99e3441e92 100755 --- a/gnu/llvm/compiler-rt/lib/asan/scripts/asan_symbolize.py +++ b/gnu/llvm/compiler-rt/lib/asan/scripts/asan_symbolize.py @@ -275,11 +275,14 @@ class DarwinSymbolizer(Symbolizer): atos_line = self.atos.readline() # A well-formed atos response looks like this: # foo(type1, type2) (in object.name) (filename.cc:80) + # NOTE: + # * For C functions atos omits parentheses and argument types. + # * For C++ functions the function name (i.e., `foo` above) may contain + # templates which may contain parentheses. 
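Concretely, the two shapes the NOTE describes look like these illustrative lines (not real tool output):

  main (in a.out) (main.c:12)
  Foo<std::pair<int, int> >::bar(int) (in a.out) (foo.cc:80)

The re.sub('\(.*?\)', '', ...) call removed just below would have stripped the parameter list, and pieces of any template arguments, from the C++ form, so the matched function name is now kept verbatim.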
match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line) logging.debug('atos_line: %s', atos_line) if match: function_name = match.group(1) - function_name = re.sub('\(.*?\)', '', function_name) file_name = fix_filename(match.group(3)) return ['%s in %s %s' % (addr, function_name, file_name)] else: diff --git a/gnu/llvm/compiler-rt/lib/asan/tests/asan_str_test.cpp b/gnu/llvm/compiler-rt/lib/asan/tests/asan_str_test.cpp index 33a38a81567..12b8e5a5e67 100644 --- a/gnu/llvm/compiler-rt/lib/asan/tests/asan_str_test.cpp +++ b/gnu/llvm/compiler-rt/lib/asan/tests/asan_str_test.cpp @@ -39,12 +39,12 @@ enum class OOBKind { Global, }; -string LeftOOBReadMessage(OOBKind oob_kind, int oob_distance) { +std::string LeftOOBReadMessage(OOBKind oob_kind, int oob_distance) { return oob_kind == OOBKind::Stack ? kStackReadUnderflow : ::LeftOOBReadMessage(oob_distance); } -string RightOOBReadMessage(OOBKind oob_kind, int oob_distance) { +std::string RightOOBReadMessage(OOBKind oob_kind, int oob_distance) { return oob_kind == OOBKind::Stack ? kStackReadOverflow : ::RightOOBReadMessage(oob_distance); } @@ -480,7 +480,7 @@ TEST(AddressSanitizer, StrNCatOOBTest) { free(from); } -static string OverlapErrorMessage(const string &func) { +static std::string OverlapErrorMessage(const std::string &func) { return func + "-param-overlap"; } diff --git a/gnu/llvm/compiler-rt/lib/asan/tests/asan_test.cpp b/gnu/llvm/compiler-rt/lib/asan/tests/asan_test.cpp index 47bba08cc23..edc98ed1852 100644 --- a/gnu/llvm/compiler-rt/lib/asan/tests/asan_test.cpp +++ b/gnu/llvm/compiler-rt/lib/asan/tests/asan_test.cpp @@ -739,7 +739,7 @@ TEST(AddressSanitizer, Store128Test) { #endif // FIXME: All tests that use this function should be turned into lit tests. -string RightOOBErrorMessage(int oob_distance, bool is_write) { +std::string RightOOBErrorMessage(int oob_distance, bool is_write) { assert(oob_distance >= 0); char expected_str[100]; sprintf(expected_str, ASAN_PCRE_DOTALL @@ -751,19 +751,19 @@ string RightOOBErrorMessage(int oob_distance, bool is_write) { is_write ? "WRITE" : "READ", #endif oob_distance); - return string(expected_str); + return std::string(expected_str); } -string RightOOBWriteMessage(int oob_distance) { +std::string RightOOBWriteMessage(int oob_distance) { return RightOOBErrorMessage(oob_distance, /*is_write*/true); } -string RightOOBReadMessage(int oob_distance) { +std::string RightOOBReadMessage(int oob_distance) { return RightOOBErrorMessage(oob_distance, /*is_write*/false); } // FIXME: All tests that use this function should be turned into lit tests. -string LeftOOBErrorMessage(int oob_distance, bool is_write) { +std::string LeftOOBErrorMessage(int oob_distance, bool is_write) { assert(oob_distance > 0); char expected_str[100]; sprintf(expected_str, @@ -775,22 +775,22 @@ string LeftOOBErrorMessage(int oob_distance, bool is_write) { is_write ? 
"WRITE" : "READ", #endif oob_distance); - return string(expected_str); + return std::string(expected_str); } -string LeftOOBWriteMessage(int oob_distance) { +std::string LeftOOBWriteMessage(int oob_distance) { return LeftOOBErrorMessage(oob_distance, /*is_write*/true); } -string LeftOOBReadMessage(int oob_distance) { +std::string LeftOOBReadMessage(int oob_distance) { return LeftOOBErrorMessage(oob_distance, /*is_write*/false); } -string LeftOOBAccessMessage(int oob_distance) { +std::string LeftOOBAccessMessage(int oob_distance) { assert(oob_distance > 0); char expected_str[100]; sprintf(expected_str, "located %d bytes to the left", oob_distance); - return string(expected_str); + return std::string(expected_str); } char* MallocAndMemsetString(size_t size, char ch) { @@ -1157,9 +1157,13 @@ TEST(AddressSanitizer, DISABLED_StressStackReuseAndExceptionsTest) { #if !defined(_WIN32) TEST(AddressSanitizer, MlockTest) { +#if !defined(__ANDROID__) || __ANDROID_API__ >= 17 EXPECT_EQ(0, mlockall(MCL_CURRENT)); +#endif EXPECT_EQ(0, mlock((void*)0x12345, 0x5678)); +#if !defined(__ANDROID__) || __ANDROID_API__ >= 17 EXPECT_EQ(0, munlockall()); +#endif EXPECT_EQ(0, munlock((void*)0x987, 0x654)); } #endif @@ -1195,11 +1199,11 @@ TEST(AddressSanitizer, AttributeNoSanitizeAddressTest) { #if !defined(__ANDROID__) && \ !defined(__APPLE__) && \ !defined(_WIN32) -static string MismatchStr(const string &str) { - return string("AddressSanitizer: alloc-dealloc-mismatch \\(") + str; +static std::string MismatchStr(const std::string &str) { + return std::string("AddressSanitizer: alloc-dealloc-mismatch \\(") + str; } -static string MismatchOrNewDeleteTypeStr(const string &mismatch_str) { +static std::string MismatchOrNewDeleteTypeStr(const std::string &mismatch_str) { return "(" + MismatchStr(mismatch_str) + ")|(AddressSanitizer: new-delete-type-mismatch)"; } diff --git a/gnu/llvm/compiler-rt/lib/asan/tests/asan_test_utils.h b/gnu/llvm/compiler-rt/lib/asan/tests/asan_test_utils.h index 2ab44855a49..e619ca3d596 100644 --- a/gnu/llvm/compiler-rt/lib/asan/tests/asan_test_utils.h +++ b/gnu/llvm/compiler-rt/lib/asan/tests/asan_test_utils.h @@ -73,13 +73,13 @@ NOINLINE void asan_write(T *a) { *a = 0; } -string RightOOBErrorMessage(int oob_distance, bool is_write); -string RightOOBWriteMessage(int oob_distance); -string RightOOBReadMessage(int oob_distance); -string LeftOOBErrorMessage(int oob_distance, bool is_write); -string LeftOOBWriteMessage(int oob_distance); -string LeftOOBReadMessage(int oob_distance); -string LeftOOBAccessMessage(int oob_distance); +std::string RightOOBErrorMessage(int oob_distance, bool is_write); +std::string RightOOBWriteMessage(int oob_distance); +std::string RightOOBReadMessage(int oob_distance); +std::string LeftOOBErrorMessage(int oob_distance, bool is_write); +std::string LeftOOBWriteMessage(int oob_distance); +std::string LeftOOBReadMessage(int oob_distance); +std::string LeftOOBAccessMessage(int oob_distance); char* MallocAndMemsetString(size_t size, char ch); char* MallocAndMemsetString(size_t size); diff --git a/gnu/llvm/compiler-rt/lib/builtins/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/builtins/CMakeLists.txt index feacd21d086..3a66dd9c3fb 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/builtins/CMakeLists.txt @@ -26,6 +26,16 @@ if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) include(AddCompilerRT) endif() +if (COMPILER_RT_STANDALONE_BUILD) + # When compiler-rt is being built standalone, possibly as a cross-compilation + # target, 
the target may or may not want position independent code. This + # option provides an avenue through which the flag may be controlled when an + # LLVM configuration is not being utilized. + option(COMPILER_RT_BUILTINS_ENABLE_PIC + "Turns on or off -fPIC for the builtin library source" + ON) +endif() + include(builtin-config-ix) # TODO: Need to add a mechanism for logging errors when builtin source files are @@ -36,7 +46,6 @@ set(GENERIC_SOURCES absvti2.c adddf3.c addsf3.c - addtf3.c addvdi3.c addvsi3.c addvti3.c @@ -65,9 +74,7 @@ set(GENERIC_SOURCES divsc3.c divsf3.c divsi3.c - divtc3.c divti3.c - divtf3.c extendsfdf2.c extendhfsf2.c ffsdi2.c @@ -113,7 +120,6 @@ set(GENERIC_SOURCES mulsc3.c mulsf3.c multi3.c - multf3.c mulvdi3.c mulvsi3.c mulvti3.c @@ -133,13 +139,11 @@ set(GENERIC_SOURCES popcountti2.c powidf2.c powisf2.c - powitf2.c subdf3.c subsf3.c subvdi3.c subvsi3.c subvti3.c - subtf3.c trampoline_setup.c truncdfhf2.c truncdfsf2.c @@ -158,7 +162,10 @@ set(GENERIC_SOURCES ) set(GENERIC_TF_SOURCES + addtf3.c comparetf2.c + divtc3.c + divtf3.c extenddftf2.c extendsftf2.c fixtfdi.c @@ -174,6 +181,9 @@ set(GENERIC_TF_SOURCES floatunsitf.c floatuntitf.c multc3.c + multf3.c + powitf2.c + subtf3.c trunctfdf2.c trunctfsf2.c ) @@ -250,7 +260,9 @@ endif () if (NOT MSVC) set(x86_64_SOURCES + ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES} + ${x86_ARCH_SOURCES} x86_64/floatdidf.c x86_64/floatdisf.c x86_64/floatdixf.c @@ -258,7 +270,8 @@ if (NOT MSVC) x86_64/floatundisf.S x86_64/floatundixf.S ) - filter_builtin_sources(x86_64_SOURCES EXCLUDE x86_64_SOURCES "${x86_64_SOURCES};${GENERIC_SOURCES}") + + # Darwin x86_64 Haswell set(x86_64h_SOURCES ${x86_64_SOURCES}) if (WIN32) @@ -270,6 +283,8 @@ if (NOT MSVC) endif() set(i386_SOURCES + ${GENERIC_SOURCES} + ${x86_ARCH_SOURCES} i386/ashldi3.S i386/ashrdi3.S i386/divdi3.S @@ -285,7 +300,6 @@ if (NOT MSVC) i386/udivdi3.S i386/umoddi3.S ) - filter_builtin_sources(i386_SOURCES EXCLUDE i386_SOURCES "${i386_SOURCES};${GENERIC_SOURCES}") if (WIN32) set(i386_SOURCES @@ -299,20 +313,15 @@ else () # MSVC # MSVC's assembler takes Intel syntax, not AT&T syntax. # Also use only MSVC compilable builtin implementations. 
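In a hypothetical standalone configure, the new option would be toggled like so (only the COMPILER_RT_BUILTINS_ENABLE_PIC name comes from this diff; the rest of the command line is illustrative):

  cmake -G Ninja ../compiler-rt -DCOMPILER_RT_BUILTINS_ENABLE_PIC=OFF

leaving the builtin objects non-PIC for, e.g., a fully static firmware image.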
set(x86_64_SOURCES + ${GENERIC_SOURCES} + ${x86_ARCH_SOURCES} x86_64/floatdidf.c x86_64/floatdisf.c x86_64/floatdixf.c - ${GENERIC_SOURCES} ) - set(x86_64h_SOURCES ${x86_64_SOURCES}) - set(i386_SOURCES ${GENERIC_SOURCES}) + set(i386_SOURCES ${GENERIC_SOURCES} ${x86_ARCH_SOURCES}) endif () # if (NOT MSVC) -set(x86_64h_SOURCES ${x86_64h_SOURCES} ${x86_ARCH_SOURCES}) -set(x86_64_SOURCES ${x86_64_SOURCES} ${x86_ARCH_SOURCES}) -set(i386_SOURCES ${i386_SOURCES} ${x86_ARCH_SOURCES}) -set(i686_SOURCES ${i686_SOURCES} ${x86_ARCH_SOURCES}) - set(arm_SOURCES arm/fp_mode.c arm/bswapdi2.S @@ -346,8 +355,8 @@ set(arm_SOURCES arm/udivmodsi4.S arm/udivsi3.S arm/umodsi3.S + ${GENERIC_SOURCES} ) -filter_builtin_sources(arm_SOURCES EXCLUDE arm_SOURCES "${arm_SOURCES};${GENERIC_SOURCES}") set(thumb1_SOURCES arm/divsi3.S @@ -441,8 +450,8 @@ if(MINGW) arm/aeabi_uldivmod.S arm/chkstk.S mingw_fixfloat.c + ${GENERIC_SOURCES} ) - filter_builtin_sources(arm_SOURCES EXCLUDE arm_SOURCES "${arm_SOURCES};${GENERIC_SOURCES}") elseif(NOT WIN32) # TODO the EABI sources should only be added to EABI targets set(arm_SOURCES @@ -475,6 +484,7 @@ set(armv7_SOURCES ${arm_SOURCES}) set(armv7s_SOURCES ${arm_SOURCES}) set(armv7k_SOURCES ${arm_SOURCES}) set(arm64_SOURCES ${aarch64_SOURCES}) +set(arm64e_SOURCES ${aarch64_SOURCES}) # macho_embedded archs set(armv6m_SOURCES ${thumb1_SOURCES}) @@ -482,7 +492,6 @@ set(armv7m_SOURCES ${arm_SOURCES}) set(armv7em_SOURCES ${arm_SOURCES}) # hexagon arch -set(hexagon_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES}) set(hexagon_SOURCES hexagon/common_entry_exit_abi1.S hexagon/common_entry_exit_abi2.S @@ -495,13 +504,9 @@ set(hexagon_SOURCES hexagon/dfsqrt.S hexagon/divdi3.S hexagon/divsi3.S - hexagon/fabs_opt.S hexagon/fastmath2_dlib_asm.S hexagon/fastmath2_ldlib_asm.S hexagon/fastmath_dlib_asm.S - hexagon/fma_opt.S - hexagon/fmax_opt.S - hexagon/fmin_opt.S hexagon/memcpy_forward_vp4cp4n2.S hexagon/memcpy_likely_aligned.S hexagon/moddi3.S @@ -514,6 +519,8 @@ set(hexagon_SOURCES hexagon/udivsi3.S hexagon/umoddi3.S hexagon/umodsi3.S + ${GENERIC_SOURCES} + ${GENERIC_TF_SOURCES} ) @@ -561,6 +568,12 @@ set(wasm64_SOURCES ${GENERIC_SOURCES} ) +set(ve_SOURCES + ve/grow_stack.S + ve/grow_stack_align.S + ${GENERIC_TF_SOURCES} + ${GENERIC_SOURCES}) + add_custom_target(builtins) set_target_properties(builtins PROPERTIES FOLDER "Compiler-RT Misc") @@ -576,7 +589,9 @@ else () # These flags would normally be added to CMAKE_C_FLAGS by the llvm # cmake step. Add them manually if this is a standalone build. if(COMPILER_RT_STANDALONE_BUILD) - append_list_if(COMPILER_RT_HAS_FPIC_FLAG -fPIC BUILTIN_CFLAGS) + if(COMPILER_RT_BUILTINS_ENABLE_PIC) + append_list_if(COMPILER_RT_HAS_FPIC_FLAG -fPIC BUILTIN_CFLAGS) + endif() append_list_if(COMPILER_RT_HAS_FNO_BUILTIN_FLAG -fno-builtin BUILTIN_CFLAGS) if(NOT ANDROID) append_list_if(COMPILER_RT_HAS_VISIBILITY_HIDDEN_FLAG -fvisibility=hidden BUILTIN_CFLAGS) @@ -603,25 +618,8 @@ else () endif() endif() - # Filter out generic versions of routines that are re-implemented in - # architecture specific manner. This prevents multiple definitions of the - # same symbols, making the symbol selection non-deterministic. - foreach (_file ${${arch}_SOURCES}) - get_filename_component(_file_dir "${_file}" DIRECTORY) - if (NOT "${_file_dir}" STREQUAL "") - # Architecture specific file. We follow the convention that a source - # file that exists in a sub-directory (e.g. 
`ppc/divtc3.c`) is - # architecture specific and that if a generic implementation exists - # it will be a top-level source file with the same name modulo the - # file extension (e.g. `divtc3.c`). - get_filename_component(_name ${_file} NAME) - string(REPLACE ".S" ".c" _cname "${_name}") - if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/${_cname}") - message(STATUS "For ${arch} builtins preferring ${_file} to ${_cname}") - list(REMOVE_ITEM ${arch}_SOURCES ${_cname}) - endif() - endif () - endforeach () + # Remove a generic C builtin when an arch-specific builtin is specified. + filter_builtin_sources(${arch}_SOURCES ${arch}) # Needed for clear_cache on debug mode, due to r7's usage in inline asm. # Release mode already sets it via -O2/3, Debug mode doesn't. diff --git a/gnu/llvm/compiler-rt/lib/builtins/README.txt b/gnu/llvm/compiler-rt/lib/builtins/README.txt index e603dfa0535..f9e1bc80509 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/README.txt +++ b/gnu/llvm/compiler-rt/lib/builtins/README.txt @@ -20,13 +20,18 @@ Here is the specification for this library: http://gcc.gnu.org/onlinedocs/gccint/Libgcc.html#Libgcc +Please note that the libgcc specification explicitly mentions actual types of +arguments and returned values being expressed with machine modes. +In some cases particular types such as "int", "unsigned", "long long", etc. +may be specified just as examples there. + Here is a synopsis of the contents of this library: -typedef int si_int; -typedef unsigned su_int; +typedef int32_t si_int; +typedef uint32_t su_int; -typedef long long di_int; -typedef unsigned long long du_int; +typedef int64_t di_int; +typedef uint64_t du_int; // Integral bit manipulation @@ -38,24 +43,24 @@ ti_int __ashrti3(ti_int a, si_int b); // a >> b arithmetic (sign fill) di_int __lshrdi3(di_int a, si_int b); // a >> b logical (zero fill) ti_int __lshrti3(ti_int a, si_int b); // a >> b logical (zero fill) -si_int __clzsi2(si_int a); // count leading zeros -si_int __clzdi2(di_int a); // count leading zeros -si_int __clzti2(ti_int a); // count leading zeros -si_int __ctzsi2(si_int a); // count trailing zeros -si_int __ctzdi2(di_int a); // count trailing zeros -si_int __ctzti2(ti_int a); // count trailing zeros +int __clzsi2(si_int a); // count leading zeros +int __clzdi2(di_int a); // count leading zeros +int __clzti2(ti_int a); // count leading zeros +int __ctzsi2(si_int a); // count trailing zeros +int __ctzdi2(di_int a); // count trailing zeros +int __ctzti2(ti_int a); // count trailing zeros -si_int __ffssi2(si_int a); // find least significant 1 bit -si_int __ffsdi2(di_int a); // find least significant 1 bit -si_int __ffsti2(ti_int a); // find least significant 1 bit +int __ffssi2(si_int a); // find least significant 1 bit +int __ffsdi2(di_int a); // find least significant 1 bit +int __ffsti2(ti_int a); // find least significant 1 bit -si_int __paritysi2(si_int a); // bit parity -si_int __paritydi2(di_int a); // bit parity -si_int __parityti2(ti_int a); // bit parity +int __paritysi2(si_int a); // bit parity +int __paritydi2(di_int a); // bit parity +int __parityti2(ti_int a); // bit parity -si_int __popcountsi2(si_int a); // bit population -si_int __popcountdi2(di_int a); // bit population -si_int __popcountti2(ti_int a); // bit population +int __popcountsi2(si_int a); // bit population +int __popcountdi2(di_int a); // bit population +int __popcountti2(ti_int a); // bit population uint32_t __bswapsi2(uint32_t a); // a byteswapped uint64_t __bswapdi2(uint64_t a); // a byteswapped @@ -169,10 +174,10 @@ long double 
__floatuntixf(tu_int a); // Floating point raised to integer power -float __powisf2( float a, si_int b); // a ^ b -double __powidf2( double a, si_int b); // a ^ b -long double __powixf2(long double a, si_int b); // a ^ b -long double __powitf2(long double a, si_int b); // ppc only, a ^ b +float __powisf2( float a, int b); // a ^ b +double __powidf2( double a, int b); // a ^ b +long double __powixf2(long double a, int b); // a ^ b +long double __powitf2(long double a, int b); // ppc only, a ^ b // Complex arithmetic diff --git a/gnu/llvm/compiler-rt/lib/builtins/absvsi2.c b/gnu/llvm/compiler-rt/lib/builtins/absvsi2.c index 44ada169e7e..9d5de7e8a3f 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/absvsi2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/absvsi2.c @@ -18,7 +18,7 @@ COMPILER_RT_ABI si_int __absvsi2(si_int a) { const int N = (int)(sizeof(si_int) * CHAR_BIT); - if (a == (1 << (N - 1))) + if (a == ((si_int)1 << (N - 1))) compilerrt_abort(); const si_int t = a >> (N - 1); return (a ^ t) - t; diff --git a/gnu/llvm/compiler-rt/lib/builtins/ashldi3.c b/gnu/llvm/compiler-rt/lib/builtins/ashldi3.c index 7c81057a228..04f22228f11 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/ashldi3.c +++ b/gnu/llvm/compiler-rt/lib/builtins/ashldi3.c @@ -16,7 +16,7 @@ // Precondition: 0 <= b < bits_in_dword -COMPILER_RT_ABI di_int __ashldi3(di_int a, si_int b) { +COMPILER_RT_ABI di_int __ashldi3(di_int a, int b) { const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT); dwords input; dwords result; diff --git a/gnu/llvm/compiler-rt/lib/builtins/ashrdi3.c b/gnu/llvm/compiler-rt/lib/builtins/ashrdi3.c index b9939132205..934a5c47fd6 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/ashrdi3.c +++ b/gnu/llvm/compiler-rt/lib/builtins/ashrdi3.c @@ -16,7 +16,7 @@ // Precondition: 0 <= b < bits_in_dword -COMPILER_RT_ABI di_int __ashrdi3(di_int a, si_int b) { +COMPILER_RT_ABI di_int __ashrdi3(di_int a, int b) { const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT); dwords input; dwords result; diff --git a/gnu/llvm/compiler-rt/lib/builtins/atomic.c b/gnu/llvm/compiler-rt/lib/builtins/atomic.c index 32b3a0f9ad2..8634a72e77d 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/atomic.c +++ b/gnu/llvm/compiler-rt/lib/builtins/atomic.c @@ -23,6 +23,7 @@ // //===----------------------------------------------------------------------===// +#include <stdbool.h> #include <stdint.h> #include <string.h> @@ -293,8 +294,8 @@ OPTIMISED_CASES #undef OPTIMISED_CASE #define OPTIMISED_CASE(n, lockfree, type) \ - int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \ - int success, int failure) { \ + bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \ + int success, int failure) { \ if (lockfree) \ return __c11_atomic_compare_exchange_strong( \ (_Atomic(type) *)ptr, expected, desired, success, failure); \ @@ -303,11 +304,11 @@ OPTIMISED_CASES if (*ptr == *expected) { \ *ptr = desired; \ unlock(l); \ - return 1; \ + return true; \ } \ *expected = *ptr; \ unlock(l); \ - return 0; \ + return false; \ } OPTIMISED_CASES #undef OPTIMISED_CASE diff --git a/gnu/llvm/compiler-rt/lib/builtins/clzdi2.c b/gnu/llvm/compiler-rt/lib/builtins/clzdi2.c index a0bacb2ae39..12c17982a5c 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/clzdi2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/clzdi2.c @@ -21,15 +21,15 @@ // ctz instruction, gcc resolves __builtin_clz to __clzdi2 rather than // __clzsi2, leading to infinite recursion. 
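Spelling out the hazard described above: on a target with no native count-leading-zeros instruction, the compiler may lower __builtin_clz inside __clzdi2 into a call back into the runtime library, so the 64-bit helper must route through the 32-bit one explicitly. A freestanding sketch of the same split (clz64 is a made-up name; linking against the compiler-rt builtins or libgcc is assumed):

  #include <cstdint>

  extern "C" int __clzsi2(std::int32_t a);  // 32-bit helper; precondition a != 0

  int clz64(std::uint64_t a) {  // precondition: a != 0
    std::uint32_t hi = static_cast<std::uint32_t>(a >> 32);
    std::uint32_t lo = static_cast<std::uint32_t>(a);
    // Only 32-bit helper calls, so no 64-bit builtin can be re-expanded
    // into a self-call.
    return hi ? __clzsi2(static_cast<std::int32_t>(hi))
              : 32 + __clzsi2(static_cast<std::int32_t>(lo));
  }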
#define __builtin_clz(a) __clzsi2(a) -extern si_int __clzsi2(si_int); +extern int __clzsi2(si_int); #endif // Precondition: a != 0 -COMPILER_RT_ABI si_int __clzdi2(di_int a) { +COMPILER_RT_ABI int __clzdi2(di_int a) { dwords x; x.all = a; const si_int f = -(x.s.high == 0); - return __builtin_clz((x.s.high & ~f) | (x.s.low & f)) + + return clzsi((x.s.high & ~f) | (x.s.low & f)) + (f & ((si_int)(sizeof(si_int) * CHAR_BIT))); } diff --git a/gnu/llvm/compiler-rt/lib/builtins/clzsi2.c b/gnu/llvm/compiler-rt/lib/builtins/clzsi2.c index 3f9f27f4133..d75f56d937b 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/clzsi2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/clzsi2.c @@ -16,7 +16,7 @@ // Precondition: a != 0 -COMPILER_RT_ABI si_int __clzsi2(si_int a) { +COMPILER_RT_ABI int __clzsi2(si_int a) { su_int x = (su_int)a; si_int t = ((x & 0xFFFF0000) == 0) << 4; // if (x is small) t = 16 else 0 x >>= 16 - t; // x = [0 - 0xFFFF] diff --git a/gnu/llvm/compiler-rt/lib/builtins/clzti2.c b/gnu/llvm/compiler-rt/lib/builtins/clzti2.c index 0c787104caa..25d30119f27 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/clzti2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/clzti2.c @@ -18,7 +18,7 @@ // Precondition: a != 0 -COMPILER_RT_ABI si_int __clzti2(ti_int a) { +COMPILER_RT_ABI int __clzti2(ti_int a) { twords x; x.all = a; const di_int f = -(x.s.high == 0); diff --git a/gnu/llvm/compiler-rt/lib/builtins/cpu_model.c b/gnu/llvm/compiler-rt/lib/builtins/cpu_model.c index fb619037d39..468bcc84cbc 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/cpu_model.c +++ b/gnu/llvm/compiler-rt/lib/builtins/cpu_model.c @@ -82,6 +82,8 @@ enum ProcessorSubtypes { INTEL_COREI7_ICELAKE_SERVER, AMDFAM17H_ZNVER2, INTEL_COREI7_CASCADELAKE, + INTEL_COREI7_TIGERLAKE, + INTEL_COREI7_COOPERLAKE, CPU_SUBTYPE_MAX }; @@ -122,7 +124,9 @@ enum ProcessorFeatures { FEATURE_VPCLMULQDQ, FEATURE_AVX512VNNI, FEATURE_AVX512BITALG, - FEATURE_AVX512BF16 + FEATURE_AVX512BF16, + FEATURE_AVX512VP2INTERSECT, + CPU_FEATURE_MAX }; // The check below for i386 was copied from clang's cpuid.h (__get_cpuid_max). @@ -268,13 +272,17 @@ static void detectX86FamilyModel(unsigned EAX, unsigned *Family, } } -static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, - unsigned Brand_id, - unsigned Features, - unsigned Features2, unsigned *Type, - unsigned *Subtype) { - if (Brand_id != 0) - return; +static const char * +getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, + const unsigned *Features, + unsigned *Type, unsigned *Subtype) { +#define testFeature(F) \ + (Features[F / 32] & (1 << (F % 32))) != 0 + + // We select CPU strings to match the code in Host.cpp, but we don't use them + // in compiler-rt. + const char *CPU = 0; + switch (Family) { case 6: switch (Model) { @@ -285,13 +293,17 @@ static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, // 0Fh. All processors are manufactured using the 65 nm process. case 0x16: // Intel Celeron processor model 16h. All processors are // manufactured using the 65 nm process + CPU = "core2"; + *Type = INTEL_CORE2; + break; case 0x17: // Intel Core 2 Extreme processor, Intel Xeon processor, model // 17h. All processors are manufactured using the 45 nm process. // // 45nm: Penryn , Wolfdale, Yorkfield (XE) case 0x1d: // Intel Xeon processor MP. All processors are manufactured using // the 45 nm process. - *Type = INTEL_CORE2; // "penryn" + CPU = "penryn"; + *Type = INTEL_CORE2; break; case 0x1a: // Intel Core i7 processor and Intel Xeon processor. 
All // processors are manufactured using the 45 nm process. @@ -299,25 +311,29 @@ static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, // As found in a Summer 2010 model iMac. case 0x1f: case 0x2e: // Nehalem EX - *Type = INTEL_COREI7; // "nehalem" + CPU = "nehalem"; + *Type = INTEL_COREI7; *Subtype = INTEL_COREI7_NEHALEM; break; case 0x25: // Intel Core i7, laptop version. case 0x2c: // Intel Core i7 processor and Intel Xeon processor. All // processors are manufactured using the 32 nm process. case 0x2f: // Westmere EX - *Type = INTEL_COREI7; // "westmere" + CPU = "westmere"; + *Type = INTEL_COREI7; *Subtype = INTEL_COREI7_WESTMERE; break; case 0x2a: // Intel Core i7 processor. All processors are manufactured // using the 32 nm process. case 0x2d: - *Type = INTEL_COREI7; //"sandybridge" + CPU = "sandybridge"; + *Type = INTEL_COREI7; *Subtype = INTEL_COREI7_SANDYBRIDGE; break; case 0x3a: case 0x3e: // Ivy Bridge EP - *Type = INTEL_COREI7; // "ivybridge" + CPU = "ivybridge"; + *Type = INTEL_COREI7; *Subtype = INTEL_COREI7_IVYBRIDGE; break; @@ -326,7 +342,8 @@ static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, case 0x3f: case 0x45: case 0x46: - *Type = INTEL_COREI7; // "haswell" + CPU = "haswell"; + *Type = INTEL_COREI7; *Subtype = INTEL_COREI7_HASWELL; break; @@ -335,7 +352,8 @@ static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, case 0x47: case 0x4f: case 0x56: - *Type = INTEL_COREI7; // "broadwell" + CPU = "broadwell"; + *Type = INTEL_COREI7; *Subtype = INTEL_COREI7_BROADWELL; break; @@ -344,37 +362,49 @@ static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, case 0x5e: // Skylake desktop case 0x8e: // Kaby Lake mobile case 0x9e: // Kaby Lake desktop - *Type = INTEL_COREI7; // "skylake" + case 0xa5: // Comet Lake-H/S + case 0xa6: // Comet Lake-U + CPU = "skylake"; + *Type = INTEL_COREI7; *Subtype = INTEL_COREI7_SKYLAKE; break; // Skylake Xeon: case 0x55: *Type = INTEL_COREI7; - if (Features2 & (1 << (FEATURE_AVX512VNNI - 32))) - *Subtype = INTEL_COREI7_CASCADELAKE; // "cascadelake" - else - *Subtype = INTEL_COREI7_SKYLAKE_AVX512; // "skylake-avx512" + if (testFeature(FEATURE_AVX512BF16)) { + CPU = "cooperlake"; + *Subtype = INTEL_COREI7_COOPERLAKE; + } else if (testFeature(FEATURE_AVX512VNNI)) { + CPU = "cascadelake"; + *Subtype = INTEL_COREI7_CASCADELAKE; + } else { + CPU = "skylake-avx512"; + *Subtype = INTEL_COREI7_SKYLAKE_AVX512; + } break; // Cannonlake: case 0x66: + CPU = "cannonlake"; *Type = INTEL_COREI7; - *Subtype = INTEL_COREI7_CANNONLAKE; // "cannonlake" + *Subtype = INTEL_COREI7_CANNONLAKE; break; // Icelake: case 0x7d: case 0x7e: + CPU = "icelake-client"; *Type = INTEL_COREI7; - *Subtype = INTEL_COREI7_ICELAKE_CLIENT; // "icelake-client" + *Subtype = INTEL_COREI7_ICELAKE_CLIENT; break; // Icelake Xeon: case 0x6a: case 0x6c: + CPU = "icelake-server"; *Type = INTEL_COREI7; - *Subtype = INTEL_COREI7_ICELAKE_SERVER; // "icelake-server" + *Subtype = INTEL_COREI7_ICELAKE_SERVER; break; case 0x1c: // Most 45 nm Intel Atom processors @@ -382,8 +412,9 @@ static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, case 0x27: // 32 nm Atom Medfield case 0x35: // 32 nm Atom Midview case 0x36: // 32 nm Atom Midview + CPU = "bonnell"; *Type = INTEL_BONNELL; - break; // "bonnell" + break; // Atom Silvermont codes from the Intel software optimization guide. 
case 0x37: @@ -392,26 +423,32 @@ static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, case 0x5a: case 0x5d: case 0x4c: // really airmont + CPU = "silvermont"; *Type = INTEL_SILVERMONT; - break; // "silvermont" + break; // Goldmont: case 0x5c: // Apollo Lake case 0x5f: // Denverton + CPU = "goldmont"; *Type = INTEL_GOLDMONT; break; // "goldmont" case 0x7a: + CPU = "goldmont-plus"; *Type = INTEL_GOLDMONT_PLUS; break; case 0x86: + CPU = "tremont"; *Type = INTEL_TREMONT; break; case 0x57: - *Type = INTEL_KNL; // knl + CPU = "knl"; + *Type = INTEL_KNL; break; case 0x85: - *Type = INTEL_KNM; // knm + CPU = "knm"; + *Type = INTEL_KNM; break; default: // Unknown family 6 CPU. @@ -421,17 +458,22 @@ static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, default: break; // Unknown. } + + return CPU; } -static void getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model, - unsigned Features, unsigned Features2, - unsigned *Type, unsigned *Subtype) { - // FIXME: this poorly matches the generated SubtargetFeatureKV table. There - // appears to be no way to generate the wide variety of AMD-specific targets - // from the information returned from CPUID. +static const char * +getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model, + const unsigned *Features, + unsigned *Type, unsigned *Subtype) { + // We select CPU strings to match the code in Host.cpp, but we don't use them + // in compiler-rt. + const char *CPU = 0; + switch (Family) { case 16: - *Type = AMDFAM10H; // "amdfam10" + CPU = "amdfam10"; + *Type = AMDFAM10H; switch (Model) { case 2: *Subtype = AMDFAM10H_BARCELONA; @@ -445,60 +487,62 @@ static void getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model, } break; case 20: + CPU = "btver1"; *Type = AMD_BTVER1; - break; // "btver1"; + break; case 21: + CPU = "bdver1"; *Type = AMDFAM15H; if (Model >= 0x60 && Model <= 0x7f) { + CPU = "bdver4"; *Subtype = AMDFAM15H_BDVER4; - break; // "bdver4"; 60h-7Fh: Excavator + break; // 60h-7Fh: Excavator } if (Model >= 0x30 && Model <= 0x3f) { + CPU = "bdver3"; *Subtype = AMDFAM15H_BDVER3; - break; // "bdver3"; 30h-3Fh: Steamroller + break; // 30h-3Fh: Steamroller } if ((Model >= 0x10 && Model <= 0x1f) || Model == 0x02) { + CPU = "bdver2"; *Subtype = AMDFAM15H_BDVER2; - break; // "bdver2"; 02h, 10h-1Fh: Piledriver + break; // 02h, 10h-1Fh: Piledriver } if (Model <= 0x0f) { *Subtype = AMDFAM15H_BDVER1; - break; // "bdver1"; 00h-0Fh: Bulldozer + break; // 00h-0Fh: Bulldozer } break; case 22: + CPU = "btver2"; *Type = AMD_BTVER2; - break; // "btver2" + break; case 23: + CPU = "znver1"; *Type = AMDFAM17H; if ((Model >= 0x30 && Model <= 0x3f) || Model == 0x71) { + CPU = "znver2"; *Subtype = AMDFAM17H_ZNVER2; - break; // "znver2"; 30h-3fh, 71h: Zen2 + break; // 30h-3fh, 71h: Zen2 } if (Model <= 0x0f) { *Subtype = AMDFAM17H_ZNVER1; - break; // "znver1"; 00h-0Fh: Zen1 + break; // 00h-0Fh: Zen1 } break; default: - break; // "generic" + break; // Unknown AMD CPU. 
} + + return CPU; } static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, - unsigned *FeaturesOut, - unsigned *Features2Out) { - unsigned Features = 0; - unsigned Features2 = 0; + unsigned *Features) { unsigned EAX, EBX; #define setFeature(F) \ - do { \ - if (F < 32) \ - Features |= 1U << (F & 0x1f); \ - else if (F < 64) \ - Features2 |= 1U << ((F - 32) & 0x1f); \ - } while (0) + Features[F / 32] |= 1U << (F % 32) if ((EDX >> 15) & 1) setFeature(FEATURE_CMOV); @@ -590,6 +634,8 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, setFeature(FEATURE_AVX5124VNNIW); if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save) setFeature(FEATURE_AVX5124FMAPS); + if (HasLeaf7 && ((EDX >> 8) & 1) && HasAVX512Save) + setFeature(FEATURE_AVX512VP2INTERSECT); bool HasLeaf7Subleaf1 = MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x1, &EAX, &EBX, &ECX, &EDX); @@ -607,9 +653,6 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, setFeature(FEATURE_XOP); if (HasExtLeaf1 && ((ECX >> 16) & 1)) setFeature(FEATURE_FMA4); - - *FeaturesOut = Features; - *Features2Out = Features2; #undef setFeature } @@ -641,7 +684,7 @@ struct __processor_model { #ifndef _WIN32 __attribute__((visibility("hidden"))) #endif -unsigned int __cpu_features2; +unsigned int __cpu_features2 = 0; // A constructor function that sets __cpu_model and __cpu_features2 with // the right values. This needs to run only once. This constructor is @@ -653,40 +696,38 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) { unsigned EAX, EBX, ECX, EDX; unsigned MaxLeaf = 5; unsigned Vendor; - unsigned Model, Family, Brand_id; - unsigned Features = 0; - unsigned Features2 = 0; + unsigned Model, Family; + unsigned Features[(CPU_FEATURE_MAX + 31) / 32] = {0}; // This function needs to run just once. if (__cpu_model.__cpu_vendor) return 0; - if (!isCpuIdSupported()) - return -1; - - // Assume cpuid insn present. Run in level 0 to get vendor id. - if (getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) || MaxLeaf < 1) { + if (!isCpuIdSupported() || + getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) || MaxLeaf < 1) { __cpu_model.__cpu_vendor = VENDOR_OTHER; return -1; } + getX86CpuIDAndInfo(1, &EAX, &EBX, &ECX, &EDX); detectX86FamilyModel(EAX, &Family, &Model); - Brand_id = EBX & 0xff; // Find available features. - getAvailableFeatures(ECX, EDX, MaxLeaf, &Features, &Features2); - __cpu_model.__cpu_features[0] = Features; - __cpu_features2 = Features2; + getAvailableFeatures(ECX, EDX, MaxLeaf, &Features[0]); + + assert((sizeof(Features)/sizeof(Features[0])) == 2); + __cpu_model.__cpu_features[0] = Features[0]; + __cpu_features2 = Features[1]; if (Vendor == SIG_INTEL) { // Get CPU type. getIntelProcessorTypeAndSubtype(Family, Model, &Features[0], &(__cpu_model.__cpu_type), &(__cpu_model.__cpu_subtype)); __cpu_model.__cpu_vendor = VENDOR_INTEL; } else if (Vendor == SIG_AMD) { // Get CPU type.
- getAMDProcessorTypeAndSubtype(Family, Model, Features, Features2, + getAMDProcessorTypeAndSubtype(Family, Model, &Features[0], &(__cpu_model.__cpu_type), &(__cpu_model.__cpu_subtype)); __cpu_model.__cpu_vendor = VENDOR_AMD; diff --git a/gnu/llvm/compiler-rt/lib/builtins/ctzdi2.c b/gnu/llvm/compiler-rt/lib/builtins/ctzdi2.c index 9384aa6055a..26c908d876a 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/ctzdi2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/ctzdi2.c @@ -21,15 +21,15 @@ // ctz instruction, gcc resolves __builtin_ctz to __ctzdi2 rather than // __ctzsi2, leading to infinite recursion. #define __builtin_ctz(a) __ctzsi2(a) -extern si_int __ctzsi2(si_int); +extern int __ctzsi2(si_int); #endif // Precondition: a != 0 -COMPILER_RT_ABI si_int __ctzdi2(di_int a) { +COMPILER_RT_ABI int __ctzdi2(di_int a) { dwords x; x.all = a; const si_int f = -(x.s.low == 0); - return __builtin_ctz((x.s.high & f) | (x.s.low & ~f)) + + return ctzsi((x.s.high & f) | (x.s.low & ~f)) + (f & ((si_int)(sizeof(si_int) * CHAR_BIT))); } diff --git a/gnu/llvm/compiler-rt/lib/builtins/ctzsi2.c b/gnu/llvm/compiler-rt/lib/builtins/ctzsi2.c index 09c6863b74e..ed95c605793 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/ctzsi2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/ctzsi2.c @@ -16,7 +16,7 @@ // Precondition: a != 0 -COMPILER_RT_ABI si_int __ctzsi2(si_int a) { +COMPILER_RT_ABI int __ctzsi2(si_int a) { su_int x = (su_int)a; si_int t = ((x & 0x0000FFFF) == 0) << 4; // if (x has no small bits) t = 16 else 0 diff --git a/gnu/llvm/compiler-rt/lib/builtins/ctzti2.c b/gnu/llvm/compiler-rt/lib/builtins/ctzti2.c index 2a1312c8437..fb136d0de1c 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/ctzti2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/ctzti2.c @@ -18,7 +18,7 @@ // Precondition: a != 0 -COMPILER_RT_ABI si_int __ctzti2(ti_int a) { +COMPILER_RT_ABI int __ctzti2(ti_int a) { twords x; x.all = a; const di_int f = -(x.s.low == 0); diff --git a/gnu/llvm/compiler-rt/lib/builtins/ffsdi2.c b/gnu/llvm/compiler-rt/lib/builtins/ffsdi2.c index 9c1a2426095..beae5530430 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/ffsdi2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/ffsdi2.c @@ -15,13 +15,13 @@ // Returns: the index of the least significant 1-bit in a, or // the value zero if a is zero. The least significant bit is index one. -COMPILER_RT_ABI si_int __ffsdi2(di_int a) { +COMPILER_RT_ABI int __ffsdi2(di_int a) { dwords x; x.all = a; if (x.s.low == 0) { if (x.s.high == 0) return 0; - return __builtin_ctz(x.s.high) + (1 + sizeof(si_int) * CHAR_BIT); + return ctzsi(x.s.high) + (1 + sizeof(si_int) * CHAR_BIT); } - return __builtin_ctz(x.s.low) + 1; + return ctzsi(x.s.low) + 1; } diff --git a/gnu/llvm/compiler-rt/lib/builtins/ffssi2.c b/gnu/llvm/compiler-rt/lib/builtins/ffssi2.c index cba1f72fdc6..ddb52927f8d 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/ffssi2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/ffssi2.c @@ -15,9 +15,9 @@ // Returns: the index of the least significant 1-bit in a, or // the value zero if a is zero. The least significant bit is index one. 
-COMPILER_RT_ABI si_int __ffssi2(si_int a) { +COMPILER_RT_ABI int __ffssi2(si_int a) { if (a == 0) { return 0; } - return __builtin_ctz(a) + 1; + return ctzsi(a) + 1; } diff --git a/gnu/llvm/compiler-rt/lib/builtins/ffsti2.c b/gnu/llvm/compiler-rt/lib/builtins/ffsti2.c index a2d7ce08ada..a2177d148a0 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/ffsti2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/ffsti2.c @@ -17,7 +17,7 @@ // Returns: the index of the least significant 1-bit in a, or // the value zero if a is zero. The least significant bit is index one. -COMPILER_RT_ABI si_int __ffsti2(ti_int a) { +COMPILER_RT_ABI int __ffsti2(ti_int a) { twords x; x.all = a; if (x.s.low == 0) { diff --git a/gnu/llvm/compiler-rt/lib/builtins/floatdidf.c b/gnu/llvm/compiler-rt/lib/builtins/floatdidf.c index 8f887314b9e..b2d8f2b44b6 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/floatdidf.c +++ b/gnu/llvm/compiler-rt/lib/builtins/floatdidf.c @@ -87,7 +87,7 @@ COMPILER_RT_ABI double __floatdidf(di_int a) { } double_bits fb; fb.u.s.high = ((su_int)s & 0x80000000) | // sign - ((e + 1023) << 20) | // exponent + ((su_int)(e + 1023) << 20) | // exponent ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high fb.u.s.low = (su_int)a; // mantissa-low return fb.f; diff --git a/gnu/llvm/compiler-rt/lib/builtins/floatdisf.c b/gnu/llvm/compiler-rt/lib/builtins/floatdisf.c index cd9e0a3b78a..faaa1bcb3c8 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/floatdisf.c +++ b/gnu/llvm/compiler-rt/lib/builtins/floatdisf.c @@ -26,7 +26,7 @@ COMPILER_RT_ABI float __floatdisf(di_int a) { const di_int s = a >> (N - 1); a = (a ^ s) - s; int sd = N - __builtin_clzll(a); // number of significant digits - int e = sd - 1; // exponent + si_int e = sd - 1; // exponent if (sd > FLT_MANT_DIG) { // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR diff --git a/gnu/llvm/compiler-rt/lib/builtins/floatsidf.c b/gnu/llvm/compiler-rt/lib/builtins/floatsidf.c index 2c66167d794..28cf32f6388 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/floatsidf.c +++ b/gnu/llvm/compiler-rt/lib/builtins/floatsidf.c @@ -17,7 +17,7 @@ #include "int_lib.h" -COMPILER_RT_ABI fp_t __floatsidf(int a) { +COMPILER_RT_ABI fp_t __floatsidf(si_int a) { const int aWidth = sizeof a * CHAR_BIT; @@ -33,14 +33,14 @@ COMPILER_RT_ABI fp_t __floatsidf(int a) { } // Exponent of (fp_t)a is the width of abs(a). - const int exponent = (aWidth - 1) - __builtin_clz(a); + const int exponent = (aWidth - 1) - clzsi(a); rep_t result; // Shift a into the significand field and clear the implicit bit. Extra // cast to unsigned int is necessary to get the correct behavior for // the input INT_MIN. 
const int shift = significandBits - exponent; - result = (rep_t)(unsigned int)a << shift ^ implicitBit; + result = (rep_t)(su_int)a << shift ^ implicitBit; // Insert the exponent result += (rep_t)(exponent + exponentBias) << significandBits; @@ -50,7 +50,7 @@ COMPILER_RT_ABI fp_t __floatsidf(int a) { #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) -AEABI_RTABI fp_t __aeabi_i2d(int a) { return __floatsidf(a); } +AEABI_RTABI fp_t __aeabi_i2d(si_int a) { return __floatsidf(a); } #else COMPILER_RT_ALIAS(__floatsidf, __aeabi_i2d) #endif diff --git a/gnu/llvm/compiler-rt/lib/builtins/floatundidf.c b/gnu/llvm/compiler-rt/lib/builtins/floatundidf.c index e7c6aae5ce3..4c445b11808 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/floatundidf.c +++ b/gnu/llvm/compiler-rt/lib/builtins/floatundidf.c @@ -90,7 +90,7 @@ COMPILER_RT_ABI double __floatundidf(du_int a) { // a is now rounded to DBL_MANT_DIG bits } double_bits fb; - fb.u.s.high = ((e + 1023) << 20) | // exponent + fb.u.s.high = ((su_int)(e + 1023) << 20) | // exponent ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high fb.u.s.low = (su_int)a; // mantissa-low return fb.f; diff --git a/gnu/llvm/compiler-rt/lib/builtins/floatundisf.c b/gnu/llvm/compiler-rt/lib/builtins/floatundisf.c index 87841b761de..00d61b0c631 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/floatundisf.c +++ b/gnu/llvm/compiler-rt/lib/builtins/floatundisf.c @@ -24,7 +24,7 @@ COMPILER_RT_ABI float __floatundisf(du_int a) { return 0.0F; const unsigned N = sizeof(du_int) * CHAR_BIT; int sd = N - __builtin_clzll(a); // number of significant digits - int e = sd - 1; // 8 exponent + si_int e = sd - 1; // 8 exponent if (sd > FLT_MANT_DIG) { // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR diff --git a/gnu/llvm/compiler-rt/lib/builtins/floatunsidf.c b/gnu/llvm/compiler-rt/lib/builtins/floatunsidf.c index 2c01c304143..9b3e5fea0e4 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/floatunsidf.c +++ b/gnu/llvm/compiler-rt/lib/builtins/floatunsidf.c @@ -17,7 +17,7 @@ #include "int_lib.h" -COMPILER_RT_ABI fp_t __floatunsidf(unsigned int a) { +COMPILER_RT_ABI fp_t __floatunsidf(su_int a) { const int aWidth = sizeof a * CHAR_BIT; @@ -26,7 +26,7 @@ COMPILER_RT_ABI fp_t __floatunsidf(unsigned int a) { return fromRep(0); // Exponent of (fp_t)a is the width of abs(a). - const int exponent = (aWidth - 1) - __builtin_clz(a); + const int exponent = (aWidth - 1) - clzsi(a); rep_t result; // Shift a into the significand field and clear the implicit bit. 
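For the conversion fixes above and in floatdidf/floatundidf: the high word of a double packs the sign, the biased exponent, and the top 20 mantissa bits, and the added (su_int) casts make the exponent shift happen in unsigned arithmetic before it is OR-ed into the other unsigned fields. A hand-worked sketch of the same packing for the value 1.5 (the field values below are computed for this example, not taken from the diff):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
      uint32_t sign = 0;                   // positive
      int e = 0;                           // unbiased exponent of 1.5
      uint64_t mant = 0x8000000000000ULL;  // 52-bit fraction of 1.5

      uint32_t high = sign |
                      ((uint32_t)(e + 1023) << 20) | // cast first, then shift
                      (uint32_t)(mant >> 32);
      uint32_t low = (uint32_t)mant;

      uint64_t bits = ((uint64_t)high << 32) | low;
      double d;
      memcpy(&d, &bits, sizeof d);         // bit pattern to double
      printf("%g\n", d);                   // prints 1.5
      return 0;
    }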
@@ -40,7 +40,7 @@ COMPILER_RT_ABI fp_t __floatunsidf(unsigned int a) { #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) -AEABI_RTABI fp_t __aeabi_ui2d(unsigned int a) { return __floatunsidf(a); } +AEABI_RTABI fp_t __aeabi_ui2d(su_int a) { return __floatunsidf(a); } #else COMPILER_RT_ALIAS(__floatunsidf, __aeabi_ui2d) #endif diff --git a/gnu/llvm/compiler-rt/lib/builtins/fp_extend.h b/gnu/llvm/compiler-rt/lib/builtins/fp_extend.h index d2083c42672..fb512672e35 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/fp_extend.h +++ b/gnu/llvm/compiler-rt/lib/builtins/fp_extend.h @@ -21,7 +21,7 @@ typedef float src_t; typedef uint32_t src_rep_t; #define SRC_REP_C UINT32_C static const int srcSigBits = 23; -#define src_rep_t_clz __builtin_clz +#define src_rep_t_clz clzsi #elif defined SRC_DOUBLE typedef double src_t; diff --git a/gnu/llvm/compiler-rt/lib/builtins/fp_lib.h b/gnu/llvm/compiler-rt/lib/builtins/fp_lib.h index e2a906681c4..bd1f180f499 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/fp_lib.h +++ b/gnu/llvm/compiler-rt/lib/builtins/fp_lib.h @@ -46,7 +46,7 @@ typedef float fp_t; #define REP_C UINT32_C #define significandBits 23 -static __inline int rep_clz(rep_t a) { return __builtin_clz(a); } +static __inline int rep_clz(rep_t a) { return clzsi(a); } // 32x32 --> 64 bit multiply static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) { @@ -69,9 +69,9 @@ static __inline int rep_clz(rep_t a) { return __builtin_clzl(a); #else if (a & REP_C(0xffffffff00000000)) - return __builtin_clz(a >> 32); + return clzsi(a >> 32); else - return 32 + __builtin_clz(a & REP_C(0xffffffff)); + return 32 + clzsi(a & REP_C(0xffffffff)); #endif } diff --git a/gnu/llvm/compiler-rt/lib/builtins/fp_mode.h b/gnu/llvm/compiler-rt/lib/builtins/fp_mode.h index 51bec0431a4..4ba682c384f 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/fp_mode.h +++ b/gnu/llvm/compiler-rt/lib/builtins/fp_mode.h @@ -23,7 +23,7 @@ typedef enum { FE_TOWARDZERO } FE_ROUND_MODE; -FE_ROUND_MODE __fe_getround(); -int __fe_raise_inexact(); +FE_ROUND_MODE __fe_getround(void); +int __fe_raise_inexact(void); #endif // FP_MODE_H diff --git a/gnu/llvm/compiler-rt/lib/builtins/hexagon/dffma.S b/gnu/llvm/compiler-rt/lib/builtins/hexagon/dffma.S index c201d3d8be5..843e88b3cab 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/hexagon/dffma.S +++ b/gnu/llvm/compiler-rt/lib/builtins/hexagon/dffma.S @@ -104,13 +104,11 @@ .type __hexagon_fmadf4,@function .global __hexagon_fmadf5 .type __hexagon_fmadf5,@function - .global fma - .type fma,@function Q6_ALIAS(fmadf5) .p2align 5 __hexagon_fmadf4: __hexagon_fmadf5: -fma: +.Lfma_begin: { P_TMP = dfclass(A,#2) P_TMP = dfclass(B,#2) @@ -561,7 +559,7 @@ fma: B = insert(BTMP,#63,#0) AH -= asl(TMP,#HI_MANTBITS) } - jump fma + jump .Lfma_begin .Lfma_ab_tiny: ATMP = combine(##0x00100000,#0) @@ -569,7 +567,7 @@ fma: A = insert(ATMP,#63,#0) B = insert(ATMP,#63,#0) } - jump fma + jump .Lfma_begin .Lab_inf: { diff --git a/gnu/llvm/compiler-rt/lib/builtins/i386/floatdidf.S b/gnu/llvm/compiler-rt/lib/builtins/i386/floatdidf.S index ab7422c312d..d588e770364 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/i386/floatdidf.S +++ b/gnu/llvm/compiler-rt/lib/builtins/i386/floatdidf.S @@ -4,7 +4,7 @@ #include "../assembly.h" -// double __floatundidf(du_int a); +// double __floatdidf(du_int a); #ifdef __i386__ diff --git a/gnu/llvm/compiler-rt/lib/builtins/i386/floatdixf.S b/gnu/llvm/compiler-rt/lib/builtins/i386/floatdixf.S index df70f5f9e6e..19dd0835a9c 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/i386/floatdixf.S 
+++ b/gnu/llvm/compiler-rt/lib/builtins/i386/floatdixf.S @@ -4,7 +4,7 @@ #include "../assembly.h" -// float __floatdixf(di_int a); +// long double __floatdixf(di_int a); #ifdef __i386__ diff --git a/gnu/llvm/compiler-rt/lib/builtins/int_div_impl.inc b/gnu/llvm/compiler-rt/lib/builtins/int_div_impl.inc new file mode 100644 index 00000000000..de037388907 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/builtins/int_div_impl.inc @@ -0,0 +1,70 @@ +//===-- int_div_impl.inc - Integer division ---------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Helpers used by __udivsi3, __umodsi3, __udivdi3, and __umodsi3. +// +//===----------------------------------------------------------------------===// + +#define clz(a) (sizeof(a) == sizeof(unsigned long long) ? __builtin_clzll(a) : clzsi(a)) + +// Adapted from Figure 3-40 of The PowerPC Compiler Writer's Guide +static __inline fixuint_t __udivXi3(fixuint_t n, fixuint_t d) { + const unsigned N = sizeof(fixuint_t) * CHAR_BIT; + // d == 0 cases are unspecified. + unsigned sr = (d ? clz(d) : N) - (n ? clz(n) : N); + // 0 <= sr <= N - 1 or sr is very large. + if (sr > N - 1) // n < d + return 0; + if (sr == N - 1) // d == 1 + return n; + ++sr; + // 1 <= sr <= N - 1. Shifts do not trigger UB. + fixuint_t r = n >> sr; + n <<= N - sr; + fixuint_t carry = 0; + for (; sr > 0; --sr) { + r = (r << 1) | (n >> (N - 1)); + n = (n << 1) | carry; + // Branch-less version of: + // carry = 0; + // if (r >= d) r -= d, carry = 1; + const fixint_t s = (fixint_t)(d - r - 1) >> (N - 1); + carry = s & 1; + r -= d & s; + } + n = (n << 1) | carry; + return n; +} + +// Mostly identical to __udivXi3 but the return values are different. +static __inline fixuint_t __umodXi3(fixuint_t n, fixuint_t d) { + const unsigned N = sizeof(fixuint_t) * CHAR_BIT; + // d == 0 cases are unspecified. + unsigned sr = (d ? clz(d) : N) - (n ? clz(n) : N); + // 0 <= sr <= N - 1 or sr is very large. + if (sr > N - 1) // n < d + return n; + if (sr == N - 1) // d == 1 + return 0; + ++sr; + // 1 <= sr <= N - 1. Shifts do not trigger UB. 
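One more sentence on the "Branch-less version" comment in __udivXi3 above: (fixint_t)(d - r - 1) >> (N - 1) relies on arithmetic right shift to smear the sign bit across the word, yielding an all-ones mask exactly when r >= d (within the ranges the loop maintains), so the conditional subtract and carry become straight-line code. The step in isolation, for the 32-bit case:

    #include <assert.h>
    #include <stdint.h>

    // Compare-as-mask: subtract d and set carry only when r >= d, no branch.
    // Like the original, this relies on >> of a negative int being arithmetic.
    static uint32_t step(uint32_t r, uint32_t d, uint32_t *carry) {
      const int32_t s = (int32_t)(d - r - 1) >> 31; // all-ones iff r >= d
      *carry = (uint32_t)s & 1;
      return r - (d & (uint32_t)s);
    }

    int main(void) {
      uint32_t c;
      assert(step(7, 5, &c) == 2 && c == 1); // r >= d: subtract, carry 1
      assert(step(3, 5, &c) == 3 && c == 0); // r <  d: unchanged, carry 0
      return 0;
    }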
+ fixuint_t r = n >> sr; + n <<= N - sr; + fixuint_t carry = 0; + for (; sr > 0; --sr) { + r = (r << 1) | (n >> (N - 1)); + n = (n << 1) | carry; + // Branch-less version of: + // carry = 0; + // if (r >= d) r -= d, carry = 1; + const fixint_t s = (fixint_t)(d - r - 1) >> (N - 1); + carry = s & 1; + r -= d & s; + } + return r; +} diff --git a/gnu/llvm/compiler-rt/lib/builtins/int_lib.h b/gnu/llvm/compiler-rt/lib/builtins/int_lib.h index 3092f68c084..991c4a99ea6 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/int_lib.h +++ b/gnu/llvm/compiler-rt/lib/builtins/int_lib.h @@ -48,12 +48,20 @@ #define XSTR(a) STR(a) #define SYMBOL_NAME(name) XSTR(__USER_LABEL_PREFIX__) #name -#if defined(__ELF__) || defined(__MINGW32__) || defined(__wasm__) +#if defined(__ELF__) || defined(__MINGW32__) || defined(__wasm__) || \ + defined(_AIX) #define COMPILER_RT_ALIAS(name, aliasname) \ COMPILER_RT_ABI __typeof(name) aliasname __attribute__((__alias__(#name))); #elif defined(__APPLE__) +#if defined(VISIBILITY_HIDDEN) +#define COMPILER_RT_ALIAS_VISIBILITY(name) \ + __asm__(".private_extern " SYMBOL_NAME(name)); +#else +#define COMPILER_RT_ALIAS_VISIBILITY(name) +#endif #define COMPILER_RT_ALIAS(name, aliasname) \ __asm__(".globl " SYMBOL_NAME(aliasname)); \ + COMPILER_RT_ALIAS_VISIBILITY(aliasname) \ __asm__(SYMBOL_NAME(aliasname) " = " SYMBOL_NAME(name)); \ COMPILER_RT_ABI __typeof(name) aliasname; #elif defined(_WIN32) @@ -84,8 +92,8 @@ // Include internal utility function declarations. #include "int_util.h" -COMPILER_RT_ABI si_int __paritysi2(si_int a); -COMPILER_RT_ABI si_int __paritydi2(di_int a); +COMPILER_RT_ABI int __paritysi2(si_int a); +COMPILER_RT_ABI int __paritydi2(di_int a); COMPILER_RT_ABI di_int __divdi3(di_int a, di_int b); COMPILER_RT_ABI si_int __divsi3(si_int a, si_int b); @@ -94,7 +102,7 @@ COMPILER_RT_ABI su_int __udivsi3(su_int n, su_int d); COMPILER_RT_ABI su_int __udivmodsi4(su_int a, su_int b, su_int *rem); COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem); #ifdef CRT_HAS_128BIT -COMPILER_RT_ABI si_int __clzti2(ti_int a); +COMPILER_RT_ABI int __clzti2(ti_int a); COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int *rem); #endif @@ -102,14 +110,14 @@ COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int *rem); #if defined(_MSC_VER) && !defined(__clang__) #include <intrin.h> -uint32_t __inline __builtin_ctz(uint32_t value) { +int __inline __builtin_ctz(uint32_t value) { unsigned long trailing_zero = 0; if (_BitScanForward(&trailing_zero, value)) return trailing_zero; return 32; } -uint32_t __inline __builtin_clz(uint32_t value) { +int __inline __builtin_clz(uint32_t value) { unsigned long leading_zero = 0; if (_BitScanReverse(&leading_zero, value)) return 31 - leading_zero; @@ -117,14 +125,14 @@ uint32_t __inline __builtin_clz(uint32_t value) { } #if defined(_M_ARM) || defined(_M_X64) -uint32_t __inline __builtin_clzll(uint64_t value) { +int __inline __builtin_clzll(uint64_t value) { unsigned long leading_zero = 0; if (_BitScanReverse64(&leading_zero, value)) return 63 - leading_zero; return 64; } #else -uint32_t __inline __builtin_clzll(uint64_t value) { +int __inline __builtin_clzll(uint64_t value) { if (value == 0) return 64; uint32_t msh = (uint32_t)(value >> 32); diff --git a/gnu/llvm/compiler-rt/lib/builtins/int_types.h b/gnu/llvm/compiler-rt/lib/builtins/int_types.h index f89220d5435..705355a4840 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/int_types.h +++ b/gnu/llvm/compiler-rt/lib/builtins/int_types.h @@ -22,11 +22,20 @@ #ifdef si_int #undef 
si_int #endif -typedef int si_int; -typedef unsigned su_int; +typedef int32_t si_int; +typedef uint32_t su_int; +#if UINT_MAX == 0xFFFFFFFF +#define clzsi __builtin_clz +#define ctzsi __builtin_ctz +#elif ULONG_MAX == 0xFFFFFFFF +#define clzsi __builtin_clzl +#define ctzsi __builtin_ctzl +#else +#error could not determine appropriate clzsi macro for this system +#endif -typedef long long di_int; -typedef unsigned long long du_int; +typedef int64_t di_int; +typedef uint64_t du_int; typedef union { di_int all; @@ -135,9 +144,12 @@ typedef struct { // Check if the target supports 80 bit extended precision long doubles. // Notably, on x86 Windows, MSVC only provides a 64-bit long double, but GCC // still makes it 80 bits. Clang will match whatever compiler it is trying to -// be compatible with. -#if ((defined(__i386__) || defined(__x86_64__)) && !defined(_MSC_VER)) || \ - defined(__m68k__) || defined(__ia64__) +// be compatible with. On 32-bit x86 Android, long double is 64 bits, while on +// x86_64 Android, long double is 128 bits. +#if (defined(__i386__) || defined(__x86_64__)) && \ + !(defined(_MSC_VER) || defined(__ANDROID__)) +#define HAS_80_BIT_LONG_DOUBLE 1 +#elif defined(__m68k__) || defined(__ia64__) #define HAS_80_BIT_LONG_DOUBLE 1 #else #define HAS_80_BIT_LONG_DOUBLE 0 diff --git a/gnu/llvm/compiler-rt/lib/builtins/lshrdi3.c b/gnu/llvm/compiler-rt/lib/builtins/lshrdi3.c index 97e08e1e9ba..6072152583a 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/lshrdi3.c +++ b/gnu/llvm/compiler-rt/lib/builtins/lshrdi3.c @@ -16,7 +16,7 @@ // Precondition: 0 <= b < bits_in_dword -COMPILER_RT_ABI di_int __lshrdi3(di_int a, si_int b) { +COMPILER_RT_ABI di_int __lshrdi3(di_int a, int b) { const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT); udwords input; udwords result; diff --git a/gnu/llvm/compiler-rt/lib/builtins/paritydi2.c b/gnu/llvm/compiler-rt/lib/builtins/paritydi2.c index dd9d45e63ea..58e85f89e04 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/paritydi2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/paritydi2.c @@ -14,7 +14,7 @@ // Returns: 1 if number of bits is odd else returns 0 -COMPILER_RT_ABI si_int __paritydi2(di_int a) { +COMPILER_RT_ABI int __paritydi2(di_int a) { dwords x; x.all = a; return __paritysi2(x.s.high ^ x.s.low); diff --git a/gnu/llvm/compiler-rt/lib/builtins/paritysi2.c b/gnu/llvm/compiler-rt/lib/builtins/paritysi2.c index 3efa961f2f8..a4b84e08063 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/paritysi2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/paritysi2.c @@ -14,7 +14,7 @@ // Returns: 1 if number of bits is odd else returns 0 -COMPILER_RT_ABI si_int __paritysi2(si_int a) { +COMPILER_RT_ABI int __paritysi2(si_int a) { su_int x = (su_int)a; x ^= x >> 16; x ^= x >> 8; diff --git a/gnu/llvm/compiler-rt/lib/builtins/parityti2.c b/gnu/llvm/compiler-rt/lib/builtins/parityti2.c index f3942ba8378..79e920d8a02 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/parityti2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/parityti2.c @@ -16,7 +16,7 @@ // Returns: 1 if number of bits is odd else returns 0 -COMPILER_RT_ABI si_int __parityti2(ti_int a) { +COMPILER_RT_ABI int __parityti2(ti_int a) { twords x; x.all = a; return __paritydi2(x.s.high ^ x.s.low); diff --git a/gnu/llvm/compiler-rt/lib/builtins/popcountdi2.c b/gnu/llvm/compiler-rt/lib/builtins/popcountdi2.c index 9bbc39c6608..20dd0b0239e 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/popcountdi2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/popcountdi2.c @@ -14,7 +14,7 @@ // Returns: count of 1 bits -COMPILER_RT_ABI si_int 
__popcountdi2(di_int a) { +COMPILER_RT_ABI int __popcountdi2(di_int a) { du_int x2 = (du_int)a; x2 = x2 - ((x2 >> 1) & 0x5555555555555555uLL); // Every 2 bits holds the sum of every pair of bits (32) diff --git a/gnu/llvm/compiler-rt/lib/builtins/popcountsi2.c b/gnu/llvm/compiler-rt/lib/builtins/popcountsi2.c index 75e592a778d..4d346c45d9c 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/popcountsi2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/popcountsi2.c @@ -14,7 +14,7 @@ // Returns: count of 1 bits -COMPILER_RT_ABI si_int __popcountsi2(si_int a) { +COMPILER_RT_ABI int __popcountsi2(si_int a) { su_int x = (su_int)a; x = x - ((x >> 1) & 0x55555555); // Every 2 bits holds the sum of every pair of bits diff --git a/gnu/llvm/compiler-rt/lib/builtins/popcountti2.c b/gnu/llvm/compiler-rt/lib/builtins/popcountti2.c index 853fd722309..79cbb2fb34c 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/popcountti2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/popcountti2.c @@ -17,7 +17,7 @@ // Returns: count of 1 bits -COMPILER_RT_ABI si_int __popcountti2(ti_int a) { +COMPILER_RT_ABI int __popcountti2(ti_int a) { tu_int x3 = (tu_int)a; x3 = x3 - ((x3 >> 1) & (((tu_int)0x5555555555555555uLL << 64) | 0x5555555555555555uLL)); diff --git a/gnu/llvm/compiler-rt/lib/builtins/powidf2.c b/gnu/llvm/compiler-rt/lib/builtins/powidf2.c index 9697588484e..81058af5082 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/powidf2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/powidf2.c @@ -14,7 +14,7 @@ // Returns: a ^ b -COMPILER_RT_ABI double __powidf2(double a, si_int b) { +COMPILER_RT_ABI double __powidf2(double a, int b) { const int recip = b < 0; double r = 1; while (1) { diff --git a/gnu/llvm/compiler-rt/lib/builtins/powisf2.c b/gnu/llvm/compiler-rt/lib/builtins/powisf2.c index 46940234882..d0ab26167bb 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/powisf2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/powisf2.c @@ -14,7 +14,7 @@ // Returns: a ^ b -COMPILER_RT_ABI float __powisf2(float a, si_int b) { +COMPILER_RT_ABI float __powisf2(float a, int b) { const int recip = b < 0; float r = 1; while (1) { diff --git a/gnu/llvm/compiler-rt/lib/builtins/powitf2.c b/gnu/llvm/compiler-rt/lib/builtins/powitf2.c index fcbdb4c2ee2..8e639a03a3c 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/powitf2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/powitf2.c @@ -10,13 +10,14 @@ // //===----------------------------------------------------------------------===// -#include "int_lib.h" +#define QUAD_PRECISION +#include "fp_lib.h" -#if _ARCH_PPC +#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT) // Returns: a ^ b -COMPILER_RT_ABI long double __powitf2(long double a, si_int b) { +COMPILER_RT_ABI long double __powitf2(long double a, int b) { const int recip = b < 0; long double r = 1; while (1) { diff --git a/gnu/llvm/compiler-rt/lib/builtins/powixf2.c b/gnu/llvm/compiler-rt/lib/builtins/powixf2.c index b7b52095afa..3edfe9fd7af 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/powixf2.c +++ b/gnu/llvm/compiler-rt/lib/builtins/powixf2.c @@ -16,7 +16,7 @@ // Returns: a ^ b -COMPILER_RT_ABI long double __powixf2(long double a, si_int b) { +COMPILER_RT_ABI long double __powixf2(long double a, int b) { const int recip = b < 0; long double r = 1; while (1) { diff --git a/gnu/llvm/compiler-rt/lib/builtins/udivdi3.c b/gnu/llvm/compiler-rt/lib/builtins/udivdi3.c index a23139ec947..74319cbe71c 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/udivdi3.c +++ b/gnu/llvm/compiler-rt/lib/builtins/udivdi3.c @@ -12,8 +12,12 @@ #include "int_lib.h" +typedef du_int fixuint_t; +typedef 
di_int fixint_t; +#include "int_div_impl.inc" + // Returns: a / b COMPILER_RT_ABI du_int __udivdi3(du_int a, du_int b) { - return __udivmoddi4(a, b, 0); + return __udivXi3(a, b); } diff --git a/gnu/llvm/compiler-rt/lib/builtins/udivmoddi4.c b/gnu/llvm/compiler-rt/lib/builtins/udivmoddi4.c index 5b297c32d79..10b41df28f8 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/udivmoddi4.c +++ b/gnu/llvm/compiler-rt/lib/builtins/udivmoddi4.c @@ -87,7 +87,7 @@ COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) { // K K // --- // K 0 - sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high); + sr = clzsi(d.s.high) - clzsi(n.s.high); // 0 <= sr <= n_uword_bits - 2 or sr large if (sr > n_uword_bits - 2) { if (rem) @@ -120,7 +120,7 @@ COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) { // K X // --- // 0 K - sr = 1 + n_uword_bits + __builtin_clz(d.s.low) - __builtin_clz(n.s.high); + sr = 1 + n_uword_bits + clzsi(d.s.low) - clzsi(n.s.high); // 2 <= sr <= n_udword_bits - 1 // q.all = n.all << (n_udword_bits - sr); // r.all = n.all >> sr; @@ -145,7 +145,7 @@ COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) { // K X // --- // K K - sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high); + sr = clzsi(d.s.high) - clzsi(n.s.high); // 0 <= sr <= n_uword_bits - 1 or sr large if (sr > n_uword_bits - 1) { if (rem) diff --git a/gnu/llvm/compiler-rt/lib/builtins/udivmodti4.c b/gnu/llvm/compiler-rt/lib/builtins/udivmodti4.c index dd14a8b579c..55def37c9e1 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/udivmodti4.c +++ b/gnu/llvm/compiler-rt/lib/builtins/udivmodti4.c @@ -14,182 +14,145 @@ #ifdef CRT_HAS_128BIT +// Returns the 128 bit division result by 64 bit. Result must fit in 64 bits. +// Remainder stored in r. +// Taken and adjusted from libdivide libdivide_128_div_64_to_64 division +// fallback. For a correctness proof see the reference for this algorithm +// in Knuth, Volume 2, section 4.3.1, Algorithm D. +UNUSED +static inline du_int udiv128by64to64default(du_int u1, du_int u0, du_int v, + du_int *r) { + const unsigned n_udword_bits = sizeof(du_int) * CHAR_BIT; + const du_int b = (1ULL << (n_udword_bits / 2)); // Number base (32 bits) + du_int un1, un0; // Norm. dividend LSD's + du_int vn1, vn0; // Norm. divisor digits + du_int q1, q0; // Quotient digits + du_int un64, un21, un10; // Dividend digit pairs + du_int rhat; // A remainder + si_int s; // Shift amount for normalization + + s = __builtin_clzll(v); + if (s > 0) { + // Normalize the divisor. + v = v << s; + un64 = (u1 << s) | (u0 >> (n_udword_bits - s)); + un10 = u0 << s; // Shift dividend left + } else { + // Avoid undefined behavior of (u0 >> 64). + un64 = u1; + un10 = u0; + } + + // Break divisor up into two 32-bit digits. + vn1 = v >> (n_udword_bits / 2); + vn0 = v & 0xFFFFFFFF; + + // Break right half of dividend into two digits. + un1 = un10 >> (n_udword_bits / 2); + un0 = un10 & 0xFFFFFFFF; + + // Compute the first quotient digit, q1. + q1 = un64 / vn1; + rhat = un64 - q1 * vn1; + + // q1 has at most error 2. No more than 2 iterations. + while (q1 >= b || q1 * vn0 > b * rhat + un1) { + q1 = q1 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + un21 = un64 * b + un1 - q1 * v; + + // Compute the second quotient digit. + q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + // q0 has at most error 2. No more than 2 iterations. 
+ while (q0 >= b || q0 * vn0 > b * rhat + un0) { + q0 = q0 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + *r = (un21 * b + un0 - q0 * v) >> s; + return q1 * b + q0; +} + +static inline du_int udiv128by64to64(du_int u1, du_int u0, du_int v, + du_int *r) { +#if defined(__x86_64__) + du_int result; + __asm__("divq %[v]" + : "=a"(result), "=d"(*r) + : [ v ] "r"(v), "a"(u0), "d"(u1)); + return result; +#else + return udiv128by64to64default(u1, u0, v, r); +#endif +} + // Effects: if rem != 0, *rem = a % b // Returns: a / b -// Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide - COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int *rem) { - const unsigned n_udword_bits = sizeof(du_int) * CHAR_BIT; const unsigned n_utword_bits = sizeof(tu_int) * CHAR_BIT; - utwords n; - n.all = a; - utwords d; - d.all = b; - utwords q; - utwords r; - unsigned sr; - // special cases, X is unknown, K != 0 - if (n.s.high == 0) { - if (d.s.high == 0) { - // 0 X - // --- - // 0 X - if (rem) - *rem = n.s.low % d.s.low; - return n.s.low / d.s.low; - } - // 0 X - // --- - // K X + utwords dividend; + dividend.all = a; + utwords divisor; + divisor.all = b; + utwords quotient; + utwords remainder; + if (divisor.all > dividend.all) { if (rem) - *rem = n.s.low; + *rem = dividend.all; return 0; } - // n.s.high != 0 - if (d.s.low == 0) { - if (d.s.high == 0) { - // K X - // --- - // 0 0 - if (rem) - *rem = n.s.high % d.s.low; - return n.s.high / d.s.low; - } - // d.s.high != 0 - if (n.s.low == 0) { - // K 0 - // --- - // K 0 - if (rem) { - r.s.high = n.s.high % d.s.high; - r.s.low = 0; - *rem = r.all; - } - return n.s.high / d.s.high; - } - // K K - // --- - // K 0 - if ((d.s.high & (d.s.high - 1)) == 0) /* if d is a power of 2 */ { - if (rem) { - r.s.low = n.s.low; - r.s.high = n.s.high & (d.s.high - 1); - *rem = r.all; - } - return n.s.high >> __builtin_ctzll(d.s.high); - } - // K K - // --- - // K 0 - sr = __builtin_clzll(d.s.high) - __builtin_clzll(n.s.high); - // 0 <= sr <= n_udword_bits - 2 or sr large - if (sr > n_udword_bits - 2) { - if (rem) - *rem = n.all; - return 0; - } - ++sr; - // 1 <= sr <= n_udword_bits - 1 - // q.all = n.all << (n_utword_bits - sr); - q.s.low = 0; - q.s.high = n.s.low << (n_udword_bits - sr); - // r.all = n.all >> sr; - r.s.high = n.s.high >> sr; - r.s.low = (n.s.high << (n_udword_bits - sr)) | (n.s.low >> sr); - } else /* d.s.low != 0 */ { - if (d.s.high == 0) { - // K X - // --- - // 0 K - if ((d.s.low & (d.s.low - 1)) == 0) /* if d is a power of 2 */ { - if (rem) - *rem = n.s.low & (d.s.low - 1); - if (d.s.low == 1) - return n.all; - sr = __builtin_ctzll(d.s.low); - q.s.high = n.s.high >> sr; - q.s.low = (n.s.high << (n_udword_bits - sr)) | (n.s.low >> sr); - return q.all; - } - // K X - // --- - // 0 K - sr = 1 + n_udword_bits + __builtin_clzll(d.s.low) - - __builtin_clzll(n.s.high); - // 2 <= sr <= n_utword_bits - 1 - // q.all = n.all << (n_utword_bits - sr); - // r.all = n.all >> sr; - if (sr == n_udword_bits) { - q.s.low = 0; - q.s.high = n.s.low; - r.s.high = 0; - r.s.low = n.s.high; - } else if (sr < n_udword_bits) /* 2 <= sr <= n_udword_bits - 1 */ { - q.s.low = 0; - q.s.high = n.s.low << (n_udword_bits - sr); - r.s.high = n.s.high >> sr; - r.s.low = (n.s.high << (n_udword_bits - sr)) | (n.s.low >> sr); - } else /* n_udword_bits + 1 <= sr <= n_utword_bits - 1 */ { - q.s.low = n.s.low << (n_utword_bits - sr); - q.s.high = (n.s.high << (n_utword_bits - sr)) | - (n.s.low >> (sr - n_udword_bits)); - r.s.high = 0; - r.s.low = n.s.high >> (sr - 
n_udword_bits); - } + // When the divisor fits in 64 bits, we can use an optimized path. + if (divisor.s.high == 0) { + remainder.s.high = 0; + if (dividend.s.high < divisor.s.low) { + // The result fits in 64 bits. + quotient.s.low = udiv128by64to64(dividend.s.high, dividend.s.low, + divisor.s.low, &remainder.s.low); + quotient.s.high = 0; } else { - // K X - // --- - // K K - sr = __builtin_clzll(d.s.high) - __builtin_clzll(n.s.high); - // 0 <= sr <= n_udword_bits - 1 or sr large - if (sr > n_udword_bits - 1) { - if (rem) - *rem = n.all; - return 0; - } - ++sr; - // 1 <= sr <= n_udword_bits - // q.all = n.all << (n_utword_bits - sr); - // r.all = n.all >> sr; - q.s.low = 0; - if (sr == n_udword_bits) { - q.s.high = n.s.low; - r.s.high = 0; - r.s.low = n.s.high; - } else { - r.s.high = n.s.high >> sr; - r.s.low = (n.s.high << (n_udword_bits - sr)) | (n.s.low >> sr); - q.s.high = n.s.low << (n_udword_bits - sr); - } + // First, divide with the high part to get the remainder in dividend.s.high. + // After that dividend.s.high < divisor.s.low. + quotient.s.high = dividend.s.high / divisor.s.low; + dividend.s.high = dividend.s.high % divisor.s.low; + quotient.s.low = udiv128by64to64(dividend.s.high, dividend.s.low, + divisor.s.low, &remainder.s.low); } + if (rem) + *rem = remainder.all; + return quotient.all; } - // Not a special case - // q and r are initialized with: - // q.all = n.all << (n_utword_bits - sr); - // r.all = n.all >> sr; - // 1 <= sr <= n_utword_bits - 1 - su_int carry = 0; - for (; sr > 0; --sr) { - // r:q = ((r:q) << 1) | carry - r.s.high = (r.s.high << 1) | (r.s.low >> (n_udword_bits - 1)); - r.s.low = (r.s.low << 1) | (q.s.high >> (n_udword_bits - 1)); - q.s.high = (q.s.high << 1) | (q.s.low >> (n_udword_bits - 1)); - q.s.low = (q.s.low << 1) | carry; - // carry = 0; - // if (r.all >= d.all) + // 0 <= shift <= 63. + si_int shift = + __builtin_clzll(divisor.s.high) - __builtin_clzll(dividend.s.high); + divisor.all <<= shift; + quotient.s.high = 0; + quotient.s.low = 0; + for (; shift >= 0; --shift) { + quotient.s.low <<= 1; + // Branch free version of. + // if (dividend.all >= divisor.all) // { - // r.all -= d.all; - // carry = 1; + // dividend.all -= divisor.all; + // carry = 1; // } - const ti_int s = (ti_int)(d.all - r.all - 1) >> (n_utword_bits - 1); - carry = s & 1; - r.all -= d.all & s; + const ti_int s = + (ti_int)(divisor.all - dividend.all - 1) >> (n_utword_bits - 1); + quotient.s.low |= s & 1; + dividend.all -= divisor.all & s; + divisor.all >>= 1; } - q.all = (q.all << 1) | carry; if (rem) - *rem = r.all; - return q.all; + *rem = dividend.all; + return quotient.all; } #endif // CRT_HAS_128BIT diff --git a/gnu/llvm/compiler-rt/lib/builtins/udivsi3.c b/gnu/llvm/compiler-rt/lib/builtins/udivsi3.c index 18cc96c1b2e..3894e159755 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/udivsi3.c +++ b/gnu/llvm/compiler-rt/lib/builtins/udivsi3.c @@ -12,49 +12,14 @@ #include "int_lib.h" -// Returns: a / b +typedef su_int fixuint_t; +typedef si_int fixint_t; +#include "int_div_impl.inc" -// Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide +// Returns: a / b -// This function should not call __divsi3! -COMPILER_RT_ABI su_int __udivsi3(su_int n, su_int d) { - const unsigned n_uword_bits = sizeof(su_int) * CHAR_BIT; - su_int q; - su_int r; - unsigned sr; - // special cases - if (d == 0) - return 0; // ?! 
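On the __udivmodti4 rewrite above: when the divisor fits in 64 bits, the x86_64 path funnels through the hardware divide, whose contract is a 128-bit dividend in rdx:rax with the requirement that the quotient fit in 64 bits (u1 < v), otherwise the instruction raises #DE; the diff guards this by first dividing the high half. A sketch wrapping the same asm (the portable branch here is demo-only and handles just u1 == 0):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t div128by64(uint64_t u1, uint64_t u0, uint64_t v,
                               uint64_t *r) {
    #if defined(__x86_64__)
      uint64_t q;  // quotient lands in rax, remainder in rdx
      __asm__("divq %[v]" : "=a"(q), "=d"(*r) : [v] "r"(v), "a"(u0), "d"(u1));
      return q;
    #else
      *r = u0 % v; // demo fallback, valid only for u1 == 0
      return u0 / v;
    #endif
    }

    int main(void) {
      uint64_t r, q = div128by64(0, 1000, 7, &r); // 1000 / 7
      printf("%llu rem %llu\n", (unsigned long long)q, (unsigned long long)r);
      return 0;
    }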
- if (n == 0) - return 0; - sr = __builtin_clz(d) - __builtin_clz(n); - // 0 <= sr <= n_uword_bits - 1 or sr large - if (sr > n_uword_bits - 1) // d > r - return 0; - if (sr == n_uword_bits - 1) // d == 1 - return n; - ++sr; - // 1 <= sr <= n_uword_bits - 1 - // Not a special case - q = n << (n_uword_bits - sr); - r = n >> sr; - su_int carry = 0; - for (; sr > 0; --sr) { - // r:q = ((r:q) << 1) | carry - r = (r << 1) | (q >> (n_uword_bits - 1)); - q = (q << 1) | carry; - // carry = 0; - // if (r.all >= d.all) - // { - // r.all -= d.all; - // carry = 1; - // } - const si_int s = (si_int)(d - r - 1) >> (n_uword_bits - 1); - carry = s & 1; - r -= d & s; - } - q = (q << 1) | carry; - return q; +COMPILER_RT_ABI su_int __udivsi3(su_int a, su_int b) { + return __udivXi3(a, b); } #if defined(__ARM_EABI__) diff --git a/gnu/llvm/compiler-rt/lib/builtins/umoddi3.c b/gnu/llvm/compiler-rt/lib/builtins/umoddi3.c index 965cf8fc01b..e672da96ef6 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/umoddi3.c +++ b/gnu/llvm/compiler-rt/lib/builtins/umoddi3.c @@ -12,10 +12,12 @@ #include "int_lib.h" +typedef du_int fixuint_t; +typedef di_int fixint_t; +#include "int_div_impl.inc" + // Returns: a % b COMPILER_RT_ABI du_int __umoddi3(du_int a, du_int b) { - du_int r; - __udivmoddi4(a, b, &r); - return r; + return __umodXi3(a, b); } diff --git a/gnu/llvm/compiler-rt/lib/builtins/umodsi3.c b/gnu/llvm/compiler-rt/lib/builtins/umodsi3.c index ce9abcd94ef..5383aea656a 100644 --- a/gnu/llvm/compiler-rt/lib/builtins/umodsi3.c +++ b/gnu/llvm/compiler-rt/lib/builtins/umodsi3.c @@ -12,8 +12,12 @@ #include "int_lib.h" +typedef su_int fixuint_t; +typedef si_int fixint_t; +#include "int_div_impl.inc" + // Returns: a % b COMPILER_RT_ABI su_int __umodsi3(su_int a, su_int b) { - return a - __udivsi3(a, b) * b; + return __umodXi3(a, b); } diff --git a/gnu/llvm/compiler-rt/lib/builtins/ve/grow_stack.S b/gnu/llvm/compiler-rt/lib/builtins/ve/grow_stack.S new file mode 100644 index 00000000000..f403798495a --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/builtins/ve/grow_stack.S @@ -0,0 +1,31 @@ +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "../assembly.h" + +// grow_stack routine +// This routine is VE specific +// https://www.nec.com/en/global/prod/hpc/aurora/document/VE-ABI_v1.1.pdf + +// destroy %s62 and %s63 only + +#ifdef __ve__ + +.text +.p2align 4 +DEFINE_COMPILERRT_FUNCTION(__ve_grow_stack) + subu.l %sp, %sp, %s0 # sp -= alloca size + and %sp, -16, %sp # align sp + brge.l.t %sp, %sl, 1f + ld %s63, 0x18(,%tp) # load param area + lea %s62, 0x13b # syscall # of grow + shm.l %s62, 0x0(%s63) # stored at addr:0 + shm.l %sl, 0x8(%s63) # old limit at addr:8 + shm.l %sp, 0x10(%s63) # new limit at addr:16 + monc +1: + b.l (,%lr) +END_COMPILERRT_FUNCTION(__ve_grow_stack) + +#endif // __ve__ diff --git a/gnu/llvm/compiler-rt/lib/builtins/ve/grow_stack_align.S b/gnu/llvm/compiler-rt/lib/builtins/ve/grow_stack_align.S new file mode 100644 index 00000000000..19a1dfa8726 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/builtins/ve/grow_stack_align.S @@ -0,0 +1,31 @@ +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
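A C-level reading of the new __ve_grow_stack above, since VE mnemonics are unfamiliar: drop the stack pointer by the alloca size, re-align it, and if it sank below the stack limit, issue the grow syscall (number 0x13b, per the routine's own comments and the linked VE ABI document) with the old and new limits staged in the thread's parameter area. The sketch below mimics only the control flow; the syscall and parameter-area details are stubbed out and the types are invented for the demo:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uintptr_t sp, limit; } fake_thread; // demo stand-in

    static void grow_stack(fake_thread *t, uintptr_t size) {
      t->sp -= size;                 // subu.l %sp, %sp, %s0
      t->sp &= ~(uintptr_t)15;       // and %sp, -16, %sp (16-byte align)
      if (t->sp < t->limit)          // brge.l.t %sp, %sl not taken
        printf("would issue grow syscall 0x13b, new limit %p\n",
               (void *)t->sp);
    }

    int main(void) {
      fake_thread t = {0x10000, 0x8000};
      grow_stack(&t, 0x9000);        // crosses the limit, triggers growth
      return 0;
    }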
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "../assembly.h" + +// grow_stack routine +// This routine is VE specific +// https://www.nec.com/en/global/prod/hpc/aurora/document/VE-ABI_v1.1.pdf + +// destroy %s62 and %s63 only + +#ifdef __ve__ + +.text +.p2align 4 +DEFINE_COMPILERRT_FUNCTION(__ve_grow_stack_align) + subu.l %sp, %sp, %s0 # sp -= alloca size + and %sp, %sp, %s1 # align sp + brge.l.t %sp, %sl, 1f + ld %s63, 0x18(,%tp) # load param area + lea %s62, 0x13b # syscall # of grow + shm.l %s62, 0x0(%s63) # stored at addr:0 + shm.l %sl, 0x8(%s63) # old limit at addr:8 + shm.l %sp, 0x10(%s63) # new limit at addr:16 + monc +1: + b.l (,%lr) +END_COMPILERRT_FUNCTION(__ve_grow_stack_align) + +#endif // __ve__ diff --git a/gnu/llvm/compiler-rt/lib/crt/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/crt/CMakeLists.txt index 1ed04827925..0f8689268b9 100644 --- a/gnu/llvm/compiler-rt/lib/crt/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/crt/CMakeLists.txt @@ -20,6 +20,16 @@ function(check_cxx_section_exists section output) list(APPEND try_compile_flags "-target ${CMAKE_C_COMPILER_TARGET}") endif() append_list_if(COMPILER_RT_HAS_FNO_LTO_FLAG -fno-lto try_compile_flags) + if(NOT COMPILER_RT_ENABLE_PGO) + if(LLVM_PROFDATA_FILE AND COMPILER_RT_HAS_FNO_PROFILE_INSTR_USE_FLAG) + list(APPEND try_compile_flags "-fno-profile-instr-use") + endif() + if(LLVM_BUILD_INSTRUMENTED MATCHES IR AND COMPILER_RT_HAS_FNO_PROFILE_GENERATE_FLAG) + list(APPEND try_compile_flags "-fno-profile-generate") + elseif(LLVM_BUILD_INSTRUMENTED AND COMPILER_RT_HAS_FNO_PROFILE_INSTR_GENERATE_FLAG) + list(APPEND try_compile_flags "-fno-profile-instr-generate") + endif() + endif() string(REPLACE ";" " " extra_flags "${try_compile_flags}") @@ -43,6 +53,10 @@ function(check_cxx_section_exists section output) endif() endforeach() + # Strip quotes from the compile command, as the compiler is not expecting + # quoted arguments (potential quotes added from D62063). + string(REPLACE "\"" "" test_compile_command "${test_compile_command}") + string(REPLACE " " ";" test_compile_command "${test_compile_command}") execute_process( @@ -52,6 +66,12 @@ function(check_cxx_section_exists section output) ERROR_VARIABLE TEST_ERROR ) + # Explicitly throw a fatal error message if test_compile_command fails. + if(TEST_RESULT) + message(FATAL_ERROR "${TEST_ERROR}") + return() + endif() + execute_process( COMMAND ${CMAKE_OBJDUMP} -h "${TARGET_NAME}/CheckSectionExists.o" RESULT_VARIABLE CHECK_RESULT diff --git a/gnu/llvm/compiler-rt/lib/dfsan/dfsan_custom.cpp b/gnu/llvm/compiler-rt/lib/dfsan/dfsan_custom.cpp index 84f0271b15e..1acd2d47d15 100644 --- a/gnu/llvm/compiler-rt/lib/dfsan/dfsan_custom.cpp +++ b/gnu/llvm/compiler-rt/lib/dfsan/dfsan_custom.cpp @@ -84,7 +84,13 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strchr(const char *s, int c, *ret_label = dfsan_union(dfsan_read_label(s, i + 1), dfsan_union(s_label, c_label)); } - return s[i] == 0 ? nullptr : const_cast<char *>(s+i); + + // If s[i] is the \0 at the end of the string, and \0 is not the + // character we are searching for, then return null. 
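Context for the dfsan strchr fix whose comment closes the hunk above (the new check lands just below): C's strchr treats the terminator as part of the string, so searching for '\0' must return a pointer to it, never NULL; that is precisely the case the old wrapper got wrong. Against libc:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    int main(void) {
      const char *s = "abc";
      assert(strchr(s, '\0') == s + 3); // the terminator is found
      assert(strchr(s, 'z') == NULL);   // an absent character is not
      return 0;
    }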
+ if (s[i] == 0 && c != 0) { + return nullptr; + } + return const_cast<char *>(s + i); } } } @@ -151,14 +157,17 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strcasecmp(const char *s1, const char *s2, dfsan_label s1_label, dfsan_label s2_label, dfsan_label *ret_label) { for (size_t i = 0;; ++i) { - if (tolower(s1[i]) != tolower(s2[i]) || s1[i] == 0 || s2[i] == 0) { + char s1_lower = tolower(s1[i]); + char s2_lower = tolower(s2[i]); + + if (s1_lower != s2_lower || s1[i] == 0 || s2[i] == 0) { if (flags().strict_data_dependencies) { *ret_label = 0; } else { *ret_label = dfsan_union(dfsan_read_label(s1, i + 1), dfsan_read_label(s2, i + 1)); } - return s1[i] - s2[i]; + return s1_lower - s2_lower; } } return 0; @@ -206,15 +215,17 @@ __dfsw_strncasecmp(const char *s1, const char *s2, size_t n, } for (size_t i = 0;; ++i) { - if (tolower(s1[i]) != tolower(s2[i]) || s1[i] == 0 || s2[i] == 0 || - i == n - 1) { + char s1_lower = tolower(s1[i]); + char s2_lower = tolower(s2[i]); + + if (s1_lower != s2_lower || s1[i] == 0 || s2[i] == 0 || i == n - 1) { if (flags().strict_data_dependencies) { *ret_label = 0; } else { *ret_label = dfsan_union(dfsan_read_label(s1, i + 1), dfsan_read_label(s2, i + 1)); } - return s1[i] - s2[i]; + return s1_lower - s2_lower; } } return 0; diff --git a/gnu/llvm/compiler-rt/lib/dfsan/done_abilist.txt b/gnu/llvm/compiler-rt/lib/dfsan/done_abilist.txt index f8c88dd3201..52f3ff5ef23 100644 --- a/gnu/llvm/compiler-rt/lib/dfsan/done_abilist.txt +++ b/gnu/llvm/compiler-rt/lib/dfsan/done_abilist.txt @@ -33,7 +33,6 @@ fun:dfsan_flush=discard # glibc ############################################################################### fun:malloc=discard -fun:realloc=discard fun:free=discard # Functions that return a value that depends on the input, but the output might diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/fuzzer/CMakeLists.txt index 80409f9f689..b5be6b89452 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/fuzzer/CMakeLists.txt @@ -82,8 +82,6 @@ else() endif() endif() -set(FUZZER_SUPPORTED_OS ${SANITIZER_COMMON_SUPPORTED_OS}) - add_compiler_rt_component(fuzzer) add_compiler_rt_object_libraries(RTfuzzer diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerBuiltins.h b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerBuiltins.h index 5f1ccef8a9c..4c0ada82662 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerBuiltins.h +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerBuiltins.h @@ -11,7 +11,7 @@ #ifndef LLVM_FUZZER_BUILTINS_H #define LLVM_FUZZER_BUILTINS_H -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if !LIBFUZZER_MSVC #include <cstdint> diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h index bc65c60098b..c5bec9787d8 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h @@ -12,7 +12,7 @@ #ifndef LLVM_FUZZER_BUILTINS_MSVC_H #define LLVM_FUZZER_BUILTINS_MSVC_H -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if LIBFUZZER_MSVC #include <intrin.h> diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerCorpus.h index 6a95ef3a8e6..54d1e09ec6d 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerCorpus.h +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerCorpus.h @@ -38,12 +38,102 @@ struct InputInfo { bool HasFocusFunction = false; Vector<uint32_t> UniqFeatureSet; Vector<uint8_t> DataFlowTraceForFocusFunction; + // Power schedule. 
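Why the strcasecmp wrappers above now return the difference of the case-folded bytes: with mixed case, the raw bytes can disagree in sign with the case-insensitive ordering the function is supposed to report. A two-line check against the libc behavior the interceptor must imitate:

    #include <assert.h>
    #include <strings.h>

    int main(void) {
      assert(strcasecmp("a", "B") < 0); // 'a' sorts before 'b' case-folded
      assert(('a' - 'B') > 0);          // but the raw-byte difference is +31
      return 0;
    }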
+ bool NeedsEnergyUpdate = false; + double Energy = 0.0; + size_t SumIncidence = 0; + Vector<std::pair<uint32_t, uint16_t>> FeatureFreqs; + + // Delete feature Idx and its frequency from FeatureFreqs. + bool DeleteFeatureFreq(uint32_t Idx) { + if (FeatureFreqs.empty()) + return false; + + // Binary search over local feature frequencies sorted by index. + auto Lower = std::lower_bound(FeatureFreqs.begin(), FeatureFreqs.end(), + std::pair<uint32_t, uint16_t>(Idx, 0)); + + if (Lower != FeatureFreqs.end() && Lower->first == Idx) { + FeatureFreqs.erase(Lower); + return true; + } + return false; + } + + // Assign more energy to a high-entropy seed, i.e., that reveals more + // information about the globally rare features in the neighborhood + // of the seed. Since we do not know the entropy of a seed that has + // never been executed we assign fresh seeds maximum entropy and + // let II->Energy approach the true entropy from above. + void UpdateEnergy(size_t GlobalNumberOfFeatures) { + Energy = 0.0; + SumIncidence = 0; + + // Apply add-one smoothing to locally discovered features. + for (auto F : FeatureFreqs) { + size_t LocalIncidence = F.second + 1; + Energy -= LocalIncidence * logl(LocalIncidence); + SumIncidence += LocalIncidence; + } + + // Apply add-one smoothing to locally undiscovered features. + // PreciseEnergy -= 0; // since logl(1.0) == 0) + SumIncidence += (GlobalNumberOfFeatures - FeatureFreqs.size()); + + // Add a single locally abundant feature apply add-one smoothing. + size_t AbdIncidence = NumExecutedMutations + 1; + Energy -= AbdIncidence * logl(AbdIncidence); + SumIncidence += AbdIncidence; + + // Normalize. + if (SumIncidence != 0) + Energy = (Energy / SumIncidence) + logl(SumIncidence); + } + + // Increment the frequency of the feature Idx. + void UpdateFeatureFrequency(uint32_t Idx) { + NeedsEnergyUpdate = true; + + // The local feature frequencies is an ordered vector of pairs. + // If there are no local feature frequencies, push_back preserves order. + // Set the feature frequency for feature Idx32 to 1. + if (FeatureFreqs.empty()) { + FeatureFreqs.push_back(std::pair<uint32_t, uint16_t>(Idx, 1)); + return; + } + + // Binary search over local feature frequencies sorted by index. + auto Lower = std::lower_bound(FeatureFreqs.begin(), FeatureFreqs.end(), + std::pair<uint32_t, uint16_t>(Idx, 0)); + + // If feature Idx32 already exists, increment its frequency. + // Otherwise, insert a new pair right after the next lower index. 
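DeleteFeatureFreq and UpdateFeatureFrequency above lean on the same idiom: std::lower_bound over the index-sorted FeatureFreqs vector yields the lookup position and the insertion point in one search, which is what keeps the vector sorted without a separate pass. The search written out in C, for readers following along in the builtins' language:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint32_t idx; uint16_t freq; } pair;

    // First element with idx >= key: both lookup hit and insertion point.
    static size_t lower_bound_idx(const pair *a, size_t n, uint32_t key) {
      size_t lo = 0, hi = n;
      while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        if (a[mid].idx < key)
          lo = mid + 1;
        else
          hi = mid;
      }
      return lo;
    }

    int main(void) {
      pair v[] = {{2, 1}, {5, 3}, {9, 1}};
      assert(lower_bound_idx(v, 3, 5) == 1); // exact hit
      assert(lower_bound_idx(v, 3, 6) == 2); // insertion point
      return 0;
    }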
+ if (Lower != FeatureFreqs.end() && Lower->first == Idx) { + Lower->second++; + } else { + FeatureFreqs.insert(Lower, std::pair<uint32_t, uint16_t>(Idx, 1)); + } + } +}; + +struct EntropicOptions { + bool Enabled; + size_t NumberOfRarestFeatures; + size_t FeatureFrequencyThreshold; }; class InputCorpus { - static const size_t kFeatureSetSize = 1 << 21; - public: - InputCorpus(const std::string &OutputCorpus) : OutputCorpus(OutputCorpus) { + static const uint32_t kFeatureSetSize = 1 << 21; + static const uint8_t kMaxMutationFactor = 20; + static const size_t kSparseEnergyUpdates = 100; + + size_t NumExecutedMutations = 0; + + EntropicOptions Entropic; + +public: + InputCorpus(const std::string &OutputCorpus, EntropicOptions Entropic) + : Entropic(Entropic), OutputCorpus(OutputCorpus) { memset(InputSizesPerFeature, 0, sizeof(InputSizesPerFeature)); memset(SmallestElementPerFeature, 0, sizeof(SmallestElementPerFeature)); } @@ -70,6 +160,7 @@ class InputCorpus { Res = std::max(Res, II->U.size()); return Res; } + void IncrementNumExecutedMutations() { NumExecutedMutations++; } size_t NumInputsThatTouchFocusFunction() { return std::count_if(Inputs.begin(), Inputs.end(), [](const InputInfo *II) { @@ -99,6 +190,10 @@ class InputCorpus { II.MayDeleteFile = MayDeleteFile; II.UniqFeatureSet = FeatureSet; II.HasFocusFunction = HasFocusFunction; + // Assign maximal energy to the new seed. + II.Energy = RareFeatures.empty() ? 1.0 : log(RareFeatures.size()); + II.SumIncidence = RareFeatures.size(); + II.NeedsEnergyUpdate = false; std::sort(II.UniqFeatureSet.begin(), II.UniqFeatureSet.end()); ComputeSHA1(U.data(), U.size(), II.Sha1); auto Sha1Str = Sha1ToString(II.Sha1); @@ -111,7 +206,7 @@ class InputCorpus { // But if we don't, we'll use the DFT of its base input. if (II.DataFlowTraceForFocusFunction.empty() && BaseII) II.DataFlowTraceForFocusFunction = BaseII->DataFlowTraceForFocusFunction; - UpdateCorpusDistribution(); + DistributionNeedsUpdate = true; PrintCorpus(); // ValidateFeatureSet(); return &II; @@ -162,7 +257,7 @@ class InputCorpus { Hashes.insert(Sha1ToString(II->Sha1)); II->U = U; II->Reduced = true; - UpdateCorpusDistribution(); + DistributionNeedsUpdate = true; } bool HasUnit(const Unit &U) { return Hashes.count(Hash(U)); } @@ -175,6 +270,7 @@ class InputCorpus { // Returns an index of random unit from the corpus to mutate. size_t ChooseUnitIdxToMutate(Random &Rand) { + UpdateCorpusDistribution(Rand); size_t Idx = static_cast<size_t>(CorpusDistribution(Rand)); assert(Idx < Inputs.size()); return Idx; @@ -210,10 +306,65 @@ class InputCorpus { InputInfo &II = *Inputs[Idx]; DeleteFile(II); Unit().swap(II.U); + II.Energy = 0.0; + II.NeedsEnergyUpdate = false; + DistributionNeedsUpdate = true; if (FeatureDebug) Printf("EVICTED %zd\n", Idx); } + void AddRareFeature(uint32_t Idx) { + // Maintain *at least* TopXRarestFeatures many rare features + // and all features with a frequency below ConsideredRare. + // Remove all other features. + while (RareFeatures.size() > Entropic.NumberOfRarestFeatures && + FreqOfMostAbundantRareFeature > Entropic.FeatureFrequencyThreshold) { + + // Find most and second most abbundant feature. 
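Backing up to UpdateEnergy in the struct above: it computes Shannon entropy without materializing probabilities, accumulating -sum(c_i * ln c_i) over the add-one-smoothed counts and then normalizing as (that / S) + ln S, which is algebraically identical to -sum(p_i * ln p_i) with p_i = c_i / S. A numeric cross-check:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      const double counts[] = {3, 1, 1, 1};   // smoothed incidences
      double energy = 0.0, sum = 0.0;
      for (unsigned i = 0; i < 4; i++) {
        energy -= counts[i] * log(counts[i]);
        sum += counts[i];
      }
      energy = energy / sum + log(sum);       // the incremental form

      double h = 0.0;                         // textbook entropy
      for (unsigned i = 0; i < 4; i++) {
        double p = counts[i] / sum;
        h -= p * log(p);
      }
      printf("%f %f\n", energy, h);           // prints the same value twice
      return 0;
    }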
+ uint32_t MostAbundantRareFeatureIndices[2] = {RareFeatures[0], + RareFeatures[0]}; + size_t Delete = 0; + for (size_t i = 0; i < RareFeatures.size(); i++) { + uint32_t Idx2 = RareFeatures[i]; + if (GlobalFeatureFreqs[Idx2] >= + GlobalFeatureFreqs[MostAbundantRareFeatureIndices[0]]) { + MostAbundantRareFeatureIndices[1] = MostAbundantRareFeatureIndices[0]; + MostAbundantRareFeatureIndices[0] = Idx2; + Delete = i; + } + } + + // Remove most abundant rare feature. + RareFeatures[Delete] = RareFeatures.back(); + RareFeatures.pop_back(); + + for (auto II : Inputs) { + if (II->DeleteFeatureFreq(MostAbundantRareFeatureIndices[0])) + II->NeedsEnergyUpdate = true; + } + + // Set 2nd most abundant as the new most abundant feature count. + FreqOfMostAbundantRareFeature = + GlobalFeatureFreqs[MostAbundantRareFeatureIndices[1]]; + } + + // Add rare feature, handle collisions, and update energy. + RareFeatures.push_back(Idx); + GlobalFeatureFreqs[Idx] = 0; + for (auto II : Inputs) { + II->DeleteFeatureFreq(Idx); + + // Apply add-one smoothing to this locally undiscovered feature. + // Zero energy seeds will never be fuzzed and remain zero energy. + if (II->Energy > 0.0) { + II->SumIncidence += 1; + II->Energy += logl(II->SumIncidence) / II->SumIncidence; + } + } + + DistributionNeedsUpdate = true; + } + bool AddFeature(size_t Idx, uint32_t NewSize, bool Shrink) { assert(NewSize); Idx = Idx % kFeatureSetSize; @@ -228,6 +379,8 @@ class InputCorpus { DeleteInput(OldIdx); } else { NumAddedFeatures++; + if (Entropic.Enabled) + AddRareFeature((uint32_t)Idx); } NumUpdatedFeatures++; if (FeatureDebug) @@ -239,6 +392,30 @@ class InputCorpus { return false; } + // Increment frequency of feature Idx globally and locally. + void UpdateFeatureFrequency(InputInfo *II, size_t Idx) { + uint32_t Idx32 = Idx % kFeatureSetSize; + + // Saturated increment. + if (GlobalFeatureFreqs[Idx32] == 0xFFFF) + return; + uint16_t Freq = GlobalFeatureFreqs[Idx32]++; + + // Skip if abundant. + if (Freq > FreqOfMostAbundantRareFeature || + std::find(RareFeatures.begin(), RareFeatures.end(), Idx32) == + RareFeatures.end()) + return; + + // Update global frequencies. + if (Freq == FreqOfMostAbundantRareFeature) + FreqOfMostAbundantRareFeature++; + + // Update local frequencies. + if (II) + II->UpdateFeatureFrequency(Idx32); + } + size_t NumFeatures() const { return NumAddedFeatures; } size_t NumFeatureUpdates() const { return NumUpdatedFeatures; } @@ -265,19 +442,60 @@ private: // Updates the probability distribution for the units in the corpus. // Must be called whenever the corpus or unit weights are changed. // - // Hypothesis: units added to the corpus last are more interesting. - // - // Hypothesis: inputs with infrequent features are more interesting. - void UpdateCorpusDistribution() { + // Hypothesis: inputs that maximize information about globally rare features + // are interesting. + void UpdateCorpusDistribution(Random &Rand) { + // Skip update if no seeds or rare features were added/deleted. + // Sparse updates for local change of feature frequencies, + // i.e., randomly do not skip. + if (!DistributionNeedsUpdate && + (!Entropic.Enabled || Rand(kSparseEnergyUpdates))) + return; + + DistributionNeedsUpdate = false; + size_t N = Inputs.size(); assert(N); Intervals.resize(N + 1); Weights.resize(N); std::iota(Intervals.begin(), Intervals.end(), 0); - for (size_t i = 0; i < N; i++) - Weights[i] = Inputs[i]->NumFeatures - ? (i + 1) * (Inputs[i]->HasFocusFunction ? 
1000 : 1) - : 0.; + + bool VanillaSchedule = true; + if (Entropic.Enabled) { + for (auto II : Inputs) { + if (II->NeedsEnergyUpdate && II->Energy != 0.0) { + II->NeedsEnergyUpdate = false; + II->UpdateEnergy(RareFeatures.size()); + } + } + + for (size_t i = 0; i < N; i++) { + + if (Inputs[i]->NumFeatures == 0) { + // If the seed doesn't represent any features, assign zero energy. + Weights[i] = 0.; + } else if (Inputs[i]->NumExecutedMutations / kMaxMutationFactor > + NumExecutedMutations / Inputs.size()) { + // If the seed was fuzzed a lot more than average, assign zero energy. + Weights[i] = 0.; + } else { + // Otherwise, simply assign the computed energy. + Weights[i] = Inputs[i]->Energy; + } + + // If energy for all seeds is zero, fall back to vanilla schedule. + if (Weights[i] > 0.0) + VanillaSchedule = false; + } + } + + if (VanillaSchedule) { + for (size_t i = 0; i < N; i++) + Weights[i] = Inputs[i]->NumFeatures + ? (i + 1) * (Inputs[i]->HasFocusFunction ? 1000 : 1) + : 0.; + } + if (FeatureDebug) { for (size_t i = 0; i < N; i++) Printf("%zd ", Inputs[i]->NumFeatures); @@ -302,6 +520,11 @@ private: uint32_t InputSizesPerFeature[kFeatureSetSize]; uint32_t SmallestElementPerFeature[kFeatureSetSize]; + bool DistributionNeedsUpdate = true; + uint16_t FreqOfMostAbundantRareFeature = 0; + uint16_t GlobalFeatureFreqs[kFeatureSetSize] = {}; + Vector<uint32_t> RareFeatures; + std::string OutputCorpus; }; diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp index 99ff918f7c6..48df8e66860 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp @@ -248,6 +248,11 @@ int CollectDataFlow(const std::string &DFTBinary, const std::string &DirPath, const Vector<SizedFile> &CorporaFiles) { Printf("INFO: collecting data flow: bin: %s dir: %s files: %zd\n", DFTBinary.c_str(), DirPath.c_str(), CorporaFiles.size()); + if (CorporaFiles.empty()) { + Printf("ERROR: can't collect data flow without corpus provided."); + return 1; + } + static char DFSanEnv[] = "DFSAN_OPTIONS=fast16labels=1:warn_unimplemented=0"; putenv(DFSanEnv); MkDir(DirPath); diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDefs.h b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDefs.h index 5793e86aa80..1a2752af2f4 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDefs.h +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDefs.h @@ -21,156 +21,6 @@ #include <vector> -// Platform detection. 
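Summing up the weighting in UpdateCorpusDistribution above: a seed's weight is its entropic energy, featureless or grossly over-fuzzed seeds (kMaxMutationFactor = 20 in the diff) are zeroed, and if every weight ends up zero the vanilla schedule takes over, weighting seed i as (i + 1) so later additions are preferred, times 1000 for focus-function seeds. Reduced to a toy that shows just the energy-or-fallback choice:

    #include <stdio.h>

    static void assign(const double *energy, double *w, int n) {
      int vanilla = 1;
      for (int i = 0; i < n; i++) {
        w[i] = energy[i];            // entropic weight
        if (w[i] > 0.0)
          vanilla = 0;
      }
      if (vanilla)                   // all zero: recency weighting
        for (int i = 0; i < n; i++)
          w[i] = (double)(i + 1);
    }

    int main(void) {
      double e1[3] = {1.2, 0.0, 0.7}, e0[3] = {0, 0, 0}, w[3];
      assign(e1, w, 3);
      printf("entropic: %g %g %g\n", w[0], w[1], w[2]);
      assign(e0, w, 3);
      printf("vanilla:  %g %g %g\n", w[0], w[1], w[2]);
      return 0;
    }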
-#ifdef __linux__ -#define LIBFUZZER_APPLE 0 -#define LIBFUZZER_FUCHSIA 0 -#define LIBFUZZER_LINUX 1 -#define LIBFUZZER_NETBSD 0 -#define LIBFUZZER_FREEBSD 0 -#define LIBFUZZER_OPENBSD 0 -#define LIBFUZZER_WINDOWS 0 -#define LIBFUZZER_EMSCRIPTEN 0 -#elif __APPLE__ -#define LIBFUZZER_APPLE 1 -#define LIBFUZZER_FUCHSIA 0 -#define LIBFUZZER_LINUX 0 -#define LIBFUZZER_NETBSD 0 -#define LIBFUZZER_FREEBSD 0 -#define LIBFUZZER_OPENBSD 0 -#define LIBFUZZER_WINDOWS 0 -#define LIBFUZZER_EMSCRIPTEN 0 -#elif __NetBSD__ -#define LIBFUZZER_APPLE 0 -#define LIBFUZZER_FUCHSIA 0 -#define LIBFUZZER_LINUX 0 -#define LIBFUZZER_NETBSD 1 -#define LIBFUZZER_FREEBSD 0 -#define LIBFUZZER_OPENBSD 0 -#define LIBFUZZER_WINDOWS 0 -#define LIBFUZZER_EMSCRIPTEN 0 -#elif __FreeBSD__ -#define LIBFUZZER_APPLE 0 -#define LIBFUZZER_FUCHSIA 0 -#define LIBFUZZER_LINUX 0 -#define LIBFUZZER_NETBSD 0 -#define LIBFUZZER_FREEBSD 1 -#define LIBFUZZER_OPENBSD 0 -#define LIBFUZZER_WINDOWS 0 -#define LIBFUZZER_EMSCRIPTEN 0 -#elif __OpenBSD__ -#define LIBFUZZER_APPLE 0 -#define LIBFUZZER_FUCHSIA 0 -#define LIBFUZZER_LINUX 0 -#define LIBFUZZER_NETBSD 0 -#define LIBFUZZER_FREEBSD 0 -#define LIBFUZZER_OPENBSD 1 -#define LIBFUZZER_WINDOWS 0 -#define LIBFUZZER_EMSCRIPTEN 0 -#elif _WIN32 -#define LIBFUZZER_APPLE 0 -#define LIBFUZZER_FUCHSIA 0 -#define LIBFUZZER_LINUX 0 -#define LIBFUZZER_NETBSD 0 -#define LIBFUZZER_FREEBSD 0 -#define LIBFUZZER_OPENBSD 0 -#define LIBFUZZER_WINDOWS 1 -#define LIBFUZZER_EMSCRIPTEN 0 -#elif __Fuchsia__ -#define LIBFUZZER_APPLE 0 -#define LIBFUZZER_FUCHSIA 1 -#define LIBFUZZER_LINUX 0 -#define LIBFUZZER_NETBSD 0 -#define LIBFUZZER_FREEBSD 0 -#define LIBFUZZER_OPENBSD 0 -#define LIBFUZZER_WINDOWS 0 -#define LIBFUZZER_EMSCRIPTEN 0 -#elif __EMSCRIPTEN__ -#define LIBFUZZER_APPLE 0 -#define LIBFUZZER_FUCHSIA 0 -#define LIBFUZZER_LINUX 0 -#define LIBFUZZER_NETBSD 0 -#define LIBFUZZER_FREEBSD 0 -#define LIBFUZZER_OPENBSD 0 -#define LIBFUZZER_WINDOWS 0 -#define LIBFUZZER_EMSCRIPTEN 1 -#else -#error "Support for your platform has not been implemented" -#endif - -#if defined(_MSC_VER) && !defined(__clang__) -// MSVC compiler is being used. -#define LIBFUZZER_MSVC 1 -#else -#define LIBFUZZER_MSVC 0 -#endif - -#ifndef __has_attribute -# define __has_attribute(x) 0 -#endif - -#define LIBFUZZER_POSIX \ - (LIBFUZZER_APPLE || LIBFUZZER_LINUX || LIBFUZZER_NETBSD || \ - LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN) - -#ifdef __x86_64 -# if __has_attribute(target) -# define ATTRIBUTE_TARGET_POPCNT __attribute__((target("popcnt"))) -# else -# define ATTRIBUTE_TARGET_POPCNT -# endif -#else -# define ATTRIBUTE_TARGET_POPCNT -#endif - - -#ifdef __clang__ // avoid gcc warning. -# if __has_attribute(no_sanitize) -# define ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize("memory"))) -# else -# define ATTRIBUTE_NO_SANITIZE_MEMORY -# endif -# define ALWAYS_INLINE __attribute__((always_inline)) -#else -# define ATTRIBUTE_NO_SANITIZE_MEMORY -# define ALWAYS_INLINE -#endif // __clang__ - -#if LIBFUZZER_WINDOWS -#define ATTRIBUTE_NO_SANITIZE_ADDRESS -#else -#define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) -#endif - -#if LIBFUZZER_WINDOWS -#define ATTRIBUTE_ALIGNED(X) __declspec(align(X)) -#define ATTRIBUTE_INTERFACE __declspec(dllexport) -// This is used for __sancov_lowest_stack which is needed for -// -fsanitize-coverage=stack-depth. That feature is not yet available on -// Windows, so make the symbol static to avoid linking errors. 
-#define ATTRIBUTES_INTERFACE_TLS_INITIAL_EXEC static -#define ATTRIBUTE_NOINLINE __declspec(noinline) -#else -#define ATTRIBUTE_ALIGNED(X) __attribute__((aligned(X))) -#define ATTRIBUTE_INTERFACE __attribute__((visibility("default"))) -#define ATTRIBUTES_INTERFACE_TLS_INITIAL_EXEC \ - ATTRIBUTE_INTERFACE __attribute__((tls_model("initial-exec"))) thread_local - -#define ATTRIBUTE_NOINLINE __attribute__((noinline)) -#endif - -#if defined(__has_feature) -# if __has_feature(address_sanitizer) -# define ATTRIBUTE_NO_SANITIZE_ALL ATTRIBUTE_NO_SANITIZE_ADDRESS -# elif __has_feature(memory_sanitizer) -# define ATTRIBUTE_NO_SANITIZE_ALL ATTRIBUTE_NO_SANITIZE_MEMORY -# else -# define ATTRIBUTE_NO_SANITIZE_ALL -# endif -#else -# define ATTRIBUTE_NO_SANITIZE_ALL -#endif - namespace fuzzer { template <class T> T Min(T a, T b) { return a < b ? a : b; } diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDriver.cpp index dd3cab0ee8d..a847c76e292 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDriver.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerDriver.cpp @@ -16,6 +16,7 @@ #include "FuzzerInternal.h" #include "FuzzerMerge.h" #include "FuzzerMutate.h" +#include "FuzzerPlatform.h" #include "FuzzerRandom.h" #include "FuzzerTracePC.h" #include <algorithm> @@ -195,8 +196,11 @@ static void ParseFlags(const Vector<std::string> &Args, } // Disable len_control by default, if LLVMFuzzerCustomMutator is used. - if (EF->LLVMFuzzerCustomMutator) + if (EF->LLVMFuzzerCustomMutator) { Flags.len_control = 0; + Printf("INFO: found LLVMFuzzerCustomMutator (%p). " + "Disabling -len_control by default.\n", EF->LLVMFuzzerCustomMutator); + } Inputs = new Vector<std::string>; for (size_t A = 1; A < Args.size(); A++) { @@ -303,8 +307,7 @@ static bool AllInputsAreFiles() { return true; } -static std::string GetDedupTokenFromFile(const std::string &Path) { - auto S = FileToString(Path); +static std::string GetDedupTokenFromCmdOutput(const std::string &S) { auto Beg = S.find("DEDUP_TOKEN:"); if (Beg == std::string::npos) return ""; @@ -329,10 +332,9 @@ int CleanseCrashInput(const Vector<std::string> &Args, assert(Cmd.hasArgument(InputFilePath)); Cmd.removeArgument(InputFilePath); - auto LogFilePath = TempPath(".txt"); - auto TmpFilePath = TempPath(".repro"); + auto TmpFilePath = TempPath("CleanseCrashInput", ".repro"); Cmd.addArgument(TmpFilePath); - Cmd.setOutputFile(LogFilePath); + Cmd.setOutputFile(getDevNull()); Cmd.combineOutAndErr(); std::string CurrentFilePath = InputFilePath; @@ -367,7 +369,6 @@ int CleanseCrashInput(const Vector<std::string> &Args, } if (!Changed) break; } - RemoveFile(LogFilePath); return 0; } @@ -390,8 +391,6 @@ int MinimizeCrashInput(const Vector<std::string> &Args, BaseCmd.addFlag("max_total_time", "600"); } - auto LogFilePath = TempPath(".txt"); - BaseCmd.setOutputFile(LogFilePath); BaseCmd.combineOutAndErr(); std::string CurrentFilePath = InputFilePath; @@ -403,17 +402,17 @@ int MinimizeCrashInput(const Vector<std::string> &Args, Command Cmd(BaseCmd); Cmd.addArgument(CurrentFilePath); - std::string CommandLine = Cmd.toString(); - Printf("CRASH_MIN: executing: %s\n", CommandLine.c_str()); - int ExitCode = ExecuteCommand(Cmd); - if (ExitCode == 0) { + Printf("CRASH_MIN: executing: %s\n", Cmd.toString().c_str()); + std::string CmdOutput; + bool Success = ExecuteCommand(Cmd, &CmdOutput); + if (Success) { Printf("ERROR: the input %s did not crash\n", CurrentFilePath.c_str()); exit(1); } Printf("CRASH_MIN: '%s' (%zd bytes) caused a crash. 
Will try to minimize " "it further\n", CurrentFilePath.c_str(), U.size()); - auto DedupToken1 = GetDedupTokenFromFile(LogFilePath); + auto DedupToken1 = GetDedupTokenFromCmdOutput(CmdOutput); if (!DedupToken1.empty()) Printf("CRASH_MIN: DedupToken1: %s\n", DedupToken1.c_str()); @@ -423,11 +422,11 @@ int MinimizeCrashInput(const Vector<std::string> &Args, : Options.ArtifactPrefix + "minimized-from-" + Hash(U); Cmd.addFlag("minimize_crash_internal_step", "1"); Cmd.addFlag("exact_artifact_path", ArtifactPath); - CommandLine = Cmd.toString(); - Printf("CRASH_MIN: executing: %s\n", CommandLine.c_str()); - ExitCode = ExecuteCommand(Cmd); - CopyFileToErr(LogFilePath); - if (ExitCode == 0) { + Printf("CRASH_MIN: executing: %s\n", Cmd.toString().c_str()); + CmdOutput.clear(); + Success = ExecuteCommand(Cmd, &CmdOutput); + Printf("%s", CmdOutput.c_str()); + if (Success) { if (Flags.exact_artifact_path) { CurrentFilePath = Flags.exact_artifact_path; WriteToFile(U, CurrentFilePath); @@ -436,7 +435,7 @@ int MinimizeCrashInput(const Vector<std::string> &Args, CurrentFilePath.c_str(), U.size()); break; } - auto DedupToken2 = GetDedupTokenFromFile(LogFilePath); + auto DedupToken2 = GetDedupTokenFromCmdOutput(CmdOutput); if (!DedupToken2.empty()) Printf("CRASH_MIN: DedupToken2: %s\n", DedupToken2.c_str()); @@ -453,7 +452,6 @@ int MinimizeCrashInput(const Vector<std::string> &Args, CurrentFilePath = ArtifactPath; Printf("*********************************\n"); } - RemoveFile(LogFilePath); return 0; } @@ -488,7 +486,7 @@ void Merge(Fuzzer *F, FuzzingOptions &Options, const Vector<std::string> &Args, std::sort(OldCorpus.begin(), OldCorpus.end()); std::sort(NewCorpus.begin(), NewCorpus.end()); - std::string CFPath = CFPathOrNull ? CFPathOrNull : TempPath(".txt"); + std::string CFPath = CFPathOrNull ? CFPathOrNull : TempPath("Merge", ".txt"); Vector<std::string> NewFiles; Set<uint32_t> NewFeatures, NewCov; CrashResistantMerge(Args, OldCorpus, NewCorpus, &NewFiles, {}, &NewFeatures, @@ -711,6 +709,26 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { Options.CollectDataFlow = Flags.collect_data_flow; if (Flags.stop_file) Options.StopFile = Flags.stop_file; + Options.Entropic = Flags.entropic; + Options.EntropicFeatureFrequencyThreshold = + (size_t)Flags.entropic_feature_frequency_threshold; + Options.EntropicNumberOfRarestFeatures = + (size_t)Flags.entropic_number_of_rarest_features; + if (Options.Entropic) { + if (!Options.FocusFunction.empty()) { + Printf("ERROR: The parameters `--entropic` and `--focus_function` cannot " + "be used together.\n"); + exit(1); + } + Printf("INFO: Running with entropic power schedule (0x%X, %d).\n", + Options.EntropicFeatureFrequencyThreshold, + Options.EntropicNumberOfRarestFeatures); + } + struct EntropicOptions Entropic; + Entropic.Enabled = Options.Entropic; + Entropic.FeatureFrequencyThreshold = + Options.EntropicFeatureFrequencyThreshold; + Entropic.NumberOfRarestFeatures = Options.EntropicNumberOfRarestFeatures; unsigned Seed = Flags.seed; // Initialize Seed. 
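For context on the entropic wiring above: the energy that UpdateEnergy assigns to an input is the Shannon entropy of its add-one-smoothed feature incidence counts, normalized over the total incidence. A minimal standalone sketch follows (illustrative names, not the exact upstream code); it reproduces the values expected by the Entropic.ComputeEnergy test in FuzzerUnittest.cpp further down:

#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>

// Entropy of smoothed feature counts: H = log(S) - (1/S) * sum(x * log x).
double ComputeEnergy(
    const std::vector<std::pair<uint32_t, uint16_t>> &FeatureFreqs,
    size_t NumExecutedMutations, size_t GlobalNumberOfFeatures) {
  double Energy = 0.0;
  size_t SumIncidence = 0;
  // Add-one smoothing for locally discovered features.
  for (const auto &F : FeatureFreqs) {
    size_t Local = F.second + 1;
    Energy -= Local * std::log(static_cast<double>(Local));
    SumIncidence += Local;
  }
  // Locally undiscovered features smooth to 1 and contribute 1*log(1) == 0.
  SumIncidence += GlobalNumberOfFeatures - FeatureFreqs.size();
  // One locally abundant feature absorbs all executed mutations.
  size_t Abundant = NumExecutedMutations + 1;
  Energy -= Abundant * std::log(static_cast<double>(Abundant));
  SumIncidence += Abundant;
  if (SumIncidence == 0)
    return 0.0;
  return Energy / SumIncidence + std::log(static_cast<double>(SumIncidence));
}
// With FeatureFreqs = {{1,3},{2,3},{3,3}}, 0 executed mutations and 4 global
// features this yields ~1.450805, matching the unit test's expectation.
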
@@ -731,7 +749,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { Random Rand(Seed); auto *MD = new MutationDispatcher(Rand, Options); - auto *Corpus = new InputCorpus(Options.OutputCorpus); + auto *Corpus = new InputCorpus(Options.OutputCorpus, Entropic); auto *F = new Fuzzer(Callback, *Corpus, *MD, Options); for (auto &U: Dictionary) diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsDlsym.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsDlsym.cpp index dcd71345948..95233d2a10d 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsDlsym.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsDlsym.cpp @@ -10,7 +10,7 @@ // requires that clients of LibFuzzer pass ``--export-dynamic`` to the linker. // That is a complication we don't wish to expose to clients right now. //===----------------------------------------------------------------------===// -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if LIBFUZZER_APPLE #include "FuzzerExtFunctions.h" diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp index d56dab36c64..24ddc57d47d 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp @@ -11,7 +11,7 @@ // weak symbols to be undefined. That is a complication we don't want to expose // to clients right now. //===----------------------------------------------------------------------===// -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FUCHSIA || \ LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp index 55efe8f80e9..688bad1d51c 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp @@ -9,7 +9,7 @@ // compiled with MSVC. Uses weak aliases when compiled with clang. Unfortunately // the method each compiler supports is not supported by the other. //===----------------------------------------------------------------------===// -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if LIBFUZZER_WINDOWS #include "FuzzerExtFunctions.h" diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp index b2face77820..d36beba1b1b 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp @@ -8,7 +8,8 @@ // Extra coverage counters defined by user code. //===----------------------------------------------------------------------===// -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" +#include <cstdint> #if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || \ LIBFUZZER_OPENBSD || LIBFUZZER_FUCHSIA || LIBFUZZER_EMSCRIPTEN diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerFlags.def b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerFlags.def index a6741574303..832224a705d 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerFlags.def +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerFlags.def @@ -22,7 +22,7 @@ FUZZER_FLAG_INT(len_control, 100, "Try generating small inputs first, " "size up to max_len. Default value is 0, if LLVMFuzzerCustomMutator is used.") FUZZER_FLAG_STRING(seed_inputs, "A comma-separated list of input files " "to use as an additional seed corpus. 
Alternatively, an \"@\" followed by " - "the name of a file containing the comma-seperated list.") + "the name of a file containing the comma-separated list.") FUZZER_FLAG_INT(cross_over, 1, "If 1, cross over inputs.") FUZZER_FLAG_INT(mutate_depth, 5, "Apply this number of consecutive mutations to each input.") @@ -153,6 +153,14 @@ FUZZER_FLAG_STRING(focus_function, "Experimental. " "Fuzzing will focus on inputs that trigger calls to this function. " "If -focus_function=auto and -data_flow_trace is used, libFuzzer " "will choose the focus functions automatically.") +FUZZER_FLAG_INT(entropic, 0, "Experimental. Enables entropic power schedule.") +FUZZER_FLAG_INT(entropic_feature_frequency_threshold, 0xFF, "Experimental. If " + "entropic is enabled, all features which are observed less often than " + "the specified value are considered as rare.") +FUZZER_FLAG_INT(entropic_number_of_rarest_features, 100, "Experimental. If " + "entropic is enabled, we keep track of the frequencies only for the " + "Top-X least abundant features (union features that are considered as " + "rare).") FUZZER_FLAG_INT(analyze_dict, 0, "Experimental") FUZZER_DEPRECATED_FLAG(use_clang_coverage) diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerFork.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerFork.cpp index 95ed3655146..d9e6b79443e 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerFork.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerFork.cpp @@ -297,7 +297,7 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options, for (auto &Dir : CorpusDirs) GetSizedFilesFromDir(Dir, &SeedFiles); std::sort(SeedFiles.begin(), SeedFiles.end()); - Env.TempDir = TempPath(".dir"); + Env.TempDir = TempPath("FuzzWithFork", ".dir"); Env.DFTDir = DirPlusFile(Env.TempDir, "DFT"); RmDirRecursive(Env.TempDir); // in case there is a leftover from old runs. MkDir(Env.TempDir); diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIO.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIO.cpp index f0708164be8..cbb1dbe1b86 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIO.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIO.cpp @@ -151,9 +151,9 @@ void RmDirRecursive(const std::string &Dir) { [](const std::string &Path) { RemoveFile(Path); }); } -std::string TempPath(const char *Extension) { - return DirPlusFile(TmpDir(), - "libFuzzerTemp." + std::to_string(GetPid()) + Extension); +std::string TempPath(const char *Prefix, const char *Extension) { + return DirPlusFile(TmpDir(), std::string("libFuzzerTemp.") + Prefix + + std::to_string(GetPid()) + Extension); } } // namespace fuzzer diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIO.h b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIO.h index ae8dd24e373..6e4368b971f 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIO.h +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIO.h @@ -42,7 +42,7 @@ std::string DirName(const std::string &FileName); // Returns path to a TmpDir. std::string TmpDir(); -std::string TempPath(const char *Extension); +std::string TempPath(const char *Prefix, const char *Extension); bool IsInterestingCoverageFile(const std::string &FileName); diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp index fcd9b8d8b9c..aac85b08727 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// // IO functions implementation using Posix API. 
//===----------------------------------------------------------------------===// -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if LIBFUZZER_POSIX || LIBFUZZER_FUCHSIA #include "FuzzerExtFunctions.h" diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp index 56757aa09a3..651283a551c 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// // IO functions implementation for Windows. //===----------------------------------------------------------------------===// -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if LIBFUZZER_WINDOWS #include "FuzzerExtFunctions.h" diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerLoop.cpp index 451a4c17316..02db6d27b0a 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerLoop.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerLoop.cpp @@ -12,6 +12,7 @@ #include "FuzzerIO.h" #include "FuzzerInternal.h" #include "FuzzerMutate.h" +#include "FuzzerPlatform.h" #include "FuzzerRandom.h" #include "FuzzerTracePC.h" #include <algorithm> @@ -256,7 +257,7 @@ void Fuzzer::ExitCallback() { void Fuzzer::MaybeExitGracefully() { if (!F->GracefulExitRequested) return; Printf("==%lu== INFO: libFuzzer: exiting as requested\n", GetPid()); - RmDirRecursive(TempPath(".dir")); + RmDirRecursive(TempPath("FuzzWithFork", ".dir")); F->PrintFinalStats(); _Exit(0); } @@ -265,7 +266,7 @@ void Fuzzer::InterruptCallback() { Printf("==%lu== libFuzzer: run interrupted; exiting\n", GetPid()); PrintFinalStats(); ScopedDisableMsanInterceptorChecks S; // RmDirRecursive may call opendir(). - RmDirRecursive(TempPath(".dir")); + RmDirRecursive(TempPath("FuzzWithFork", ".dir")); // Stop right now, don't perform any at-exit actions. 
_Exit(Options.InterruptExitCode); } @@ -475,6 +476,8 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile, TPC.CollectFeatures([&](size_t Feature) { if (Corpus.AddFeature(Feature, Size, Options.Shrink)) UniqFeatureSetTmp.push_back(Feature); + if (Options.Entropic) + Corpus.UpdateFeatureFrequency(II, Feature); if (Options.ReduceInputs && II) if (std::binary_search(II->UniqFeatureSet.begin(), II->UniqFeatureSet.end(), Feature)) @@ -693,6 +696,7 @@ void Fuzzer::MutateAndTestOne() { assert(NewSize <= CurrentMaxMutationLen && "Mutator return oversized unit"); Size = NewSize; II.NumExecutedMutations++; + Corpus.IncrementNumExecutedMutations(); bool FoundUniqFeatures = false; bool NewCov = RunOne(CurrentUnitData, Size, /*MayDeleteFile=*/true, &II, @@ -706,6 +710,8 @@ void Fuzzer::MutateAndTestOne() { if (Options.ReduceDepth && !FoundUniqFeatures) break; } + + II.NeedsEnergyUpdate = true; } void Fuzzer::PurgeAllocator() { @@ -770,12 +776,14 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) { } PrintStats("INITED"); - if (!Options.FocusFunction.empty()) + if (!Options.FocusFunction.empty()) { Printf("INFO: %zd/%zd inputs touch the focus function\n", Corpus.NumInputsThatTouchFocusFunction(), Corpus.size()); - if (!Options.DataFlowTrace.empty()) - Printf("INFO: %zd/%zd inputs have the Data Flow Trace\n", - Corpus.NumInputsWithDataFlowTrace(), Corpus.size()); + if (!Options.DataFlowTrace.empty()) + Printf("INFO: %zd/%zd inputs have the Data Flow Trace\n", + Corpus.NumInputsWithDataFlowTrace(), + Corpus.NumInputsThatTouchFocusFunction()); + } if (Corpus.empty() && Options.MaxNumberOfRuns) { Printf("ERROR: no interesting inputs were found. " diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerMain.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerMain.cpp index 771a34aed31..75f2f8e75c9 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerMain.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerMain.cpp @@ -9,6 +9,7 @@ //===----------------------------------------------------------------------===// #include "FuzzerDefs.h" +#include "FuzzerPlatform.h" extern "C" { // This function should be defined by the user. diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerMerge.h b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerMerge.h index c14dd589e62..e0c6bc539bd 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerMerge.h +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerMerge.h @@ -13,7 +13,7 @@ // The process should tolerate the crashes, OOMs, leaks, etc. // // Algorithm: -// The outter process collects the set of files and writes their names +// The outer process collects the set of files and writes their names // into a temporary "control" file, then repeatedly launches the inner // process until all inputs are processed. // The outer process does not actually execute the target code. @@ -22,13 +22,14 @@ // and b) the last processed input. Then it starts processing the inputs one // by one. Before processing every input it writes one line to control file: // STARTED INPUT_ID INPUT_SIZE -// After processing an input it write another line: -// DONE INPUT_ID Feature1 Feature2 Feature3 ... +// After processing an input it writes the following lines: +// FT INPUT_ID Feature1 Feature2 Feature3 ... +// COV INPUT_ID Coverage1 Coverage2 Coverage3 ... // If a crash happens while processing an input the last line in the control // file will be "STARTED INPUT_ID" and so the next process will know // where to resume. 
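// For illustration, with hypothetical IDs, sizes and feature values, a
// control file in which the second input crashed mid-processing would read:
//   STARTED 0 42
//   FT 0 101 202 303
//   COV 0 11 22
//   STARTED 1 1337
// The STARTED line without matching FT/COV lines tells the next inner
// process that input 1 was being processed when the crash happened.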
// -// Once all inputs are processed by the innner process(es) the outer process +// Once all inputs are processed by the inner process(es) the outer process // reads the control files and does the merge based entirely on the contents // of control file. // It uses a single pass greedy algorithm choosing first the smallest inputs diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerOptions.h b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerOptions.h index beecc980380..9d975bd61fe 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerOptions.h +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerOptions.h @@ -44,6 +44,9 @@ struct FuzzingOptions { size_t MaxNumberOfRuns = -1L; int ReportSlowUnits = 10; bool OnlyASCII = false; + bool Entropic = false; + size_t EntropicFeatureFrequencyThreshold = 0xFF; + size_t EntropicNumberOfRarestFeatures = 100; std::string OutputCorpus; std::string ArtifactPrefix = "./"; std::string ExactArtifactPath; diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerPlatform.h b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerPlatform.h new file mode 100644 index 00000000000..8befdb882cc --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerPlatform.h @@ -0,0 +1,163 @@ +//===-- FuzzerPlatform.h --------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// Common platform macros. +//===----------------------------------------------------------------------===// + +#ifndef LLVM_FUZZER_PLATFORM_H +#define LLVM_FUZZER_PLATFORM_H + +// Platform detection. +#ifdef __linux__ +#define LIBFUZZER_APPLE 0 +#define LIBFUZZER_FUCHSIA 0 +#define LIBFUZZER_LINUX 1 +#define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 +#define LIBFUZZER_WINDOWS 0 +#define LIBFUZZER_EMSCRIPTEN 0 +#elif __APPLE__ +#define LIBFUZZER_APPLE 1 +#define LIBFUZZER_FUCHSIA 0 +#define LIBFUZZER_LINUX 0 +#define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 +#define LIBFUZZER_WINDOWS 0 +#define LIBFUZZER_EMSCRIPTEN 0 +#elif __NetBSD__ +#define LIBFUZZER_APPLE 0 +#define LIBFUZZER_FUCHSIA 0 +#define LIBFUZZER_LINUX 0 +#define LIBFUZZER_NETBSD 1 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 +#define LIBFUZZER_WINDOWS 0 +#define LIBFUZZER_EMSCRIPTEN 0 +#elif __FreeBSD__ +#define LIBFUZZER_APPLE 0 +#define LIBFUZZER_FUCHSIA 0 +#define LIBFUZZER_LINUX 0 +#define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 1 +#define LIBFUZZER_OPENBSD 0 +#define LIBFUZZER_WINDOWS 0 +#define LIBFUZZER_EMSCRIPTEN 0 +#elif __OpenBSD__ +#define LIBFUZZER_APPLE 0 +#define LIBFUZZER_FUCHSIA 0 +#define LIBFUZZER_LINUX 0 +#define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 1 +#define LIBFUZZER_WINDOWS 0 +#define LIBFUZZER_EMSCRIPTEN 0 +#elif _WIN32 +#define LIBFUZZER_APPLE 0 +#define LIBFUZZER_FUCHSIA 0 +#define LIBFUZZER_LINUX 0 +#define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 +#define LIBFUZZER_WINDOWS 1 +#define LIBFUZZER_EMSCRIPTEN 0 +#elif __Fuchsia__ +#define LIBFUZZER_APPLE 0 +#define LIBFUZZER_FUCHSIA 1 +#define LIBFUZZER_LINUX 0 +#define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 +#define LIBFUZZER_WINDOWS 0 +#define LIBFUZZER_EMSCRIPTEN 0 +#elif __EMSCRIPTEN__ +#define LIBFUZZER_APPLE 0 +#define 
LIBFUZZER_FUCHSIA 0 +#define LIBFUZZER_LINUX 0 +#define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 +#define LIBFUZZER_WINDOWS 0 +#define LIBFUZZER_EMSCRIPTEN 1 +#else +#error "Support for your platform has not been implemented" +#endif + +#if defined(_MSC_VER) && !defined(__clang__) +// MSVC compiler is being used. +#define LIBFUZZER_MSVC 1 +#else +#define LIBFUZZER_MSVC 0 +#endif + +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif + +#define LIBFUZZER_POSIX \ + (LIBFUZZER_APPLE || LIBFUZZER_LINUX || LIBFUZZER_NETBSD || \ + LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN) + +#ifdef __x86_64 +#if __has_attribute(target) +#define ATTRIBUTE_TARGET_POPCNT __attribute__((target("popcnt"))) +#else +#define ATTRIBUTE_TARGET_POPCNT +#endif +#else +#define ATTRIBUTE_TARGET_POPCNT +#endif + +#ifdef __clang__ // avoid gcc warning. +#if __has_attribute(no_sanitize) +#define ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize("memory"))) +#else +#define ATTRIBUTE_NO_SANITIZE_MEMORY +#endif +#define ALWAYS_INLINE __attribute__((always_inline)) +#else +#define ATTRIBUTE_NO_SANITIZE_MEMORY +#define ALWAYS_INLINE +#endif // __clang__ + +#if LIBFUZZER_WINDOWS +#define ATTRIBUTE_NO_SANITIZE_ADDRESS +#else +#define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) +#endif + +#if LIBFUZZER_WINDOWS +#define ATTRIBUTE_ALIGNED(X) __declspec(align(X)) +#define ATTRIBUTE_INTERFACE __declspec(dllexport) +// This is used for __sancov_lowest_stack which is needed for +// -fsanitize-coverage=stack-depth. That feature is not yet available on +// Windows, so make the symbol static to avoid linking errors. +#define ATTRIBUTES_INTERFACE_TLS_INITIAL_EXEC static +#define ATTRIBUTE_NOINLINE __declspec(noinline) +#else +#define ATTRIBUTE_ALIGNED(X) __attribute__((aligned(X))) +#define ATTRIBUTE_INTERFACE __attribute__((visibility("default"))) +#define ATTRIBUTES_INTERFACE_TLS_INITIAL_EXEC \ + ATTRIBUTE_INTERFACE __attribute__((tls_model("initial-exec"))) thread_local + +#define ATTRIBUTE_NOINLINE __attribute__((noinline)) +#endif + +#if defined(__has_feature) +#if __has_feature(address_sanitizer) +#define ATTRIBUTE_NO_SANITIZE_ALL ATTRIBUTE_NO_SANITIZE_ADDRESS +#elif __has_feature(memory_sanitizer) +#define ATTRIBUTE_NO_SANITIZE_ALL ATTRIBUTE_NO_SANITIZE_MEMORY +#else +#define ATTRIBUTE_NO_SANITIZE_ALL +#endif +#else +#define ATTRIBUTE_NO_SANITIZE_ALL +#endif + +#endif // LLVM_FUZZER_PLATFORM_H diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerSHA1.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerSHA1.cpp index 43e5e78cd78..2005dc70030 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerSHA1.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerSHA1.cpp @@ -17,6 +17,7 @@ #include "FuzzerSHA1.h" #include "FuzzerDefs.h" +#include "FuzzerPlatform.h" /* This code is public-domain - it is based on libcrypt * placed in the public domain by Wei Dai and other contributors. 
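The new FuzzerPlatform.h centralizes what the hunks above and below consume: each platform-specific translation unit swaps its FuzzerDefs.h include for FuzzerPlatform.h and gates itself on one of the LIBFUZZER_* macros. A minimal sketch of the pattern (hypothetical file, not part of the patch):

#include "FuzzerPlatform.h"
#if LIBFUZZER_POSIX
// POSIX-only implementation goes here, as in FuzzerUtilPosix.cpp.
#endif
#if LIBFUZZER_WINDOWS
// Windows-only implementation goes here, as in FuzzerUtilWindows.cpp.
#endif
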
diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp index f03be7a3950..b2ca7693e54 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp @@ -19,6 +19,7 @@ #include "FuzzerDictionary.h" #include "FuzzerExtFunctions.h" #include "FuzzerIO.h" +#include "FuzzerPlatform.h" #include "FuzzerUtil.h" #include "FuzzerValueBitMap.h" #include <set> @@ -240,7 +241,9 @@ void TracePC::IterateCoveredFunctions(CallBack CB) { void TracePC::SetFocusFunction(const std::string &FuncName) { // This function should be called once. assert(!FocusFunctionCounterPtr); - if (FuncName.empty()) + // "auto" is not a valid function name. If this function is called with "auto" + // that means the auto focus functionality failed. + if (FuncName.empty() || FuncName == "auto") return; for (size_t M = 0; M < NumModules; M++) { auto &PCTE = ModulePCTable[M]; @@ -256,6 +259,10 @@ void TracePC::SetFocusFunction(const std::string &FuncName) { return; } } + + Printf("ERROR: Failed to set focus function. Make sure the function name is " + "valid (%s) and symbolization is enabled.\n", FuncName.c_str()); + exit(1); } bool TracePC::ObservedFocusFunction() { diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtil.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtil.cpp index 7aa84a1faad..7eecb68d072 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtil.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtil.cpp @@ -151,32 +151,37 @@ bool ParseDictionaryFile(const std::string &Text, Vector<Unit> *Units) { return true; } +// Code duplicated (and tested) in llvm/include/llvm/Support/Base64.h std::string Base64(const Unit &U) { static const char Table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz" "0123456789+/"; - std::string Res; - size_t i; - for (i = 0; i + 2 < U.size(); i += 3) { - uint32_t x = (U[i] << 16) + (U[i + 1] << 8) + U[i + 2]; - Res += Table[(x >> 18) & 63]; - Res += Table[(x >> 12) & 63]; - Res += Table[(x >> 6) & 63]; - Res += Table[x & 63]; + std::string Buffer; + Buffer.resize(((U.size() + 2) / 3) * 4); + + size_t i = 0, j = 0; + for (size_t n = U.size() / 3 * 3; i < n; i += 3, j += 4) { + uint32_t x = ((unsigned char)U[i] << 16) | ((unsigned char)U[i + 1] << 8) | + (unsigned char)U[i + 2]; + Buffer[j + 0] = Table[(x >> 18) & 63]; + Buffer[j + 1] = Table[(x >> 12) & 63]; + Buffer[j + 2] = Table[(x >> 6) & 63]; + Buffer[j + 3] = Table[x & 63]; } if (i + 1 == U.size()) { - uint32_t x = (U[i] << 16); - Res += Table[(x >> 18) & 63]; - Res += Table[(x >> 12) & 63]; - Res += "=="; + uint32_t x = ((unsigned char)U[i] << 16); + Buffer[j + 0] = Table[(x >> 18) & 63]; + Buffer[j + 1] = Table[(x >> 12) & 63]; + Buffer[j + 2] = '='; + Buffer[j + 3] = '='; } else if (i + 2 == U.size()) { - uint32_t x = (U[i] << 16) + (U[i + 1] << 8); - Res += Table[(x >> 18) & 63]; - Res += Table[(x >> 12) & 63]; - Res += Table[(x >> 6) & 63]; - Res += "="; + uint32_t x = ((unsigned char)U[i] << 16) | ((unsigned char)U[i + 1] << 8); + Buffer[j + 0] = Table[(x >> 18) & 63]; + Buffer[j + 1] = Table[(x >> 12) & 63]; + Buffer[j + 2] = Table[(x >> 6) & 63]; + Buffer[j + 3] = '='; } - return Res; + return Buffer; } static std::mutex SymbolizeMutex; diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtil.h b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtil.h index 00ea6550646..4ae35838306 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtil.h +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtil.h @@ -57,8 +57,11 @@ unsigned long 
GetPid(); size_t GetPeakRSSMb(); int ExecuteCommand(const Command &Cmd); +bool ExecuteCommand(const Command &Cmd, std::string *CmdOutput); +// Fuchsia does not have popen/pclose. FILE *OpenProcessPipe(const char *Command, const char *Mode); +int CloseProcessPipe(FILE *F); const void *SearchMemory(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen); diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp index d449bc248f0..a5bed658a44 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// // Misc utils for Darwin. //===----------------------------------------------------------------------===// -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if LIBFUZZER_APPLE #include "FuzzerCommand.h" #include "FuzzerIO.h" diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp index bde9f68d62a..190fb786664 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// // Misc utils implementation using Fuchsia/Zircon APIs. //===----------------------------------------------------------------------===// -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if LIBFUZZER_FUCHSIA @@ -34,6 +34,8 @@ #include <zircon/syscalls/object.h> #include <zircon/types.h> +#include <vector> + namespace fuzzer { // Given that Fuchsia doesn't have the POSIX signals that libFuzzer was written @@ -424,6 +426,17 @@ RunOnDestruction<Fn> at_scope_exit(Fn fn) { return RunOnDestruction<Fn>(fn); } +static fdio_spawn_action_t clone_fd_action(int localFd, int targetFd) { + return { + .action = FDIO_SPAWN_ACTION_CLONE_FD, + .fd = + { + .local_fd = localFd, + .target_fd = targetFd, + }, + }; +} + int ExecuteCommand(const Command &Cmd) { zx_status_t rc; @@ -442,17 +455,26 @@ int ExecuteCommand(const Command &Cmd) { // so write the log file(s) there. // However, we don't want to apply this logic for absolute paths. int FdOut = STDOUT_FILENO; + bool discardStdout = false; + bool discardStderr = false; + if (Cmd.hasOutputFile()) { std::string Path = Cmd.getOutputFile(); - bool IsAbsolutePath = Path.length() > 1 && Path[0] == '/'; - if (!IsAbsolutePath && Cmd.hasFlag("artifact_prefix")) - Path = Cmd.getFlagValue("artifact_prefix") + "/" + Path; - - FdOut = open(Path.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0); - if (FdOut == -1) { - Printf("libFuzzer: failed to open %s: %s\n", Path.c_str(), - strerror(errno)); - return ZX_ERR_IO; + if (Path == getDevNull()) { + // On Fuchsia, there's no "/dev/null" like-file, so we + // just don't copy the FDs into the spawned process. 
+ discardStdout = true; + } else { + bool IsAbsolutePath = Path.length() > 1 && Path[0] == '/'; + if (!IsAbsolutePath && Cmd.hasFlag("artifact_prefix")) + Path = Cmd.getFlagValue("artifact_prefix") + "/" + Path; + + FdOut = open(Path.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0); + if (FdOut == -1) { + Printf("libFuzzer: failed to open %s: %s\n", Path.c_str(), + strerror(errno)); + return ZX_ERR_IO; + } } } auto CloseFdOut = at_scope_exit([FdOut]() { @@ -462,43 +484,29 @@ int ExecuteCommand(const Command &Cmd) { // Determine stderr int FdErr = STDERR_FILENO; - if (Cmd.isOutAndErrCombined()) + if (Cmd.isOutAndErrCombined()) { FdErr = FdOut; + if (discardStdout) + discardStderr = true; + } // Clone the file descriptors into the new process - fdio_spawn_action_t SpawnAction[] = { - { - .action = FDIO_SPAWN_ACTION_CLONE_FD, - .fd = - { - .local_fd = STDIN_FILENO, - .target_fd = STDIN_FILENO, - }, - }, - { - .action = FDIO_SPAWN_ACTION_CLONE_FD, - .fd = - { - .local_fd = FdOut, - .target_fd = STDOUT_FILENO, - }, - }, - { - .action = FDIO_SPAWN_ACTION_CLONE_FD, - .fd = - { - .local_fd = FdErr, - .target_fd = STDERR_FILENO, - }, - }, - }; + std::vector<fdio_spawn_action_t> SpawnActions; + SpawnActions.push_back(clone_fd_action(STDIN_FILENO, STDIN_FILENO)); + + if (!discardStdout) + SpawnActions.push_back(clone_fd_action(FdOut, STDOUT_FILENO)); + if (!discardStderr) + SpawnActions.push_back(clone_fd_action(FdErr, STDERR_FILENO)); // Start the process. char ErrorMsg[FDIO_SPAWN_ERR_MSG_MAX_LENGTH]; zx_handle_t ProcessHandle = ZX_HANDLE_INVALID; - rc = fdio_spawn_etc( - ZX_HANDLE_INVALID, FDIO_SPAWN_CLONE_ALL & (~FDIO_SPAWN_CLONE_STDIO), - Argv[0], Argv.get(), nullptr, 3, SpawnAction, &ProcessHandle, ErrorMsg); + rc = fdio_spawn_etc(ZX_HANDLE_INVALID, + FDIO_SPAWN_CLONE_ALL & (~FDIO_SPAWN_CLONE_STDIO), Argv[0], + Argv.get(), nullptr, SpawnActions.size(), + SpawnActions.data(), &ProcessHandle, ErrorMsg); + if (rc != ZX_OK) { Printf("libFuzzer: failed to launch '%s': %s, %s\n", Argv[0], ErrorMsg, _zx_status_get_string(rc)); @@ -525,6 +533,16 @@ int ExecuteCommand(const Command &Cmd) { return Info.return_code; } +bool ExecuteCommand(const Command &BaseCmd, std::string *CmdOutput) { + auto LogFilePath = TempPath("SimPopenOut", ".txt"); + Command Cmd(BaseCmd); + Cmd.setOutputFile(LogFilePath); + int Ret = ExecuteCommand(Cmd); + *CmdOutput = FileToString(LogFilePath); + RemoveFile(LogFilePath); + return Ret == 0; +} + const void *SearchMemory(const void *Data, size_t DataLen, const void *Patt, size_t PattLen) { return memmem(Data, DataLen, Patt, PattLen); diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp index 993023e7039..95490b992e0 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// // Misc utils for Linux. 
//===----------------------------------------------------------------------===// -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || \ LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN #include "FuzzerCommand.h" diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp index 8048e6a8afd..fc57b724db1 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// // Misc utils implementation using Posix API. //===----------------------------------------------------------------------===// -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if LIBFUZZER_POSIX #include "FuzzerIO.h" #include "FuzzerInternal.h" @@ -86,6 +86,20 @@ static void SetSigaction(int signum, } } +// Return true on success, false otherwise. +bool ExecuteCommand(const Command &Cmd, std::string *CmdOutput) { + FILE *Pipe = popen(Cmd.toString().c_str(), "r"); + if (!Pipe) + return false; + + if (CmdOutput) { + char TmpBuffer[128]; + while (fgets(TmpBuffer, sizeof(TmpBuffer), Pipe)) + CmdOutput->append(TmpBuffer); + } + return pclose(Pipe) == 0; +} + void SetTimer(int Seconds) { struct itimerval T { {Seconds, 0}, { Seconds, 0 } @@ -149,6 +163,10 @@ FILE *OpenProcessPipe(const char *Command, const char *Mode) { return popen(Command, Mode); } +int CloseProcessPipe(FILE *F) { + return pclose(F); +} + const void *SearchMemory(const void *Data, size_t DataLen, const void *Patt, size_t PattLen) { return memmem(Data, DataLen, Patt, PattLen); diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp index 527e7dbd1cf..6c693e3d7ee 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// // Misc utils implementation for Windows. //===----------------------------------------------------------------------===// -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" #if LIBFUZZER_WINDOWS #include "FuzzerCommand.h" #include "FuzzerIO.h" @@ -152,11 +152,28 @@ FILE *OpenProcessPipe(const char *Command, const char *Mode) { return _popen(Command, Mode); } +int CloseProcessPipe(FILE *F) { + return _pclose(F); +} + int ExecuteCommand(const Command &Cmd) { std::string CmdLine = Cmd.toString(); return system(CmdLine.c_str()); } +bool ExecuteCommand(const Command &Cmd, std::string *CmdOutput) { + FILE *Pipe = _popen(Cmd.toString().c_str(), "r"); + if (!Pipe) + return false; + + if (CmdOutput) { + char TmpBuffer[128]; + while (fgets(TmpBuffer, sizeof(TmpBuffer), Pipe)) + CmdOutput->append(TmpBuffer); + } + return _pclose(Pipe) == 0; +} + const void *SearchMemory(const void *Data, size_t DataLen, const void *Patt, size_t PattLen) { // TODO: make this implementation more efficient. 
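The POSIX and Windows ExecuteCommand overloads above capture output with the same popen/fgets/pclose pattern, while Fuchsia, which lacks popen, emulates it by redirecting into a temporary file. As a standalone sketch of the shared POSIX pattern (illustrative function name; the command-line string stands in for Command::toString()):

#include <stdio.h>
#include <string>

// Runs CmdLine through the shell, appending its stdout to *Output.
// Returns true iff the command exited with status 0.
bool RunAndCaptureOutput(const std::string &CmdLine, std::string *Output) {
  FILE *Pipe = popen(CmdLine.c_str(), "r");
  if (!Pipe)
    return false;
  char Buf[128];
  while (fgets(Buf, sizeof(Buf), Pipe))
    if (Output)
      Output->append(Buf);
  return pclose(Pipe) == 0;
}
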
diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerValueBitMap.h b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerValueBitMap.h index bc039f1df27..ddbfe200af9 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerValueBitMap.h +++ b/gnu/llvm/compiler-rt/lib/fuzzer/FuzzerValueBitMap.h @@ -11,7 +11,8 @@ #ifndef LLVM_FUZZER_VALUE_BIT_MAP_H #define LLVM_FUZZER_VALUE_BIT_MAP_H -#include "FuzzerDefs.h" +#include "FuzzerPlatform.h" +#include <cstdint> namespace fuzzer { diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/afl/afl_driver.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/afl/afl_driver.cpp index bb3b48f3672..457f180ecc8 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/afl/afl_driver.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/afl/afl_driver.cpp @@ -111,7 +111,7 @@ static uint8_t AflInputBuf[kMaxAflInputSize]; // Use this optionally defined function to output sanitizer messages even if // user asks to close stderr. -__attribute__((weak)) extern "C" void __sanitizer_set_report_fd(void *); +extern "C" __attribute__((weak)) void __sanitizer_set_report_fd(void *); // Keep track of where stderr content is being written to, so that // dup_and_close_stderr can use the correct one. diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/tests/FuzzedDataProviderUnittest.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/tests/FuzzedDataProviderUnittest.cpp index 222283434eb..99d9d8ecbe9 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/tests/FuzzedDataProviderUnittest.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/tests/FuzzedDataProviderUnittest.cpp @@ -190,14 +190,26 @@ TEST(FuzzedDataProvider, ConsumeRandomLengthString) { "\x1D\xBD\x4E\x17\x04\x1E\xBA\x26\xAC\x1F\xE3\x37\x1C\x15\x43" "\x60\x41\x2A\x7C\xCA\x70\xCE\xAB\x20\x24\xF8\xD9\x1F\x14\x7C"), DataProv.ConsumeRandomLengthString(31337)); - EXPECT_EQ(std::string(Data + 141, Data + 141 + 5), + size_t Offset = 141; + EXPECT_EQ(std::string(Data + Offset, Data + Offset + 5), DataProv.ConsumeRandomLengthString(5)); - EXPECT_EQ(std::string(Data + 141 + 5, Data + 141 + 5 + 2), + Offset += 5; + EXPECT_EQ(std::string(Data + Offset, Data + Offset + 2), DataProv.ConsumeRandomLengthString(2)); + Offset += 2; + + // Call the overloaded method without arguments (uses max length available). + EXPECT_EQ(std::string(Data + Offset, Data + Offset + 664), + DataProv.ConsumeRandomLengthString()); + Offset += 664 + 2; // +2 because of '\' character followed by any other byte. + + EXPECT_EQ(std::string(Data + Offset, Data + Offset + 92), + DataProv.ConsumeRandomLengthString()); + Offset += 92 + 2; // Exhaust the buffer. auto String = DataProv.ConsumeBytesAsString(31337); - EXPECT_EQ(size_t(876), String.length()); + EXPECT_EQ(size_t(116), String.length()); EXPECT_EQ(std::string(), DataProv.ConsumeRandomLengthString(1)); } @@ -399,6 +411,25 @@ TEST(FuzzedDataProvider, ConsumeFloatingPoint) { -13.37, 31.337)); } +TEST(FuzzedDataProvider, ConsumeData) { + FuzzedDataProvider DataProv(Data, sizeof(Data)); + uint8_t Buffer[10] = {}; + EXPECT_EQ(sizeof(Buffer), DataProv.ConsumeData(Buffer, sizeof(Buffer))); + std::vector<uint8_t> Expected(Data, Data + sizeof(Buffer)); + EXPECT_EQ(Expected, std::vector<uint8_t>(Buffer, Buffer + sizeof(Buffer))); + + EXPECT_EQ(size_t(2), DataProv.ConsumeData(Buffer, 2)); + Expected[0] = Data[sizeof(Buffer)]; + Expected[1] = Data[sizeof(Buffer) + 1]; + EXPECT_EQ(Expected, std::vector<uint8_t>(Buffer, Buffer + sizeof(Buffer))); + + // Exhaust the buffer. 
+ EXPECT_EQ(std::vector<uint8_t>(Data + 12, Data + sizeof(Data)), + DataProv.ConsumeRemainingBytes<uint8_t>()); + EXPECT_EQ(size_t(0), DataProv.ConsumeData(Buffer, sizeof(Buffer))); + EXPECT_EQ(Expected, std::vector<uint8_t>(Buffer, Buffer + sizeof(Buffer))); +} + int main(int argc, char **argv) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); diff --git a/gnu/llvm/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp b/gnu/llvm/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp index 7fc4b9a55b0..0e9435ab8fc 100644 --- a/gnu/llvm/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp +++ b/gnu/llvm/compiler-rt/lib/fuzzer/tests/FuzzerUnittest.cpp @@ -592,7 +592,8 @@ TEST(FuzzerUtil, Base64) { TEST(Corpus, Distribution) { DataFlowTrace DFT; Random Rand(0); - std::unique_ptr<InputCorpus> C(new InputCorpus("")); + struct EntropicOptions Entropic = {false, 0xFF, 100}; + std::unique_ptr<InputCorpus> C(new InputCorpus("", Entropic)); size_t N = 10; size_t TriesPerUnit = 1<<16; for (size_t i = 0; i < N; i++) @@ -1050,6 +1051,68 @@ TEST(FuzzerCommand, SetOutput) { EXPECT_EQ(CmdLine, makeCmdLine("", ">thud 2>&1")); } +TEST(Entropic, UpdateFrequency) { + const size_t One = 1, Two = 2; + const size_t FeatIdx1 = 0, FeatIdx2 = 42, FeatIdx3 = 12, FeatIdx4 = 26; + size_t Index; + // Create input corpus with default entropic configuration + struct EntropicOptions Entropic = {true, 0xFF, 100}; + std::unique_ptr<InputCorpus> C(new InputCorpus("", Entropic)); + std::unique_ptr<InputInfo> II(new InputInfo()); + + C->AddRareFeature(FeatIdx1); + C->UpdateFeatureFrequency(II.get(), FeatIdx1); + EXPECT_EQ(II->FeatureFreqs.size(), One); + C->AddRareFeature(FeatIdx2); + C->UpdateFeatureFrequency(II.get(), FeatIdx1); + C->UpdateFeatureFrequency(II.get(), FeatIdx2); + EXPECT_EQ(II->FeatureFreqs.size(), Two); + EXPECT_EQ(II->FeatureFreqs[0].second, 2); + EXPECT_EQ(II->FeatureFreqs[1].second, 1); + + C->AddRareFeature(FeatIdx3); + C->AddRareFeature(FeatIdx4); + C->UpdateFeatureFrequency(II.get(), FeatIdx3); + C->UpdateFeatureFrequency(II.get(), FeatIdx3); + C->UpdateFeatureFrequency(II.get(), FeatIdx3); + C->UpdateFeatureFrequency(II.get(), FeatIdx4); + + for (Index = 1; Index < II->FeatureFreqs.size(); Index++) + EXPECT_LT(II->FeatureFreqs[Index - 1].first, II->FeatureFreqs[Index].first); + + II->DeleteFeatureFreq(FeatIdx3); + for (Index = 1; Index < II->FeatureFreqs.size(); Index++) + EXPECT_LT(II->FeatureFreqs[Index - 1].first, II->FeatureFreqs[Index].first); +} + +double SubAndSquare(double X, double Y) { + double R = X - Y; + R = R * R; + return R; +} + +TEST(Entropic, ComputeEnergy) { + const double Precision = 0.01; + struct EntropicOptions Entropic = {true, 0xFF, 100}; + std::unique_ptr<InputCorpus> C(new InputCorpus("", Entropic)); + std::unique_ptr<InputInfo> II(new InputInfo()); + Vector<std::pair<uint32_t, uint16_t>> FeatureFreqs = {{1, 3}, {2, 3}, {3, 3}}; + II->FeatureFreqs = FeatureFreqs; + II->NumExecutedMutations = 0; + II->UpdateEnergy(4); + EXPECT_LT(SubAndSquare(II->Energy, 1.450805), Precision); + + II->NumExecutedMutations = 9; + II->UpdateEnergy(5); + EXPECT_LT(SubAndSquare(II->Energy, 1.525496), Precision); + + II->FeatureFreqs[0].second++; + II->FeatureFreqs.push_back(std::pair<uint32_t, uint16_t>(42, 6)); + II->NumExecutedMutations = 20; + II->UpdateEnergy(10); + EXPECT_LT(SubAndSquare(II->Energy, 1.792831), Precision); +} + int main(int argc, char **argv) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/CMakeLists.txt 
b/gnu/llvm/compiler-rt/lib/gwp_asan/CMakeLists.txt index afdd624017a..cb550bd1214 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/CMakeLists.txt @@ -3,14 +3,20 @@ add_compiler_rt_component(gwp_asan) include_directories(..) set(GWP_ASAN_SOURCES + common.cpp + crash_handler.cpp + platform_specific/common_posix.cpp platform_specific/guarded_pool_allocator_posix.cpp platform_specific/mutex_posix.cpp + platform_specific/utilities_posix.cpp guarded_pool_allocator.cpp random.cpp stack_trace_compressor.cpp ) set(GWP_ASAN_HEADERS + common.h + crash_handler.h definitions.h guarded_pool_allocator.h mutex.h @@ -18,6 +24,7 @@ set(GWP_ASAN_HEADERS options.inc random.h stack_trace_compressor.h + utilities.h ) # Ensure that GWP-ASan meets the delegated requirements of some supporting @@ -50,6 +57,9 @@ set(GWP_ASAN_BACKTRACE_HEADERS options.h options.inc ) +set(GWP_ASAN_SEGV_HANDLER_HEADERS + optional/segv_handler.h + options.h) set(GWP_ASAN_OPTIONS_PARSER_CFLAGS ${GWP_ASAN_CFLAGS} @@ -94,6 +104,11 @@ if (COMPILER_RT_HAS_GWP_ASAN) SOURCES optional/backtrace_linux_libc.cpp ADDITIONAL_HEADERS ${GWP_ASAN_BACKTRACE_HEADERS} CFLAGS ${GWP_ASAN_CFLAGS}) + add_compiler_rt_object_libraries(RTGwpAsanSegvHandler + ARCHS ${GWP_ASAN_SUPPORTED_ARCH} + SOURCES optional/segv_handler_posix.cpp + ADDITIONAL_HEADERS ${GWP_ASAN_SEGV_HANDLER_HEADERS} + CFLAGS ${GWP_ASAN_CFLAGS}) add_compiler_rt_object_libraries(RTGwpAsanBacktraceSanitizerCommon ARCHS ${GWP_ASAN_SUPPORTED_ARCH} SOURCES optional/backtrace_sanitizer_common.cpp diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/common.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/common.cpp new file mode 100644 index 00000000000..3438c4b9189 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/common.cpp @@ -0,0 +1,105 @@ +//===-- common.cpp ----------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/common.h" +#include "gwp_asan/stack_trace_compressor.h" + +#include <assert.h> + +using AllocationMetadata = gwp_asan::AllocationMetadata; +using Error = gwp_asan::Error; + +namespace gwp_asan { + +const char *ErrorToString(const Error &E) { + switch (E) { + case Error::UNKNOWN: + return "Unknown"; + case Error::USE_AFTER_FREE: + return "Use After Free"; + case Error::DOUBLE_FREE: + return "Double Free"; + case Error::INVALID_FREE: + return "Invalid (Wild) Free"; + case Error::BUFFER_OVERFLOW: + return "Buffer Overflow"; + case Error::BUFFER_UNDERFLOW: + return "Buffer Underflow"; + } + __builtin_trap(); +} + +void AllocationMetadata::RecordAllocation(uintptr_t AllocAddr, + size_t AllocSize) { + Addr = AllocAddr; + Size = AllocSize; + IsDeallocated = false; + + AllocationTrace.ThreadID = getThreadID(); + DeallocationTrace.TraceSize = 0; + DeallocationTrace.ThreadID = kInvalidThreadID; +} + +void AllocationMetadata::RecordDeallocation() { + IsDeallocated = true; + DeallocationTrace.ThreadID = getThreadID(); +} + +void AllocationMetadata::CallSiteInfo::RecordBacktrace( + options::Backtrace_t Backtrace) { + TraceSize = 0; + if (!Backtrace) + return; + + uintptr_t UncompressedBuffer[kMaxTraceLengthToCollect]; + size_t BacktraceLength = + Backtrace(UncompressedBuffer, kMaxTraceLengthToCollect); + // Backtrace() returns the number of available frames, which may be greater + // than the number of frames in the buffer. In this case, we need to only pack + // the number of frames that are in the buffer. + if (BacktraceLength > kMaxTraceLengthToCollect) + BacktraceLength = kMaxTraceLengthToCollect; + TraceSize = + compression::pack(UncompressedBuffer, BacktraceLength, CompressedTrace, + AllocationMetadata::kStackFrameStorageBytes); +} + +size_t AllocatorState::maximumAllocationSize() const { return PageSize; } + +uintptr_t AllocatorState::slotToAddr(size_t N) const { + return GuardedPagePool + (PageSize * (1 + N)) + (maximumAllocationSize() * N); +} + +bool AllocatorState::isGuardPage(uintptr_t Ptr) const { + assert(pointerIsMine(reinterpret_cast<void *>(Ptr))); + size_t PageOffsetFromPoolStart = (Ptr - GuardedPagePool) / PageSize; + size_t PagesPerSlot = maximumAllocationSize() / PageSize; + return (PageOffsetFromPoolStart % (PagesPerSlot + 1)) == 0; +} + +static size_t addrToSlot(const AllocatorState *State, uintptr_t Ptr) { + size_t ByteOffsetFromPoolStart = Ptr - State->GuardedPagePool; + return ByteOffsetFromPoolStart / + (State->maximumAllocationSize() + State->PageSize); +} + +size_t AllocatorState::getNearestSlot(uintptr_t Ptr) const { + if (Ptr <= GuardedPagePool + PageSize) + return 0; + if (Ptr > GuardedPagePoolEnd - PageSize) + return MaxSimultaneousAllocations - 1; + + if (!isGuardPage(Ptr)) + return addrToSlot(this, Ptr); + + if (Ptr % PageSize <= PageSize / 2) + return addrToSlot(this, Ptr - PageSize); // Round down. + return addrToSlot(this, Ptr + PageSize); // Round up. +} + +} // namespace gwp_asan diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/common.h b/gnu/llvm/compiler-rt/lib/gwp_asan/common.h new file mode 100644 index 00000000000..d197711c77f --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/common.h @@ -0,0 +1,125 @@ +//===-- common.h ------------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// This file contains code that is common between the crash handler and the +// GuardedPoolAllocator. + +#ifndef GWP_ASAN_COMMON_H_ +#define GWP_ASAN_COMMON_H_ + +#include "gwp_asan/definitions.h" +#include "gwp_asan/options.h" + +#include <stddef.h> +#include <stdint.h> + +namespace gwp_asan { +enum class Error { + UNKNOWN, + USE_AFTER_FREE, + DOUBLE_FREE, + INVALID_FREE, + BUFFER_OVERFLOW, + BUFFER_UNDERFLOW +}; + +const char *ErrorToString(const Error &E); + +static constexpr uint64_t kInvalidThreadID = UINT64_MAX; +// Get the current thread ID, or kInvalidThreadID if failure. Note: This +// implementation is platform-specific. +uint64_t getThreadID(); + +// This struct contains all the metadata recorded about a single allocation made +// by GWP-ASan. If `AllocationMetadata.Addr` is zero, the metadata is non-valid. +struct AllocationMetadata { + // The number of bytes used to store a compressed stack frame. On 64-bit + // platforms, assuming a compression ratio of 50%, this should allow us to + // store ~64 frames per trace. + static constexpr size_t kStackFrameStorageBytes = 256; + + // Maximum number of stack frames to collect on allocation/deallocation. The + // actual number of collected frames may be less than this as the stack + // frames are compressed into a fixed memory range. + static constexpr size_t kMaxTraceLengthToCollect = 128; + + // Records the given allocation metadata into this struct. + void RecordAllocation(uintptr_t Addr, size_t Size); + // Record that this allocation is now deallocated. + void RecordDeallocation(); + + struct CallSiteInfo { + // Record the current backtrace to this callsite. + void RecordBacktrace(options::Backtrace_t Backtrace); + + // The compressed backtrace to the allocation/deallocation. + uint8_t CompressedTrace[kStackFrameStorageBytes]; + // The thread ID for this trace, or kInvalidThreadID if not available. + uint64_t ThreadID = kInvalidThreadID; + // The size of the compressed trace (in bytes). Zero indicates that no + // trace was collected. + size_t TraceSize = 0; + }; + + // The address of this allocation. If zero, the rest of this struct isn't + // valid, as the allocation has never occurred. + uintptr_t Addr = 0; + // Represents the actual size of the allocation. + size_t Size = 0; + + CallSiteInfo AllocationTrace; + CallSiteInfo DeallocationTrace; + + // Whether this allocation has been deallocated yet. + bool IsDeallocated = false; +}; + +// This holds the state that's shared between the GWP-ASan allocator and the +// crash handler. This, in conjunction with the Metadata array, forms the entire +// set of information required for understanding a GWP-ASan crash. +struct AllocatorState { + // Returns whether the provided pointer is a current sampled allocation that + // is owned by this pool. + GWP_ASAN_ALWAYS_INLINE bool pointerIsMine(const void *Ptr) const { + uintptr_t P = reinterpret_cast<uintptr_t>(Ptr); + return P < GuardedPagePoolEnd && GuardedPagePool <= P; + } + + // Returns the address of the N-th guarded slot. + uintptr_t slotToAddr(size_t N) const; + + // Returns the largest allocation that is supported by this pool. + size_t maximumAllocationSize() const; + + // Gets the nearest slot to the provided address. 
+ size_t getNearestSlot(uintptr_t Ptr) const; + + // Returns whether the provided pointer is a guard page or not. The pointer + // must be within memory owned by this pool, else the result is undefined. + bool isGuardPage(uintptr_t Ptr) const; + + // The number of guarded slots that this pool holds. + size_t MaxSimultaneousAllocations = 0; + + // Pointer to the pool of guarded slots. Note that this points to the start of + // the pool (which is a guard page), not a pointer to the first guarded page. + uintptr_t GuardedPagePool = 0; + uintptr_t GuardedPagePoolEnd = 0; + + // Cached page size for this system in bytes. + size_t PageSize = 0; + + // The type and address of an internally-detected failure. For INVALID_FREE + // and DOUBLE_FREE, these errors are detected in GWP-ASan, which will set + // these values and terminate the process. + Error FailureType = Error::UNKNOWN; + uintptr_t FailureAddress = 0; +}; + +} // namespace gwp_asan +#endif // GWP_ASAN_COMMON_H_ diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/crash_handler.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/crash_handler.cpp new file mode 100644 index 00000000000..c3b9e1467bd --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/crash_handler.cpp @@ -0,0 +1,140 @@ +//===-- crash_handler_interface.cpp -----------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/common.h" +#include "gwp_asan/stack_trace_compressor.h" + +#include <assert.h> + +using AllocationMetadata = gwp_asan::AllocationMetadata; +using Error = gwp_asan::Error; + +#ifdef __cplusplus +extern "C" { +#endif + +bool __gwp_asan_error_is_mine(const gwp_asan::AllocatorState *State, + uintptr_t ErrorPtr) { + assert(State && "State should not be nullptr."); + if (State->FailureType != Error::UNKNOWN && State->FailureAddress != 0) + return true; + + return ErrorPtr < State->GuardedPagePoolEnd && + State->GuardedPagePool <= ErrorPtr; +} + +uintptr_t +__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State) { + return State->FailureAddress; +} + +static const AllocationMetadata * +addrToMetadata(const gwp_asan::AllocatorState *State, + const AllocationMetadata *Metadata, uintptr_t Ptr) { + // Note - Similar implementation in guarded_pool_allocator.cpp. + return &Metadata[State->getNearestSlot(Ptr)]; +} + +gwp_asan::Error +__gwp_asan_diagnose_error(const gwp_asan::AllocatorState *State, + const gwp_asan::AllocationMetadata *Metadata, + uintptr_t ErrorPtr) { + if (!__gwp_asan_error_is_mine(State, ErrorPtr)) + return Error::UNKNOWN; + + if (State->FailureType != Error::UNKNOWN) + return State->FailureType; + + // Let's try and figure out what the source of this error is. + if (State->isGuardPage(ErrorPtr)) { + size_t Slot = State->getNearestSlot(ErrorPtr); + const AllocationMetadata *SlotMeta = + addrToMetadata(State, Metadata, State->slotToAddr(Slot)); + + // Ensure that this slot was allocated once upon a time. + if (!SlotMeta->Addr) + return Error::UNKNOWN; + + if (SlotMeta->Addr < ErrorPtr) + return Error::BUFFER_OVERFLOW; + return Error::BUFFER_UNDERFLOW; + } + + // Access wasn't a guard page, check for use-after-free. 
+ const AllocationMetadata *SlotMeta = + addrToMetadata(State, Metadata, ErrorPtr); + if (SlotMeta->IsDeallocated) { + return Error::USE_AFTER_FREE; + } + + // If we have reached here, the error is still unknown. + return Error::UNKNOWN; +} + +const gwp_asan::AllocationMetadata * +__gwp_asan_get_metadata(const gwp_asan::AllocatorState *State, + const gwp_asan::AllocationMetadata *Metadata, + uintptr_t ErrorPtr) { + if (!__gwp_asan_error_is_mine(State, ErrorPtr)) + return nullptr; + + if (ErrorPtr >= State->GuardedPagePoolEnd || + State->GuardedPagePool > ErrorPtr) + return nullptr; + + const AllocationMetadata *Meta = addrToMetadata(State, Metadata, ErrorPtr); + if (Meta->Addr == 0) + return nullptr; + + return Meta; +} + +uintptr_t __gwp_asan_get_allocation_address( + const gwp_asan::AllocationMetadata *AllocationMeta) { + return AllocationMeta->Addr; +} + +size_t __gwp_asan_get_allocation_size( + const gwp_asan::AllocationMetadata *AllocationMeta) { + return AllocationMeta->Size; +} + +uint64_t __gwp_asan_get_allocation_thread_id( + const gwp_asan::AllocationMetadata *AllocationMeta) { + return AllocationMeta->AllocationTrace.ThreadID; +} + +size_t __gwp_asan_get_allocation_trace( + const gwp_asan::AllocationMetadata *AllocationMeta, uintptr_t *Buffer, + size_t BufferLen) { + return gwp_asan::compression::unpack( + AllocationMeta->AllocationTrace.CompressedTrace, + AllocationMeta->AllocationTrace.TraceSize, Buffer, BufferLen); +} + +bool __gwp_asan_is_deallocated( + const gwp_asan::AllocationMetadata *AllocationMeta) { + return AllocationMeta->IsDeallocated; +} + +uint64_t __gwp_asan_get_deallocation_thread_id( + const gwp_asan::AllocationMetadata *AllocationMeta) { + return AllocationMeta->DeallocationTrace.ThreadID; +} + +size_t __gwp_asan_get_deallocation_trace( + const gwp_asan::AllocationMetadata *AllocationMeta, uintptr_t *Buffer, + size_t BufferLen) { + return gwp_asan::compression::unpack( + AllocationMeta->DeallocationTrace.CompressedTrace, + AllocationMeta->DeallocationTrace.TraceSize, Buffer, BufferLen); +} + +#ifdef __cplusplus +} // extern "C" +#endif diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/crash_handler.h b/gnu/llvm/compiler-rt/lib/gwp_asan/crash_handler.h new file mode 100644 index 00000000000..631c3192973 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/crash_handler.h @@ -0,0 +1,125 @@ +//===-- crash_handler_interface.h -------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// This file contains interface functions that can be called by an in-process or +// out-of-process crash handler after the process has terminated. Functions in +// this interface are never thread safe. For an in-process crash handler, the +// handler should call GuardedPoolAllocator::disable() to stop any other threads +// from retrieving new GWP-ASan allocations, which may corrupt the metadata. +#ifndef GWP_ASAN_INTERFACE_H_ +#define GWP_ASAN_INTERFACE_H_ + +#include "gwp_asan/common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// When a process crashes, there are three possible outcomes: +// 1. The crash is unrelated to GWP-ASan - in which case this function returns +// false. +// 2. The crash is internally detected within GWP-ASan itself (e.g. 
a
+// double-free bug is caught in GuardedPoolAllocator::deallocate(), and
+// GWP-ASan will terminate the process). In this case - this function
+// returns true.
+// 3. The crash is caused by a memory error at `AccessPtr` that's caught by the
+//    system, but GWP-ASan is responsible for the allocation. In this case -
+//    the function also returns true.
+// This function takes an optional `AccessPtr` parameter. If the pointer that
+// was attempted to be accessed is available, you should provide it here. In the
+// case of some internally-detected errors, the crash may manifest as an abort
+// or trap, and may or may not have an associated pointer. In these cases, the
+// pointer can be obtained by a call to __gwp_asan_get_internal_crash_address.
+bool __gwp_asan_error_is_mine(const gwp_asan::AllocatorState *State,
+                              uintptr_t ErrorPtr = 0u);
+
+// Diagnose and return the type of error that occurred at `ErrorPtr`. If
+// `ErrorPtr` is unrelated to GWP-ASan, or if the error type cannot be deduced,
+// this function returns Error::UNKNOWN.
+gwp_asan::Error
+__gwp_asan_diagnose_error(const gwp_asan::AllocatorState *State,
+                          const gwp_asan::AllocationMetadata *Metadata,
+                          uintptr_t ErrorPtr);
+
+// For internally-detected errors (double free, invalid free), this function
+// returns the pointer that the error occurred at. If the error is unrelated to
+// GWP-ASan, or if the error was caused by a non-internally detected failure,
+// this function returns zero.
+uintptr_t
+__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State);
+
+// Returns a pointer to the metadata for the allocation that's responsible for
+// the crash. This metadata should not be dereferenced directly due to API
+// compatibility issues, but should instead be passed to the functions below
+// for information retrieval. Returns nullptr if there is no metadata available
+// for this crash.
+const gwp_asan::AllocationMetadata *
+__gwp_asan_get_metadata(const gwp_asan::AllocatorState *State,
+                        const gwp_asan::AllocationMetadata *Metadata,
+                        uintptr_t ErrorPtr);
+
+// +---------------------------------------------------------------------------+
+// |                        Error Information Functions                         |
+// +---------------------------------------------------------------------------+
+// The functions below return information about the type of error that was
+// caught by GWP-ASan, or information about the allocation that caused the
+// error. These functions generally take an `AllocationMeta` argument, which
+// should be retrieved via __gwp_asan_get_metadata.
+
+// Returns the start of the allocation whose metadata is in `AllocationMeta`.
+uintptr_t __gwp_asan_get_allocation_address(
+    const gwp_asan::AllocationMetadata *AllocationMeta);
+
+// Returns the size of the allocation whose metadata is in `AllocationMeta`.
+size_t __gwp_asan_get_allocation_size(
+    const gwp_asan::AllocationMetadata *AllocationMeta);
+
+// Returns the Thread ID that allocated the memory that caused the error at
+// `ErrorPtr`. This function may not be called if __gwp_asan_has_metadata()
+// returns false.
+uint64_t __gwp_asan_get_allocation_thread_id(
+    const gwp_asan::AllocationMetadata *AllocationMeta);
+
+// Retrieve the allocation trace for the allocation whose metadata is in
+// `AllocationMeta`, and place it into the provided `Buffer` that has at least
+// `BufferLen` elements. This function returns the number of frames that would
+// have been written into `Buffer` if the space was available (i.e. however many
+// frames were stored by GWP-ASan). 
A return value greater than `BufferLen` +// indicates that the trace was truncated when storing to `Buffer`. +size_t __gwp_asan_get_allocation_trace( + const gwp_asan::AllocationMetadata *AllocationMeta, uintptr_t *Buffer, + size_t BufferLen); + +// Returns whether the allocation whose metadata is in `AllocationMeta` has been +// deallocated. This function may not be called if __gwp_asan_has_metadata() +// returns false. +bool __gwp_asan_is_deallocated( + const gwp_asan::AllocationMetadata *AllocationMeta); + +// Returns the Thread ID that deallocated the memory whose metadata is in +// `AllocationMeta`. This function may not be called if +// __gwp_asan_is_deallocated() returns false. +uint64_t __gwp_asan_get_deallocation_thread_id( + const gwp_asan::AllocationMetadata *AllocationMeta); + +// Retrieve the deallocation trace for the allocation whose metadata is in +// `AllocationMeta`, and place it into the provided `Buffer` that has at least +// `BufferLen` elements. This function returns the number of frames that would +// have been written into `Buffer` if the space was available (i.e. however many +// frames were stored by GWP-ASan). A return value greater than `BufferLen` +// indicates that the trace was truncated when storing to `Buffer`. This +// function may not be called if __gwp_asan_is_deallocated() returns false. +size_t __gwp_asan_get_deallocation_trace( + const gwp_asan::AllocationMetadata *AllocationMeta, uintptr_t *Buffer, + size_t BufferLen); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // GWP_ASAN_INTERFACE_H_ diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/definitions.h b/gnu/llvm/compiler-rt/lib/gwp_asan/definitions.h index bebe56c55a2..563c408b631 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/definitions.h +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/definitions.h @@ -9,9 +9,12 @@ #ifndef GWP_ASAN_DEFINITIONS_H_ #define GWP_ASAN_DEFINITIONS_H_ -#define GWP_ASAN_TLS_INITIAL_EXEC __thread __attribute__((tls_model("initial-exec"))) +#define GWP_ASAN_TLS_INITIAL_EXEC \ + __thread __attribute__((tls_model("initial-exec"))) #define GWP_ASAN_UNLIKELY(X) __builtin_expect(!!(X), 0) #define GWP_ASAN_ALWAYS_INLINE inline __attribute__((always_inline)) +#define GWP_ASAN_WEAK __attribute__((weak)) + #endif // GWP_ASAN_DEFINITIONS_H_ diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp index df454772a23..b2602e4caa5 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp @@ -8,7 +8,10 @@ #include "gwp_asan/guarded_pool_allocator.h" +#include "gwp_asan/optional/segv_handler.h" #include "gwp_asan/options.h" +#include "gwp_asan/random.h" +#include "gwp_asan/utilities.h" // RHEL creates the PRIu64 format macro (for printing uint64_t's) only when this // macro is defined before including <inttypes.h>. 
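The crash_handler.h surface above is a flat C API precisely so that a crash handler outside the crashed process can replay it against copied memory. As orientation only, a post-mortem inspector built on just these declarations might look like the following sketch; how `State`, `Metadata`, and the faulting address are recovered from the dead process (for example from a minidump) is assumed rather than shown.

#include "gwp_asan/common.h"
#include "gwp_asan/crash_handler.h"

#include <inttypes.h>
#include <stdio.h>

// Sketch: interrogate a recovered AllocatorState/Metadata pair after a crash.
void inspectCrash(const gwp_asan::AllocatorState *State,
                  const gwp_asan::AllocationMetadata *Metadata,
                  uintptr_t FaultAddr) {
  if (!__gwp_asan_error_is_mine(State, FaultAddr))
    return; // Not a GWP-ASan fault.

  // Internally-detected errors (double free, invalid free) record their own
  // address; prefer it over the (possibly absent) fault address.
  if (uintptr_t Internal = __gwp_asan_get_internal_crash_address(State))
    FaultAddr = Internal;

  gwp_asan::Error E = __gwp_asan_diagnose_error(State, Metadata, FaultAddr);
  printf("GWP-ASan error: %s\n", gwp_asan::ErrorToString(E));

  const gwp_asan::AllocationMetadata *Meta =
      __gwp_asan_get_metadata(State, Metadata, FaultAddr);
  if (!Meta)
    return;

  printf("allocation: 0x%zx, %zu bytes, by thread %" PRIu64 "\n",
         __gwp_asan_get_allocation_address(Meta),
         __gwp_asan_get_allocation_size(Meta),
         __gwp_asan_get_allocation_thread_id(Meta));

  uintptr_t Trace[128]; // kMaxTraceLengthToCollect frames is the most stored.
  size_t Frames = __gwp_asan_get_allocation_trace(Meta, Trace, 128);
  for (size_t I = 0; I < Frames && I < 128; ++I)
    printf("  #%zu 0x%zx\n", I, Trace[I]);
}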
@@ -18,13 +21,14 @@ #include <assert.h> #include <inttypes.h> +#include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> -using AllocationMetadata = gwp_asan::GuardedPoolAllocator::AllocationMetadata; -using Error = gwp_asan::GuardedPoolAllocator::Error; +using AllocationMetadata = gwp_asan::AllocationMetadata; +using Error = gwp_asan::Error; namespace gwp_asan { namespace { @@ -43,63 +47,12 @@ public: private: bool &Bool; }; - -void defaultPrintStackTrace(uintptr_t *Trace, size_t TraceLength, - options::Printf_t Printf) { - if (TraceLength == 0) - Printf(" <unknown (does your allocator support backtracing?)>\n"); - - for (size_t i = 0; i < TraceLength; ++i) { - Printf(" #%zu 0x%zx in <unknown>\n", i, Trace[i]); - } - Printf("\n"); -} } // anonymous namespace // Gets the singleton implementation of this class. Thread-compatible until // init() is called, thread-safe afterwards. -GuardedPoolAllocator *getSingleton() { return SingletonPtr; } - -void GuardedPoolAllocator::AllocationMetadata::RecordAllocation( - uintptr_t AllocAddr, size_t AllocSize, options::Backtrace_t Backtrace) { - Addr = AllocAddr; - Size = AllocSize; - IsDeallocated = false; - - // TODO(hctim): Ask the caller to provide the thread ID, so we don't waste - // other thread's time getting the thread ID under lock. - AllocationTrace.ThreadID = getThreadID(); - AllocationTrace.TraceSize = 0; - DeallocationTrace.TraceSize = 0; - DeallocationTrace.ThreadID = kInvalidThreadID; - - if (Backtrace) { - uintptr_t UncompressedBuffer[kMaxTraceLengthToCollect]; - size_t BacktraceLength = - Backtrace(UncompressedBuffer, kMaxTraceLengthToCollect); - AllocationTrace.TraceSize = compression::pack( - UncompressedBuffer, BacktraceLength, AllocationTrace.CompressedTrace, - kStackFrameStorageBytes); - } -} - -void GuardedPoolAllocator::AllocationMetadata::RecordDeallocation( - options::Backtrace_t Backtrace) { - IsDeallocated = true; - // Ensure that the unwinder is not called if the recursive flag is set, - // otherwise non-reentrant unwinders may deadlock. 
- DeallocationTrace.TraceSize = 0; - if (Backtrace && !ThreadLocals.RecursiveGuard) { - ScopedBoolean B(ThreadLocals.RecursiveGuard); - - uintptr_t UncompressedBuffer[kMaxTraceLengthToCollect]; - size_t BacktraceLength = - Backtrace(UncompressedBuffer, kMaxTraceLengthToCollect); - DeallocationTrace.TraceSize = compression::pack( - UncompressedBuffer, BacktraceLength, DeallocationTrace.CompressedTrace, - kStackFrameStorageBytes); - } - DeallocationTrace.ThreadID = getThreadID(); +GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() { + return SingletonPtr; } void GuardedPoolAllocator::init(const options::Options &Opts) { @@ -110,71 +63,99 @@ void GuardedPoolAllocator::init(const options::Options &Opts) { Opts.MaxSimultaneousAllocations == 0) return; - if (Opts.SampleRate < 0) { - Opts.Printf("GWP-ASan Error: SampleRate is < 0.\n"); - exit(EXIT_FAILURE); - } - - if (Opts.SampleRate > INT32_MAX) { - Opts.Printf("GWP-ASan Error: SampleRate is > 2^31.\n"); - exit(EXIT_FAILURE); - } - - if (Opts.MaxSimultaneousAllocations < 0) { - Opts.Printf("GWP-ASan Error: MaxSimultaneousAllocations is < 0.\n"); - exit(EXIT_FAILURE); - } + Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0."); + Check(Opts.SampleRate <= INT32_MAX, "GWP-ASan Error: SampleRate is > 2^31."); + Check(Opts.MaxSimultaneousAllocations >= 0, + "GWP-ASan Error: MaxSimultaneousAllocations is < 0."); SingletonPtr = this; + Backtrace = Opts.Backtrace; - MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations; + State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations; - PageSize = getPlatformPageSize(); + State.PageSize = getPlatformPageSize(); PerfectlyRightAlign = Opts.PerfectlyRightAlign; - Printf = Opts.Printf; - Backtrace = Opts.Backtrace; - if (Opts.PrintBacktrace) - PrintBacktrace = Opts.PrintBacktrace; - else - PrintBacktrace = defaultPrintStackTrace; size_t PoolBytesRequired = - PageSize * (1 + MaxSimultaneousAllocations) + - MaxSimultaneousAllocations * maximumAllocationSize(); - void *GuardedPoolMemory = mapMemory(PoolBytesRequired); + State.PageSize * (1 + State.MaxSimultaneousAllocations) + + State.MaxSimultaneousAllocations * State.maximumAllocationSize(); + void *GuardedPoolMemory = mapMemory(PoolBytesRequired, kGwpAsanGuardPageName); - size_t BytesRequired = MaxSimultaneousAllocations * sizeof(*Metadata); - Metadata = reinterpret_cast<AllocationMetadata *>(mapMemory(BytesRequired)); - markReadWrite(Metadata, BytesRequired); + size_t BytesRequired = State.MaxSimultaneousAllocations * sizeof(*Metadata); + Metadata = reinterpret_cast<AllocationMetadata *>( + mapMemory(BytesRequired, kGwpAsanMetadataName)); + markReadWrite(Metadata, BytesRequired, kGwpAsanMetadataName); // Allocate memory and set up the free pages queue. - BytesRequired = MaxSimultaneousAllocations * sizeof(*FreeSlots); - FreeSlots = reinterpret_cast<size_t *>(mapMemory(BytesRequired)); - markReadWrite(FreeSlots, BytesRequired); + BytesRequired = State.MaxSimultaneousAllocations * sizeof(*FreeSlots); + FreeSlots = reinterpret_cast<size_t *>( + mapMemory(BytesRequired, kGwpAsanFreeSlotsName)); + markReadWrite(FreeSlots, BytesRequired, kGwpAsanFreeSlotsName); // Multiply the sample rate by 2 to give a good, fast approximation for (1 / // SampleRate) chance of sampling. 
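// An illustrative aside on the arithmetic below: the counter is drawn
// uniformly from [1, 2 * SampleRate], so allocations are sampled once every
// ~SampleRate allocations on average. A standalone sketch of the same scheme,
// using rand() in place of the allocator's PRNG purely for illustration:
#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
  const uint32_t SampleRate = 5000; // Mirrors the default in options.inc.
  const uint32_t AdjustedSampleRatePlusOne = SampleRate * 2 + 1;
  uint32_t NextSampleCounter = 0;
  uint64_t Samples = 0;
  const uint64_t Allocations = 10000000;
  for (uint64_t I = 0; I < Allocations; ++I) {
    if (NextSampleCounter == 0) // Regenerate, as shouldSample() does.
      NextSampleCounter =
          (static_cast<uint32_t>(rand()) % (AdjustedSampleRatePlusOne - 1)) + 1;
    if (--NextSampleCounter == 0)
      ++Samples; // This allocation would get a guarded slot.
  }
  // Prints roughly Allocations / SampleRate, i.e. about 2000 samples here.
  printf("sampled %llu of %llu allocations\n",
         static_cast<unsigned long long>(Samples),
         static_cast<unsigned long long>(Allocations));
}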
if (Opts.SampleRate != 1) - AdjustedSampleRate = static_cast<uint32_t>(Opts.SampleRate) * 2; + AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1; else - AdjustedSampleRate = 1; + AdjustedSampleRatePlusOne = 2; - GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory); - GuardedPagePoolEnd = + initPRNG(); + ThreadLocals.NextSampleCounter = + (getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1; + + State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory); + State.GuardedPagePoolEnd = reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired; - // Ensure that signal handlers are installed as late as possible, as the class - // is not thread-safe until init() is finished, and thus a SIGSEGV may cause a - // race to members if received during init(). - if (Opts.InstallSignalHandlers) - installSignalHandlers(); + if (Opts.InstallForkHandlers) + installAtFork(); +} + +void GuardedPoolAllocator::disable() { PoolMutex.lock(); } + +void GuardedPoolAllocator::enable() { PoolMutex.unlock(); } + +void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb, + void *Arg) { + uintptr_t Start = reinterpret_cast<uintptr_t>(Base); + for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) { + const AllocationMetadata &Meta = Metadata[i]; + if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start && + Meta.Addr < Start + Size) + Cb(Meta.Addr, Meta.Size, Arg); + } +} + +void GuardedPoolAllocator::uninitTestOnly() { + if (State.GuardedPagePool) { + unmapMemory(reinterpret_cast<void *>(State.GuardedPagePool), + State.GuardedPagePoolEnd - State.GuardedPagePool, + kGwpAsanGuardPageName); + State.GuardedPagePool = 0; + State.GuardedPagePoolEnd = 0; + } + if (Metadata) { + unmapMemory(Metadata, State.MaxSimultaneousAllocations * sizeof(*Metadata), + kGwpAsanMetadataName); + Metadata = nullptr; + } + if (FreeSlots) { + unmapMemory(FreeSlots, + State.MaxSimultaneousAllocations * sizeof(*FreeSlots), + kGwpAsanFreeSlotsName); + FreeSlots = nullptr; + } +} + +static uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) { + return Ptr & ~(PageSize - 1); } void *GuardedPoolAllocator::allocate(size_t Size) { // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall // back to the supporting allocator. - if (GuardedPagePoolEnd == 0) + if (State.GuardedPagePoolEnd == 0) return nullptr; // Protect against recursivity. @@ -182,7 +163,7 @@ void *GuardedPoolAllocator::allocate(size_t Size) { return nullptr; ScopedBoolean SB(ThreadLocals.RecursiveGuard); - if (Size == 0 || Size > maximumAllocationSize()) + if (Size == 0 || Size > State.maximumAllocationSize()) return nullptr; size_t Index; @@ -194,28 +175,54 @@ void *GuardedPoolAllocator::allocate(size_t Size) { if (Index == kInvalidSlotID) return nullptr; - uintptr_t Ptr = slotToAddr(Index); - Ptr += allocationSlotOffset(Size); + uintptr_t Ptr = State.slotToAddr(Index); + // Should we right-align this allocation? + if (getRandomUnsigned32() % 2 == 0) { + AlignmentStrategy Align = AlignmentStrategy::DEFAULT; + if (PerfectlyRightAlign) + Align = AlignmentStrategy::PERFECT; + Ptr += + State.maximumAllocationSize() - rightAlignedAllocationSize(Size, Align); + } AllocationMetadata *Meta = addrToMetadata(Ptr); // If a slot is multiple pages in size, and the allocation takes up a single // page, we can improve overflow detection by leaving the unused pages as // unmapped. 
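// An illustrative aside: rightAlignedAllocationSize() lives in
// gwp_asan/utilities.h and is not part of this diff. Judging from the
// allocationSlotOffset() rounding that is removed further down, and from the
// PerfectlyRightAlign text in options.inc, the DEFAULT strategy should behave
// like this hypothetical helper:
#include <cstddef>

enum class AlignmentStrategySketch { DEFAULT, PERFECT };

size_t rightAlignedSizeSketch(size_t Size, AlignmentStrategySketch Align) {
  if (Align == AlignmentStrategySketch::PERFECT)
    return Size; // The final byte abuts the trailing guard page exactly.
  // DEFAULT: round up to the nearest power of two (1, 2, 4, 8, 16), capping
  // the alignment at 16 bytes for performance.
  if (Size <= 2)
    return Size;
  if (Size <= 4)
    return 4;
  if (Size <= 8)
    return 8;
  return (Size + 15) & ~static_cast<size_t>(15);
}
// The allocation is then placed at
//   slotToAddr(Index) + maximumAllocationSize() - rightAlignedSizeSketch(...),
// so an overflow past the rounded size faults on the adjacent guard page.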
- markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr)), Size); + markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr, State.PageSize)), + Size, kGwpAsanAliveSlotName); - Meta->RecordAllocation(Ptr, Size, Backtrace); + Meta->RecordAllocation(Ptr, Size); + Meta->AllocationTrace.RecordBacktrace(Backtrace); return reinterpret_cast<void *>(Ptr); } +void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) { + State.FailureType = E; + State.FailureAddress = Address; + + // Raise a SEGV by touching first guard page. + volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool); + *p = 0; + __builtin_unreachable(); +} + +void GuardedPoolAllocator::stop() { + ThreadLocals.RecursiveGuard = true; + PoolMutex.tryLock(); +} + void GuardedPoolAllocator::deallocate(void *Ptr) { assert(pointerIsMine(Ptr) && "Pointer is not mine!"); uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr); - uintptr_t SlotStart = slotToAddr(addrToSlot(UPtr)); + size_t Slot = State.getNearestSlot(UPtr); + uintptr_t SlotStart = State.slotToAddr(Slot); AllocationMetadata *Meta = addrToMetadata(UPtr); if (Meta->Addr != UPtr) { - reportError(UPtr, Error::INVALID_FREE); - exit(EXIT_FAILURE); + // If multiple errors occur at the same time, use the first one. + ScopedLock L(PoolMutex); + trapOnAddress(UPtr, Error::INVALID_FREE); } // Intentionally scope the mutex here, so that other threads can access the @@ -223,22 +230,28 @@ void GuardedPoolAllocator::deallocate(void *Ptr) { { ScopedLock L(PoolMutex); if (Meta->IsDeallocated) { - reportError(UPtr, Error::DOUBLE_FREE); - exit(EXIT_FAILURE); + trapOnAddress(UPtr, Error::DOUBLE_FREE); } // Ensure that the deallocation is recorded before marking the page as // inaccessible. Otherwise, a racy use-after-free will have inconsistent // metadata. - Meta->RecordDeallocation(Backtrace); + Meta->RecordDeallocation(); + + // Ensure that the unwinder is not called if the recursive flag is set, + // otherwise non-reentrant unwinders may deadlock. + if (!ThreadLocals.RecursiveGuard) { + ScopedBoolean B(ThreadLocals.RecursiveGuard); + Meta->DeallocationTrace.RecordBacktrace(Backtrace); + } } markInaccessible(reinterpret_cast<void *>(SlotStart), - maximumAllocationSize()); + State.maximumAllocationSize(), kGwpAsanGuardPageName); // And finally, lock again to release the slot back into the pool. 
ScopedLock L(PoolMutex); - freeSlot(addrToSlot(UPtr)); + freeSlot(Slot); } size_t GuardedPoolAllocator::getSize(const void *Ptr) { @@ -249,38 +262,14 @@ size_t GuardedPoolAllocator::getSize(const void *Ptr) { return Meta->Size; } -size_t GuardedPoolAllocator::maximumAllocationSize() const { return PageSize; } - AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const { - return &Metadata[addrToSlot(Ptr)]; -} - -size_t GuardedPoolAllocator::addrToSlot(uintptr_t Ptr) const { - assert(pointerIsMine(reinterpret_cast<void *>(Ptr))); - size_t ByteOffsetFromPoolStart = Ptr - GuardedPagePool; - return ByteOffsetFromPoolStart / (maximumAllocationSize() + PageSize); -} - -uintptr_t GuardedPoolAllocator::slotToAddr(size_t N) const { - return GuardedPagePool + (PageSize * (1 + N)) + (maximumAllocationSize() * N); -} - -uintptr_t GuardedPoolAllocator::getPageAddr(uintptr_t Ptr) const { - assert(pointerIsMine(reinterpret_cast<void *>(Ptr))); - return Ptr & ~(static_cast<uintptr_t>(PageSize) - 1); -} - -bool GuardedPoolAllocator::isGuardPage(uintptr_t Ptr) const { - assert(pointerIsMine(reinterpret_cast<void *>(Ptr))); - size_t PageOffsetFromPoolStart = (Ptr - GuardedPagePool) / PageSize; - size_t PagesPerSlot = maximumAllocationSize() / PageSize; - return (PageOffsetFromPoolStart % (PagesPerSlot + 1)) == 0; + return &Metadata[State.getNearestSlot(Ptr)]; } size_t GuardedPoolAllocator::reserveSlot() { // Avoid potential reuse of a slot before we have made at least a single // allocation in each slot. Helps with our use-after-free detection. - if (NumSampledAllocations < MaxSimultaneousAllocations) + if (NumSampledAllocations < State.MaxSimultaneousAllocations) return NumSampledAllocations++; if (FreeSlotsLength == 0) @@ -293,233 +282,10 @@ size_t GuardedPoolAllocator::reserveSlot() { } void GuardedPoolAllocator::freeSlot(size_t SlotIndex) { - assert(FreeSlotsLength < MaxSimultaneousAllocations); + assert(FreeSlotsLength < State.MaxSimultaneousAllocations); FreeSlots[FreeSlotsLength++] = SlotIndex; } -uintptr_t GuardedPoolAllocator::allocationSlotOffset(size_t Size) const { - assert(Size > 0); - - bool ShouldRightAlign = getRandomUnsigned32() % 2 == 0; - if (!ShouldRightAlign) - return 0; - - uintptr_t Offset = maximumAllocationSize(); - if (!PerfectlyRightAlign) { - if (Size == 3) - Size = 4; - else if (Size > 4 && Size <= 8) - Size = 8; - else if (Size > 8 && (Size % 16) != 0) - Size += 16 - (Size % 16); - } - Offset -= Size; - return Offset; -} - -void GuardedPoolAllocator::reportError(uintptr_t AccessPtr, Error E) { - if (SingletonPtr) - SingletonPtr->reportErrorInternal(AccessPtr, E); -} - -size_t GuardedPoolAllocator::getNearestSlot(uintptr_t Ptr) const { - if (Ptr <= GuardedPagePool + PageSize) - return 0; - if (Ptr > GuardedPagePoolEnd - PageSize) - return MaxSimultaneousAllocations - 1; - - if (!isGuardPage(Ptr)) - return addrToSlot(Ptr); - - if (Ptr % PageSize <= PageSize / 2) - return addrToSlot(Ptr - PageSize); // Round down. - return addrToSlot(Ptr + PageSize); // Round up. -} - -Error GuardedPoolAllocator::diagnoseUnknownError(uintptr_t AccessPtr, - AllocationMetadata **Meta) { - // Let's try and figure out what the source of this error is. - if (isGuardPage(AccessPtr)) { - size_t Slot = getNearestSlot(AccessPtr); - AllocationMetadata *SlotMeta = addrToMetadata(slotToAddr(Slot)); - - // Ensure that this slot was allocated once upon a time. 
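// An illustrative aside: the removed helpers above encode the pool geometry,
// which the new AllocatorState::slotToAddr()/getNearestSlot() preserve. Since
// maximumAllocationSize() == PageSize, the pool alternates guard pages and
// slots. A sketch of that layout math, with assumed values (4 KiB pages and a
// made-up base address):
#include <cassert>
#include <cstddef>
#include <cstdint>

struct PoolGeometrySketch {
  uintptr_t PoolStart; // GuardedPagePool; the pool begins with a guard page.
  size_t PageSize;

  // Matches the removed slotToAddr(): a leading guard page, then slot N at
  // page offset 1 + 2 * N.
  uintptr_t slotToAddr(size_t N) const {
    return PoolStart + (PageSize * (1 + N)) + (PageSize * N);
  }
  // Matches the removed isGuardPage(): guard pages sit at even page offsets.
  bool isGuardPage(uintptr_t Ptr) const {
    return ((Ptr - PoolStart) / PageSize) % 2 == 0;
  }
};

int main() {
  PoolGeometrySketch G{0x10000000, 4096};
  assert(G.slotToAddr(0) == G.PoolStart + 4096); // First slot after a guard.
  assert(G.isGuardPage(G.PoolStart));            // Leading guard page.
  assert(!G.isGuardPage(G.slotToAddr(3)));       // Slots are not guard pages.
}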
- if (!SlotMeta->Addr) - return Error::UNKNOWN; - *Meta = SlotMeta; - - if (SlotMeta->Addr < AccessPtr) - return Error::BUFFER_OVERFLOW; - return Error::BUFFER_UNDERFLOW; - } - - // Access wasn't a guard page, check for use-after-free. - AllocationMetadata *SlotMeta = addrToMetadata(AccessPtr); - if (SlotMeta->IsDeallocated) { - *Meta = SlotMeta; - return Error::USE_AFTER_FREE; - } - - // If we have reached here, the error is still unknown. There is no metadata - // available. - *Meta = nullptr; - return Error::UNKNOWN; -} - -namespace { -// Prints the provided error and metadata information. -void printErrorType(Error E, uintptr_t AccessPtr, AllocationMetadata *Meta, - options::Printf_t Printf, uint64_t ThreadID) { - // Print using intermediate strings. Platforms like Android don't like when - // you print multiple times to the same line, as there may be a newline - // appended to a log file automatically per Printf() call. - const char *ErrorString; - switch (E) { - case Error::UNKNOWN: - ErrorString = "GWP-ASan couldn't automatically determine the source of " - "the memory error. It was likely caused by a wild memory " - "access into the GWP-ASan pool. The error occurred"; - break; - case Error::USE_AFTER_FREE: - ErrorString = "Use after free"; - break; - case Error::DOUBLE_FREE: - ErrorString = "Double free"; - break; - case Error::INVALID_FREE: - ErrorString = "Invalid (wild) free"; - break; - case Error::BUFFER_OVERFLOW: - ErrorString = "Buffer overflow"; - break; - case Error::BUFFER_UNDERFLOW: - ErrorString = "Buffer underflow"; - break; - } - - constexpr size_t kDescriptionBufferLen = 128; - char DescriptionBuffer[kDescriptionBufferLen]; - if (Meta) { - if (E == Error::USE_AFTER_FREE) { - snprintf(DescriptionBuffer, kDescriptionBufferLen, - "(%zu byte%s into a %zu-byte allocation at 0x%zx)", - AccessPtr - Meta->Addr, (AccessPtr - Meta->Addr == 1) ? "" : "s", - Meta->Size, Meta->Addr); - } else if (AccessPtr < Meta->Addr) { - snprintf(DescriptionBuffer, kDescriptionBufferLen, - "(%zu byte%s to the left of a %zu-byte allocation at 0x%zx)", - Meta->Addr - AccessPtr, (Meta->Addr - AccessPtr == 1) ? "" : "s", - Meta->Size, Meta->Addr); - } else if (AccessPtr > Meta->Addr) { - snprintf(DescriptionBuffer, kDescriptionBufferLen, - "(%zu byte%s to the right of a %zu-byte allocation at 0x%zx)", - AccessPtr - Meta->Addr, (AccessPtr - Meta->Addr == 1) ? "" : "s", - Meta->Size, Meta->Addr); - } else { - snprintf(DescriptionBuffer, kDescriptionBufferLen, - "(a %zu-byte allocation)", Meta->Size); - } - } - - // Possible number of digits of a 64-bit number: ceil(log10(2^64)) == 20. Add - // a null terminator, and round to the nearest 8-byte boundary. 
- constexpr size_t kThreadBufferLen = 24; - char ThreadBuffer[kThreadBufferLen]; - if (ThreadID == GuardedPoolAllocator::kInvalidThreadID) - snprintf(ThreadBuffer, kThreadBufferLen, "<unknown>"); - else - snprintf(ThreadBuffer, kThreadBufferLen, "%" PRIu64, ThreadID); - - Printf("%s at 0x%zx %s by thread %s here:\n", ErrorString, AccessPtr, - DescriptionBuffer, ThreadBuffer); -} - -void printAllocDeallocTraces(uintptr_t AccessPtr, AllocationMetadata *Meta, - options::Printf_t Printf, - options::PrintBacktrace_t PrintBacktrace) { - assert(Meta != nullptr && "Metadata is non-null for printAllocDeallocTraces"); - - if (Meta->IsDeallocated) { - if (Meta->DeallocationTrace.ThreadID == - GuardedPoolAllocator::kInvalidThreadID) - Printf("0x%zx was deallocated by thread <unknown> here:\n", AccessPtr); - else - Printf("0x%zx was deallocated by thread %zu here:\n", AccessPtr, - Meta->DeallocationTrace.ThreadID); - - uintptr_t UncompressedTrace[AllocationMetadata::kMaxTraceLengthToCollect]; - size_t UncompressedLength = compression::unpack( - Meta->DeallocationTrace.CompressedTrace, - Meta->DeallocationTrace.TraceSize, UncompressedTrace, - AllocationMetadata::kMaxTraceLengthToCollect); - - PrintBacktrace(UncompressedTrace, UncompressedLength, Printf); - } - - if (Meta->AllocationTrace.ThreadID == GuardedPoolAllocator::kInvalidThreadID) - Printf("0x%zx was allocated by thread <unknown> here:\n", Meta->Addr); - else - Printf("0x%zx was allocated by thread %zu here:\n", Meta->Addr, - Meta->AllocationTrace.ThreadID); - - uintptr_t UncompressedTrace[AllocationMetadata::kMaxTraceLengthToCollect]; - size_t UncompressedLength = compression::unpack( - Meta->AllocationTrace.CompressedTrace, Meta->AllocationTrace.TraceSize, - UncompressedTrace, AllocationMetadata::kMaxTraceLengthToCollect); - - PrintBacktrace(UncompressedTrace, UncompressedLength, Printf); -} - -struct ScopedEndOfReportDecorator { - ScopedEndOfReportDecorator(options::Printf_t Printf) : Printf(Printf) {} - ~ScopedEndOfReportDecorator() { Printf("*** End GWP-ASan report ***\n"); } - options::Printf_t Printf; -}; -} // anonymous namespace - -void GuardedPoolAllocator::reportErrorInternal(uintptr_t AccessPtr, Error E) { - if (!pointerIsMine(reinterpret_cast<void *>(AccessPtr))) { - return; - } - - // Attempt to prevent races to re-use the same slot that triggered this error. - // This does not guarantee that there are no races, because another thread can - // take the locks during the time that the signal handler is being called. - PoolMutex.tryLock(); - ThreadLocals.RecursiveGuard = true; - - Printf("*** GWP-ASan detected a memory error ***\n"); - ScopedEndOfReportDecorator Decorator(Printf); - - AllocationMetadata *Meta = nullptr; - - if (E == Error::UNKNOWN) { - E = diagnoseUnknownError(AccessPtr, &Meta); - } else { - size_t Slot = getNearestSlot(AccessPtr); - Meta = addrToMetadata(slotToAddr(Slot)); - // Ensure that this slot has been previously allocated. - if (!Meta->Addr) - Meta = nullptr; - } - - // Print the error information. 
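// An illustrative aside: both the allocation paths and the (removed) report
// printer funnel traces through gwp_asan::compression, whose delta-based
// encoding is what lets the 256-byte kStackFrameStorageBytes budget hold ~64
// frames. A round-trip sketch; the pack()/unpack() signatures are inferred
// from the call sites in this diff:
#include "gwp_asan/stack_trace_compressor.h"

#include <cassert>
#include <cstdint>

int main() {
  // A fake backtrace. Nearby return addresses compress well, since the
  // compressor encodes differences between successive frames.
  uintptr_t Trace[4] = {0x400400, 0x400480, 0x400410, 0x400890};

  uint8_t Packed[256]; // kStackFrameStorageBytes.
  size_t PackedSize =
      gwp_asan::compression::pack(Trace, 4, Packed, sizeof(Packed));
  assert(PackedSize != 0 && PackedSize <= sizeof(Packed));

  uintptr_t Unpacked[4];
  size_t Frames =
      gwp_asan::compression::unpack(Packed, PackedSize, Unpacked, 4);
  assert(Frames == 4);
  for (size_t I = 0; I < Frames; ++I)
    assert(Unpacked[I] == Trace[I]);
}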
- uint64_t ThreadID = getThreadID(); - printErrorType(E, AccessPtr, Meta, Printf, ThreadID); - if (Backtrace) { - static constexpr unsigned kMaximumStackFramesForCrashTrace = 512; - uintptr_t Trace[kMaximumStackFramesForCrashTrace]; - size_t TraceLength = Backtrace(Trace, kMaximumStackFramesForCrashTrace); - - PrintBacktrace(Trace, TraceLength, Printf); - } else { - Printf(" <unknown (does your allocator support backtracing?)>\n\n"); - } - - if (Meta) - printAllocDeallocTraces(AccessPtr, Meta, Printf, PrintBacktrace); -} - GWP_ASAN_TLS_INITIAL_EXEC GuardedPoolAllocator::ThreadLocalPackedVariables GuardedPoolAllocator::ThreadLocals; diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/gnu/llvm/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h index 7e6e13769d3..ae00506c569 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h @@ -9,6 +9,7 @@ #ifndef GWP_ASAN_GUARDED_POOL_ALLOCATOR_H_ #define GWP_ASAN_GUARDED_POOL_ALLOCATOR_H_ +#include "gwp_asan/common.h" #include "gwp_asan/definitions.h" #include "gwp_asan/mutex.h" #include "gwp_asan/options.h" @@ -28,56 +29,8 @@ namespace gwp_asan { // otherwise). class GuardedPoolAllocator { public: - static constexpr uint64_t kInvalidThreadID = UINT64_MAX; - - enum class Error { - UNKNOWN, - USE_AFTER_FREE, - DOUBLE_FREE, - INVALID_FREE, - BUFFER_OVERFLOW, - BUFFER_UNDERFLOW - }; - - struct AllocationMetadata { - // The number of bytes used to store a compressed stack frame. On 64-bit - // platforms, assuming a compression ratio of 50%, this should allow us to - // store ~64 frames per trace. - static constexpr size_t kStackFrameStorageBytes = 256; - - // Maximum number of stack frames to collect on allocation/deallocation. The - // actual number of collected frames may be less than this as the stack - // frames are compressed into a fixed memory range. - static constexpr size_t kMaxTraceLengthToCollect = 128; - - // Records the given allocation metadata into this struct. - void RecordAllocation(uintptr_t Addr, size_t Size, - options::Backtrace_t Backtrace); - - // Record that this allocation is now deallocated. - void RecordDeallocation(options::Backtrace_t Backtrace); - - struct CallSiteInfo { - // The compressed backtrace to the allocation/deallocation. - uint8_t CompressedTrace[kStackFrameStorageBytes]; - // The thread ID for this trace, or kInvalidThreadID if not available. - uint64_t ThreadID = kInvalidThreadID; - // The size of the compressed trace (in bytes). Zero indicates that no - // trace was collected. - size_t TraceSize = 0; - }; - - // The address of this allocation. - uintptr_t Addr = 0; - // Represents the actual size of the allocation. - size_t Size = 0; - - CallSiteInfo AllocationTrace; - CallSiteInfo DeallocationTrace; - - // Whether this allocation has been deallocated yet. - bool IsDeallocated = false; - }; + // Name of the GWP-ASan mapping that for `Metadata`. + static constexpr const char *kGwpAsanMetadataName = "GWP-ASan Metadata"; // During program startup, we must ensure that memory allocations do not land // in this allocation pool if the allocator decides to runtime-disable @@ -98,14 +51,36 @@ public: // pool using the provided options. See options.inc for runtime configuration // options. void init(const options::Options &Opts); + void uninitTestOnly(); + + // Functions exported for libmemunreachable's use on Android. 
disable()
+  // installs a lock in the allocator that prevents any thread from being able
+  // to allocate memory, until enable() is called.
+  void disable();
+  void enable();
+
+  typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
+  // Execute the callback Cb for every allocation that lies in [Base, Base +
+  // Size). Must be called while the allocator is disabled. The callback cannot
+  // allocate.
+  void iterate(void *Base, size_t Size, iterate_callback Cb, void *Arg);
+
+  // This function is used to signal the allocator to indefinitely stop
+  // functioning, as a crash has occurred. This stops the allocator from
+  // servicing any further allocations permanently.
+  void stop();
 
   // Return whether the allocation should be randomly chosen for sampling.
   GWP_ASAN_ALWAYS_INLINE bool shouldSample() {
     // NextSampleCounter == 0 means we "should regenerate the counter".
     //                   == 1 means we "should sample this allocation".
+    // AdjustedSampleRatePlusOne is designed to intentionally underflow. This
+    // class must be valid when zero-initialised, and we wish to sample as
+    // infrequently as possible when this is the case, hence we underflow to
+    // UINT32_MAX.
     if (GWP_ASAN_UNLIKELY(ThreadLocals.NextSampleCounter == 0))
       ThreadLocals.NextSampleCounter =
-          (getRandomUnsigned32() % AdjustedSampleRate) + 1;
+          (getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1;
 
     return GWP_ASAN_UNLIKELY(--ThreadLocals.NextSampleCounter == 0);
   }
@@ -113,8 +88,7 @@ public:
   // Returns whether the provided pointer is a current sampled allocation that
   // is owned by this pool.
   GWP_ASAN_ALWAYS_INLINE bool pointerIsMine(const void *Ptr) const {
-    uintptr_t P = reinterpret_cast<uintptr_t>(Ptr);
-    return GuardedPagePool <= P && P < GuardedPagePoolEnd;
+    return State.pointerIsMine(Ptr);
   }
 
   // Allocate memory in a guarded slot, and return a pointer to the new
@@ -129,23 +103,21 @@ public:
   // Returns the size of the allocation at Ptr.
   size_t getSize(const void *Ptr);
 
-  // Returns the largest allocation that is supported by this pool. Any
-  // allocations larger than this should go to the regular system allocator.
-  size_t maximumAllocationSize() const;
-
-  // Dumps an error report (including allocation and deallocation stack traces).
-  // An optional error may be provided if the caller knows what the error is
-  // ahead of time. This is primarily a helper function to locate the static
-  // singleton pointer and call the internal version of this function. This
-  // method is never thread safe, and should only be called when fatal errors
-  // occur.
-  static void reportError(uintptr_t AccessPtr, Error E = Error::UNKNOWN);
+  // Returns a pointer to the Metadata region, or nullptr if it doesn't exist.
+  const AllocationMetadata *getMetadataRegion() const { return Metadata; }
 
-  // Get the current thread ID, or kInvalidThreadID if failure. Note: This
-  // implementation is platform-specific.
-  static uint64_t getThreadID();
+  // Returns a pointer to the AllocatorState region.
+  const AllocatorState *getAllocatorState() const { return &State; }
 
 private:
+  // Name of actively-occupied slot mappings.
+  static constexpr const char *kGwpAsanAliveSlotName = "GWP-ASan Alive Slot";
+  // Name of the guard pages. This includes all slots that are not actively in
+  // use (i.e. were never used, or have been free()'d).
+  static constexpr const char *kGwpAsanGuardPageName = "GWP-ASan Guard Page";
+  // Name of the mapping for `FreeSlots`. 
+  static constexpr const char *kGwpAsanFreeSlotsName = "GWP-ASan Free Slots";
+
   static constexpr size_t kInvalidSlotID = SIZE_MAX;
 
   // These functions anonymously map memory or change the permissions of mapped
@@ -154,43 +126,22 @@ private:
   // return on error, instead electing to kill the calling process on failure.
   // Note that memory is initially mapped inaccessible. In order for RW
   // mappings, call mapMemory() followed by markReadWrite() on the returned
-  // pointer.
-  void *mapMemory(size_t Size) const;
-  void markReadWrite(void *Ptr, size_t Size) const;
-  void markInaccessible(void *Ptr, size_t Size) const;
+  // pointer. Each mapping is named on platforms that support it, primarily
+  // Android. This name must be a statically allocated string, as the Android
+  // kernel uses the string pointer directly.
+  void *mapMemory(size_t Size, const char *Name) const;
+  void unmapMemory(void *Ptr, size_t Size, const char *Name) const;
+  void markReadWrite(void *Ptr, size_t Size, const char *Name) const;
+  void markInaccessible(void *Ptr, size_t Size, const char *Name) const;
 
   // Get the page size from the platform-specific implementation. Only needs to
   // be called once, and the result should be cached in PageSize in this class.
   static size_t getPlatformPageSize();
 
-  // Install the SIGSEGV crash handler for printing use-after-free and heap-
-  // buffer-{under|over}flow exceptions. This is platform specific as even
-  // though POSIX and Windows both support registering handlers through
-  // signal(), we have to use platform-specific signal handlers to obtain the
-  // address that caused the SIGSEGV exception.
-  static void installSignalHandlers();
-
-  // Returns the index of the slot that this pointer resides in. If the pointer
-  // is not owned by this pool, the result is undefined.
-  size_t addrToSlot(uintptr_t Ptr) const;
-
-  // Returns the address of the N-th guarded slot.
-  uintptr_t slotToAddr(size_t N) const;
-
   // Returns a pointer to the metadata for the owned pointer. If the pointer is
   // not owned by this pool, the result is undefined.
   AllocationMetadata *addrToMetadata(uintptr_t Ptr) const;
 
-  // Returns the address of the page that this pointer resides in.
-  uintptr_t getPageAddr(uintptr_t Ptr) const;
-
-  // Gets the nearest slot to the provided address.
-  size_t getNearestSlot(uintptr_t Ptr) const;
-
-  // Returns whether the provided pointer is a guard page or not. The pointer
-  // must be within memory owned by this pool, else the result is undefined.
-  bool isGuardPage(uintptr_t Ptr) const;
-
   // Reserve a slot for a new guarded allocation. Returns kInvalidSlotID if no
   // slot is available to be reserved.
   size_t reserveSlot();
@@ -198,33 +149,24 @@ private:
   // Unreserve the guarded slot.
   void freeSlot(size_t SlotIndex);
 
-  // Returns the offset (in bytes) between the start of a guarded slot and where
-  // the start of the allocation should take place. Determined using the size of
-  // the allocation and the options provided at init-time.
-  uintptr_t allocationSlotOffset(size_t AllocationSize) const;
+  // Raise a SEGV and set the corresponding fields in the Allocator's State in
+  // order to tell the crash handler what happened. Used when errors are
+  // detected internally (Double Free, Invalid Free).
+  void trapOnAddress(uintptr_t Address, Error E);
 
-  // Returns the diagnosis for an unknown error. If the diagnosis is not
-  // Error::INVALID_FREE or Error::UNKNOWN, the metadata for the slot
-  // responsible for the error is placed in *Meta. 
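// An illustrative aside: with reportError() and the printing plumbing gone,
// the public surface above is what a supporting allocator touches on its hot
// path. A minimal wiring sketch, assuming hypothetical backingMalloc()/
// backingFree() stand-ins for the real system allocator, and that GPA.init()
// ran at startup:
#include "gwp_asan/guarded_pool_allocator.h"

#include <cstdlib>

static void *backingMalloc(size_t Size) { return malloc(Size); }
static void backingFree(void *Ptr) { free(Ptr); }

static gwp_asan::GuardedPoolAllocator GPA;

void *shimMalloc(size_t Size) {
  // shouldSample() is the only cost paid on the unsampled fast path.
  if (GWP_ASAN_UNLIKELY(GPA.shouldSample()))
    if (void *Ptr = GPA.allocate(Size)) // nullptr if too large or pool full.
      return Ptr;
  return backingMalloc(Size);
}

void shimFree(void *Ptr) {
  if (GPA.pointerIsMine(Ptr)) {
    GPA.deallocate(Ptr); // Traps via trapOnAddress() on double/invalid free.
    return;
  }
  backingFree(Ptr);
}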
- Error diagnoseUnknownError(uintptr_t AccessPtr, AllocationMetadata **Meta); + static GuardedPoolAllocator *getSingleton(); - void reportErrorInternal(uintptr_t AccessPtr, Error E); + // Install a pthread_atfork handler. + void installAtFork(); - // Cached page size for this system in bytes. - size_t PageSize = 0; + gwp_asan::AllocatorState State; // A mutex to protect the guarded slot and metadata pool for this class. Mutex PoolMutex; - // The number of guarded slots that this pool holds. - size_t MaxSimultaneousAllocations = 0; // Record the number allocations that we've sampled. We store this amount so // that we don't randomly choose to recycle a slot that previously had an // allocation before all the slots have been utilised. size_t NumSampledAllocations = 0; - // Pointer to the pool of guarded slots. Note that this points to the start of - // the pool (which is a guard page), not a pointer to the first guarded page. - uintptr_t GuardedPagePool = UINTPTR_MAX; - uintptr_t GuardedPagePoolEnd = 0; // Pointer to the allocation metadata (allocation/deallocation stack traces), // if any. AllocationMetadata *Metadata = nullptr; @@ -237,12 +179,9 @@ private: // See options.{h, inc} for more information. bool PerfectlyRightAlign = false; - // Printf function supplied by the implementing allocator. We can't (in - // general) use printf() from the cstdlib as it may malloc(), causing infinite - // recursion. - options::Printf_t Printf = nullptr; + // Backtrace function provided by the supporting allocator. See `options.h` + // for more information. options::Backtrace_t Backtrace = nullptr; - options::PrintBacktrace_t PrintBacktrace = nullptr; // The adjusted sample rate for allocation sampling. Default *must* be // nonzero, as dynamic initialisation may call malloc (e.g. from libstdc++) @@ -250,7 +189,7 @@ private: // where we would calculate modulo zero. This value is set UINT32_MAX, as when // GWP-ASan is disabled, we wish to never spend wasted cycles recalculating // the sample rate. - uint32_t AdjustedSampleRate = UINT32_MAX; + uint32_t AdjustedSampleRatePlusOne = 0; // Pack the thread local variables into a struct to ensure that they're in // the same cache line for performance reasons. These are the most touched diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace.h b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace.h index 6c9ee9f6506..3a72eb3d08e 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace.h +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace.h @@ -9,6 +9,7 @@ #ifndef GWP_ASAN_OPTIONAL_BACKTRACE_H_ #define GWP_ASAN_OPTIONAL_BACKTRACE_H_ +#include "gwp_asan/optional/segv_handler.h" #include "gwp_asan/options.h" namespace gwp_asan { @@ -21,7 +22,7 @@ namespace options { // note any thread-safety descriptions for the implementation of these functions // that you use. 
Backtrace_t getBacktraceFunction(); -PrintBacktrace_t getPrintBacktraceFunction(); +crash_handler::PrintBacktrace_t getPrintBacktraceFunction(); } // namespace options } // namespace gwp_asan diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace_linux_libc.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace_linux_libc.cpp index a656c9b41d5..bb0aad224a1 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace_linux_libc.cpp +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace_linux_libc.cpp @@ -24,7 +24,7 @@ size_t Backtrace(uintptr_t *TraceBuffer, size_t Size) { } static void PrintBacktrace(uintptr_t *Trace, size_t TraceLength, - gwp_asan::options::Printf_t Printf) { + gwp_asan::crash_handler::Printf_t Printf) { if (TraceLength == 0) { Printf(" <not found (does your allocator support backtracing?)>\n\n"); return; @@ -49,6 +49,8 @@ static void PrintBacktrace(uintptr_t *Trace, size_t TraceLength, namespace gwp_asan { namespace options { Backtrace_t getBacktraceFunction() { return Backtrace; } -PrintBacktrace_t getPrintBacktraceFunction() { return PrintBacktrace; } +crash_handler::PrintBacktrace_t getPrintBacktraceFunction() { + return PrintBacktrace; +} } // namespace options } // namespace gwp_asan diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp index 5e07fd6f465..3ac4b52bfc2 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp @@ -45,7 +45,7 @@ size_t Backtrace(uintptr_t *TraceBuffer, size_t Size) { } static void PrintBacktrace(uintptr_t *Trace, size_t TraceLength, - gwp_asan::options::Printf_t Printf) { + gwp_asan::crash_handler::Printf_t Printf) { __sanitizer::StackTrace StackTrace; StackTrace.trace = reinterpret_cast<__sanitizer::uptr *>(Trace); StackTrace.size = TraceLength; @@ -73,6 +73,8 @@ Backtrace_t getBacktraceFunction() { __sanitizer::InitializeCommonFlags(); return Backtrace; } -PrintBacktrace_t getPrintBacktraceFunction() { return PrintBacktrace; } +crash_handler::PrintBacktrace_t getPrintBacktraceFunction() { + return PrintBacktrace; +} } // namespace options } // namespace gwp_asan diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/optional/options_parser.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/options_parser.cpp index 6c2167288d6..2e638628674 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/optional/options_parser.cpp +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/options_parser.cpp @@ -83,8 +83,6 @@ void initOptions() { "GWP-ASan ERROR: SampleRate must be > 0 when GWP-ASan is enabled.\n"); exit(EXIT_FAILURE); } - - o->Printf = __sanitizer::Printf; } Options &getOptions() { return *getOptionsInternal(); } diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/optional/segv_handler.h b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/segv_handler.h new file mode 100644 index 00000000000..10af15055e2 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/segv_handler.h @@ -0,0 +1,81 @@ +//===-- crash_handler.h -----------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef GWP_ASAN_OPTIONAL_CRASH_HANDLER_H_ +#define GWP_ASAN_OPTIONAL_CRASH_HANDLER_H_ + +#include "gwp_asan/guarded_pool_allocator.h" +#include "gwp_asan/options.h" + +namespace gwp_asan { +namespace crash_handler { +// ================================ Requirements =============================== +// This function must be provided by the supporting allocator only when this +// provided crash handler is used to dump the generic report. +// sanitizer::Printf() function can be simply used here. +// ================================ Description ================================ +// This function shall produce output according to a strict subset of the C +// standard library's printf() family. This function must support printing the +// following formats: +// 1. integers: "%([0-9]*)?(z|ll)?{d,u,x,X}" +// 2. pointers: "%p" +// 3. strings: "%[-]([0-9]*)?(\\.\\*)?s" +// 4. chars: "%c" +// This function must be implemented in a signal-safe manner, and thus must not +// malloc(). +// =================================== Notes =================================== +// This function has a slightly different signature than the C standard +// library's printf(). Notably, it returns 'void' rather than 'int'. +typedef void (*Printf_t)(const char *Format, ...); + +// ================================ Requirements =============================== +// This function is required for the supporting allocator, but one of the three +// provided implementations may be used (RTGwpAsanBacktraceLibc, +// RTGwpAsanBacktraceSanitizerCommon, or BasicPrintBacktraceFunction). +// ================================ Description ================================ +// This function shall take the backtrace provided in `TraceBuffer`, and print +// it in a human-readable format using `Print`. Generally, this function shall +// resolve raw pointers to section offsets and print them with the following +// sanitizer-common format: +// " #{frame_number} {pointer} in {function name} ({binary name}+{offset}" +// e.g. " #5 0x420459 in _start (/tmp/uaf+0x420459)" +// This format allows the backtrace to be symbolized offline successfully using +// llvm-symbolizer. +// =================================== Notes =================================== +// This function may directly or indirectly call malloc(), as the +// GuardedPoolAllocator contains a reentrancy barrier to prevent infinite +// recursion. Any allocation made inside this function will be served by the +// supporting allocator, and will not have GWP-ASan protections. +typedef void (*PrintBacktrace_t)(uintptr_t *TraceBuffer, size_t TraceLength, + Printf_t Print); + +// Returns a function pointer to a basic PrintBacktrace implementation. This +// implementation simply prints the stack trace in a human readable fashion +// without any symbolization. +PrintBacktrace_t getBasicPrintBacktraceFunction(); + +// Install the SIGSEGV crash handler for printing use-after-free and heap- +// buffer-{under|over}flow exceptions if the user asked for it. This is platform +// specific as even though POSIX and Windows both support registering handlers +// through signal(), we have to use platform-specific signal handlers to obtain +// the address that caused the SIGSEGV exception. GPA->init() must be called +// before this function. 
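// An illustrative aside: the shape of a Printf_t callback. The formatter must
// not allocate; note that vsnprintf() is not formally async-signal-safe, so a
// production callback would use a hand-rolled formatter (as
// __sanitizer::Printf does). This sketch only illustrates the convention:
#include <cstdarg>
#include <cstdio>
#include <unistd.h>

void crashPrintf(const char *Format, ...) {
  char Buf[512]; // Stack buffer: no heap use from the handler.
  va_list Args;
  va_start(Args, Format);
  int Len = vsnprintf(Buf, sizeof(Buf), Format, Args);
  va_end(Args);
  if (Len <= 0)
    return;
  size_t ToWrite = static_cast<size_t>(Len);
  if (ToWrite >= sizeof(Buf))
    ToWrite = sizeof(Buf) - 1; // Output was truncated by vsnprintf.
  (void)write(STDERR_FILENO, Buf, ToWrite);
}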
+void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf, + PrintBacktrace_t PrintBacktrace, + options::Backtrace_t Backtrace); + +void uninstallSignalHandlers(); + +void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State, + const gwp_asan::AllocationMetadata *Metadata, + options::Backtrace_t Backtrace, Printf_t Printf, + PrintBacktrace_t PrintBacktrace); +} // namespace crash_handler +} // namespace gwp_asan + +#endif // GWP_ASAN_OPTIONAL_CRASH_HANDLER_H_ diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp new file mode 100644 index 00000000000..22589b89360 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp @@ -0,0 +1,227 @@ +//===-- crash_handler_posix.cpp ---------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/common.h" +#include "gwp_asan/crash_handler.h" +#include "gwp_asan/guarded_pool_allocator.h" +#include "gwp_asan/optional/segv_handler.h" +#include "gwp_asan/options.h" + +#include <assert.h> +#include <inttypes.h> +#include <signal.h> +#include <stdio.h> + +namespace { +using gwp_asan::AllocationMetadata; +using gwp_asan::Error; +using gwp_asan::GuardedPoolAllocator; +using gwp_asan::crash_handler::PrintBacktrace_t; +using gwp_asan::crash_handler::Printf_t; +using gwp_asan::options::Backtrace_t; + +struct sigaction PreviousHandler; +bool SignalHandlerInstalled; +gwp_asan::GuardedPoolAllocator *GPAForSignalHandler; +Printf_t PrintfForSignalHandler; +PrintBacktrace_t PrintBacktraceForSignalHandler; +Backtrace_t BacktraceForSignalHandler; + +static void sigSegvHandler(int sig, siginfo_t *info, void *ucontext) { + if (GPAForSignalHandler) { + GPAForSignalHandler->stop(); + + gwp_asan::crash_handler::dumpReport( + reinterpret_cast<uintptr_t>(info->si_addr), + GPAForSignalHandler->getAllocatorState(), + GPAForSignalHandler->getMetadataRegion(), BacktraceForSignalHandler, + PrintfForSignalHandler, PrintBacktraceForSignalHandler); + } + + // Process any previous handlers. + if (PreviousHandler.sa_flags & SA_SIGINFO) { + PreviousHandler.sa_sigaction(sig, info, ucontext); + } else if (PreviousHandler.sa_handler == SIG_DFL) { + // If the previous handler was the default handler, cause a core dump. + signal(SIGSEGV, SIG_DFL); + raise(SIGSEGV); + } else if (PreviousHandler.sa_handler == SIG_IGN) { + // If the previous segv handler was SIGIGN, crash iff we were responsible + // for the crash. + if (__gwp_asan_error_is_mine(GPAForSignalHandler->getAllocatorState(), + reinterpret_cast<uintptr_t>(info->si_addr))) { + signal(SIGSEGV, SIG_DFL); + raise(SIGSEGV); + } + } else { + PreviousHandler.sa_handler(sig); + } +} + +struct ScopedEndOfReportDecorator { + ScopedEndOfReportDecorator(gwp_asan::crash_handler::Printf_t Printf) + : Printf(Printf) {} + ~ScopedEndOfReportDecorator() { Printf("*** End GWP-ASan report ***\n"); } + gwp_asan::crash_handler::Printf_t Printf; +}; + +// Prints the provided error and metadata information. +void printHeader(Error E, uintptr_t AccessPtr, + const gwp_asan::AllocationMetadata *Metadata, + Printf_t Printf) { + // Print using intermediate strings. 
Platforms like Android don't like when + // you print multiple times to the same line, as there may be a newline + // appended to a log file automatically per Printf() call. + constexpr size_t kDescriptionBufferLen = 128; + char DescriptionBuffer[kDescriptionBufferLen] = ""; + if (E != Error::UNKNOWN && Metadata != nullptr) { + uintptr_t Address = __gwp_asan_get_allocation_address(Metadata); + size_t Size = __gwp_asan_get_allocation_size(Metadata); + if (E == Error::USE_AFTER_FREE) { + snprintf(DescriptionBuffer, kDescriptionBufferLen, + "(%zu byte%s into a %zu-byte allocation at 0x%zx) ", + AccessPtr - Address, (AccessPtr - Address == 1) ? "" : "s", Size, + Address); + } else if (AccessPtr < Address) { + snprintf(DescriptionBuffer, kDescriptionBufferLen, + "(%zu byte%s to the left of a %zu-byte allocation at 0x%zx) ", + Address - AccessPtr, (Address - AccessPtr == 1) ? "" : "s", Size, + Address); + } else if (AccessPtr > Address) { + snprintf(DescriptionBuffer, kDescriptionBufferLen, + "(%zu byte%s to the right of a %zu-byte allocation at 0x%zx) ", + AccessPtr - Address, (AccessPtr - Address == 1) ? "" : "s", Size, + Address); + } else { + snprintf(DescriptionBuffer, kDescriptionBufferLen, + "(a %zu-byte allocation) ", Size); + } + } + + // Possible number of digits of a 64-bit number: ceil(log10(2^64)) == 20. Add + // a null terminator, and round to the nearest 8-byte boundary. + uint64_t ThreadID = gwp_asan::getThreadID(); + constexpr size_t kThreadBufferLen = 24; + char ThreadBuffer[kThreadBufferLen]; + if (ThreadID == gwp_asan::kInvalidThreadID) + snprintf(ThreadBuffer, kThreadBufferLen, "<unknown>"); + else + snprintf(ThreadBuffer, kThreadBufferLen, "%" PRIu64, ThreadID); + + Printf("%s at 0x%zx %sby thread %s here:\n", gwp_asan::ErrorToString(E), + AccessPtr, DescriptionBuffer, ThreadBuffer); +} + +void defaultPrintStackTrace(uintptr_t *Trace, size_t TraceLength, + gwp_asan::crash_handler::Printf_t Printf) { + if (TraceLength == 0) + Printf(" <unknown (does your allocator support backtracing?)>\n"); + + for (size_t i = 0; i < TraceLength; ++i) { + Printf(" #%zu 0x%zx in <unknown>\n", i, Trace[i]); + } + Printf("\n"); +} + +} // anonymous namespace + +namespace gwp_asan { +namespace crash_handler { +PrintBacktrace_t getBasicPrintBacktraceFunction() { + return defaultPrintStackTrace; +} + +void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf, + PrintBacktrace_t PrintBacktrace, + options::Backtrace_t Backtrace) { + GPAForSignalHandler = GPA; + PrintfForSignalHandler = Printf; + PrintBacktraceForSignalHandler = PrintBacktrace; + BacktraceForSignalHandler = Backtrace; + + struct sigaction Action; + Action.sa_sigaction = sigSegvHandler; + Action.sa_flags = SA_SIGINFO; + sigaction(SIGSEGV, &Action, &PreviousHandler); + SignalHandlerInstalled = true; +} + +void uninstallSignalHandlers() { + if (SignalHandlerInstalled) { + sigaction(SIGSEGV, &PreviousHandler, nullptr); + SignalHandlerInstalled = false; + } +} + +void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State, + const gwp_asan::AllocationMetadata *Metadata, + options::Backtrace_t Backtrace, Printf_t Printf, + PrintBacktrace_t PrintBacktrace) { + assert(State && "dumpReport missing Allocator State."); + assert(Metadata && "dumpReport missing Metadata."); + assert(Printf && "dumpReport missing Printf."); + + if (!__gwp_asan_error_is_mine(State, ErrorPtr)) + return; + + Printf("*** GWP-ASan detected a memory error ***\n"); + ScopedEndOfReportDecorator Decorator(Printf); + + uintptr_t 
InternalErrorPtr = __gwp_asan_get_internal_crash_address(State); + if (InternalErrorPtr != 0u) + ErrorPtr = InternalErrorPtr; + + Error E = __gwp_asan_diagnose_error(State, Metadata, ErrorPtr); + + if (E == Error::UNKNOWN) { + Printf("GWP-ASan cannot provide any more information about this error. " + "This may occur due to a wild memory access into the GWP-ASan pool, " + "or an overflow/underflow that is > 512B in length.\n"); + return; + } + + const gwp_asan::AllocationMetadata *AllocMeta = + __gwp_asan_get_metadata(State, Metadata, ErrorPtr); + + // Print the error header. + printHeader(E, ErrorPtr, AllocMeta, Printf); + + // Print the fault backtrace. + static constexpr unsigned kMaximumStackFramesForCrashTrace = 512; + uintptr_t Trace[kMaximumStackFramesForCrashTrace]; + size_t TraceLength = Backtrace(Trace, kMaximumStackFramesForCrashTrace); + + PrintBacktrace(Trace, TraceLength, Printf); + + if (AllocMeta == nullptr) + return; + + // Maybe print the deallocation trace. + if (__gwp_asan_is_deallocated(AllocMeta)) { + uint64_t ThreadID = __gwp_asan_get_deallocation_thread_id(AllocMeta); + if (ThreadID == kInvalidThreadID) + Printf("0x%zx was deallocated by thread <unknown> here:\n", ErrorPtr); + else + Printf("0x%zx was deallocated by thread %zu here:\n", ErrorPtr, ThreadID); + TraceLength = __gwp_asan_get_deallocation_trace( + AllocMeta, Trace, kMaximumStackFramesForCrashTrace); + PrintBacktrace(Trace, TraceLength, Printf); + } + + // Print the allocation trace. + uint64_t ThreadID = __gwp_asan_get_allocation_thread_id(AllocMeta); + if (ThreadID == kInvalidThreadID) + Printf("0x%zx was allocated by thread <unknown> here:\n", ErrorPtr); + else + Printf("0x%zx was allocated by thread %zu here:\n", ErrorPtr, ThreadID); + TraceLength = __gwp_asan_get_allocation_trace( + AllocMeta, Trace, kMaximumStackFramesForCrashTrace); + PrintBacktrace(Trace, TraceLength, Printf); +} +} // namespace crash_handler +} // namespace gwp_asan diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/options.h b/gnu/llvm/compiler-rt/lib/gwp_asan/options.h index ae3f3d45e94..6fb43108b5d 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/options.h +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/options.h @@ -15,23 +15,6 @@ namespace gwp_asan { namespace options { // ================================ Requirements =============================== -// This function is required to be implemented by the supporting allocator. The -// sanitizer::Printf() function can be simply used here. -// ================================ Description ================================ -// This function shall produce output according to a strict subset of the C -// standard library's printf() family. This function must support printing the -// following formats: -// 1. integers: "%([0-9]*)?(z|ll)?{d,u,x,X}" -// 2. pointers: "%p" -// 3. strings: "%[-]([0-9]*)?(\\.\\*)?s" -// 4. chars: "%c" -// This function must be implemented in a signal-safe manner. -// =================================== Notes =================================== -// This function has a slightly different signature than the C standard -// library's printf(). Notably, it returns 'void' rather than 'int'. -typedef void (*Printf_t)(const char *Format, ...); - -// ================================ Requirements =============================== // This function is required to be either implemented by the supporting // allocator, or one of the two provided implementations may be used // (RTGwpAsanBacktraceLibc or RTGwpAsanBacktraceSanitizerCommon). 
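The Backtrace_t hook whose requirements begin above is now the only function a
supporting allocator must still supply (or take from RTGwpAsanBacktraceLibc /
RTGwpAsanBacktraceSanitizerCommon). As a minimal sketch of that contract, not
part of this import, a libc-backed implementation could look as follows,
assuming <execinfo.h> is available; the name exampleBacktrace is illustrative
only:

#include <execinfo.h>
#include <stddef.h>
#include <stdint.h>

// Capture up to Size return addresses into TraceBuffer and report how many
// frames were written, which is exactly what the Backtrace_t typedef below
// asks for.
static size_t exampleBacktrace(uintptr_t *TraceBuffer, size_t Size) {
  // backtrace() takes void**; uintptr_t is pointer-sized, so casting the
  // buffer is the conventional shim.
  return static_cast<size_t>(backtrace(
      reinterpret_cast<void **>(TraceBuffer), static_cast<int>(Size)));
}

As the surrounding notes explain, such a hook may allocate: the
GuardedPoolAllocator's reentrancy barrier routes any malloc() it performs to
the supporting allocator, outside GWP-ASan's protections.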
@@ -50,32 +33,8 @@ typedef void (*Printf_t)(const char *Format, ...); // supporting allocator, and will not have GWP-ASan protections. typedef size_t (*Backtrace_t)(uintptr_t *TraceBuffer, size_t Size); -// ================================ Requirements =============================== -// This function is optional for the supporting allocator, but one of the two -// provided implementations may be used (RTGwpAsanBacktraceLibc or -// RTGwpAsanBacktraceSanitizerCommon). If not provided, a default implementation -// is used which prints the raw pointers only. -// ================================ Description ================================ -// This function shall take the backtrace provided in `TraceBuffer`, and print -// it in a human-readable format using `Print`. Generally, this function shall -// resolve raw pointers to section offsets and print them with the following -// sanitizer-common format: -// " #{frame_number} {pointer} in {function name} ({binary name}+{offset}" -// e.g. " #5 0x420459 in _start (/tmp/uaf+0x420459)" -// This format allows the backtrace to be symbolized offline successfully using -// llvm-symbolizer. -// =================================== Notes =================================== -// This function may directly or indirectly call malloc(), as the -// GuardedPoolAllocator contains a reentrancy barrier to prevent infinite -// recursion. Any allocation made inside this function will be served by the -// supporting allocator, and will not have GWP-ASan protections. -typedef void (*PrintBacktrace_t)(uintptr_t *TraceBuffer, size_t TraceLength, - Printf_t Print); - struct Options { - Printf_t Printf = nullptr; Backtrace_t Backtrace = nullptr; - PrintBacktrace_t PrintBacktrace = nullptr; // Read the options from the included definitions file. #define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \ @@ -89,9 +48,7 @@ struct Options { #include "gwp_asan/options.inc" #undef GWP_ASAN_OPTION - Printf = nullptr; Backtrace = nullptr; - PrintBacktrace = nullptr; } }; } // namespace options diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/options.inc b/gnu/llvm/compiler-rt/lib/gwp_asan/options.inc index df6c46e6e98..6cdddfbad84 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/options.inc +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/options.inc @@ -17,9 +17,10 @@ GWP_ASAN_OPTION( "When allocations are right-aligned, should we perfectly align them up to " "the page boundary? By default (false), we round up allocation size to the " "nearest power of two (1, 2, 4, 8, 16) up to a maximum of 16-byte " - "alignment for performance reasons. Setting this to true can find single " - "byte buffer-overflows for multibyte allocations at the cost of " - "performance, and may be incompatible with some architectures.") + "alignment for performance reasons. For Bionic, we use 8-byte alignment by " + "default. Setting this to true can find single byte buffer-overflows for " + "multibyte allocations at the cost of performance, and may be incompatible " + "with some architectures.") GWP_ASAN_OPTION(int, MaxSimultaneousAllocations, 16, "Number of simultaneously-guarded allocations available in the " @@ -30,6 +31,13 @@ GWP_ASAN_OPTION(int, SampleRate, 5000, "selected for GWP-ASan sampling. Default is 5000. Sample rates " "up to (2^31 - 1) are supported.") +// Developer note - This option is not actually processed by GWP-ASan itself. It +// is included here so that a user can specify whether they want signal handlers +// or not. 
The supporting allocator should inspect this value to see whether +// signal handlers need to be installed, and then use +// crash_handler::installSignalHandlers() in order to install the handlers. Note +// that in order to support signal handlers, you will need to link against the +// optional crash_handler component. GWP_ASAN_OPTION( bool, InstallSignalHandlers, true, "Install GWP-ASan signal handlers for SIGSEGV during dynamic loading. This " @@ -39,3 +47,7 @@ GWP_ASAN_OPTION( "programs that install further signal handlers should make sure they do " "the same. Note, if the previously installed SIGSEGV handler is SIG_IGN, " "we terminate the process after dumping the error report.") + +GWP_ASAN_OPTION(bool, InstallForkHandlers, true, + "Install GWP-ASan atfork handlers to acquire internal locks " + "before fork and release them after.") diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/platform_specific/common_posix.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/platform_specific/common_posix.cpp new file mode 100644 index 00000000000..e44e6299eea --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/platform_specific/common_posix.cpp @@ -0,0 +1,24 @@ +//===-- common_posix.cpp ---------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/common.h" + +#include <sys/syscall.h> +#include <unistd.h> + +namespace gwp_asan { + +uint64_t getThreadID() { +#ifdef SYS_gettid + return syscall(SYS_gettid); +#else + return kInvalidThreadID; +#endif +} + +} // namespace gwp_asan diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp index 8bc0aefeec4..a8767a4cb80 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp @@ -7,90 +7,79 @@ //===----------------------------------------------------------------------===// #include "gwp_asan/guarded_pool_allocator.h" +#include "gwp_asan/utilities.h" -#include <stdlib.h> +#include <assert.h> #include <errno.h> #include <signal.h> +#include <stdlib.h> +#include <string.h> #include <sys/mman.h> -#include <sys/syscall.h> #include <sys/types.h> #include <unistd.h> -namespace gwp_asan { +#ifdef ANDROID +#include <sys/prctl.h> +#define PR_SET_VMA 0x53564d41 +#define PR_SET_VMA_ANON_NAME 0 +#endif // ANDROID + +void MaybeSetMappingName(void *Mapping, size_t Size, const char *Name) { +#ifdef ANDROID + prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, Mapping, Size, Name); +#endif // ANDROID + // Anonymous mapping names are only supported on Android. + return; +} -void *GuardedPoolAllocator::mapMemory(size_t Size) const { +namespace gwp_asan { +void *GuardedPoolAllocator::mapMemory(size_t Size, const char *Name) const { void *Ptr = mmap(nullptr, Size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); - - if (Ptr == MAP_FAILED) { - Printf("Failed to map guarded pool allocator memory, errno: %d\n", errno); - Printf(" mmap(nullptr, %zu, ...) 
failed.\n", Size);
-    exit(EXIT_FAILURE);
-  }
+  Check(Ptr != MAP_FAILED, "Failed to map guarded pool allocator memory");
+  MaybeSetMappingName(Ptr, Size, Name);
   return Ptr;
 }
 
-void GuardedPoolAllocator::markReadWrite(void *Ptr, size_t Size) const {
-  if (mprotect(Ptr, Size, PROT_READ | PROT_WRITE) != 0) {
-    Printf("Failed to set guarded pool allocator memory at as RW, errno: %d\n",
-           errno);
-    Printf("  mprotect(%p, %zu, RW) failed.\n", Ptr, Size);
-    exit(EXIT_FAILURE);
-  }
+void GuardedPoolAllocator::unmapMemory(void *Ptr, size_t Size,
+                                       const char *Name) const {
+  Check(munmap(Ptr, Size) == 0,
+        "Failed to unmap guarded pool allocator memory.");
+  MaybeSetMappingName(Ptr, Size, Name);
 }
 
-void GuardedPoolAllocator::markInaccessible(void *Ptr, size_t Size) const {
+void GuardedPoolAllocator::markReadWrite(void *Ptr, size_t Size,
+                                         const char *Name) const {
+  Check(mprotect(Ptr, Size, PROT_READ | PROT_WRITE) == 0,
+        "Failed to set guarded pool allocator memory as RW.");
+  MaybeSetMappingName(Ptr, Size, Name);
+}
+
+void GuardedPoolAllocator::markInaccessible(void *Ptr, size_t Size,
+                                            const char *Name) const {
   // mmap() a PROT_NONE page over the address to release it to the system, if
   // we used mprotect() here the system would count pages in the quarantine
   // against the RSS.
-  if (mmap(Ptr, Size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1,
-           0) == MAP_FAILED) {
-    Printf("Failed to set guarded pool allocator memory as inaccessible, "
-           "errno: %d\n",
-           errno);
-    Printf("  mmap(%p, %zu, NONE, ...) failed.\n", Ptr, Size);
-    exit(EXIT_FAILURE);
-  }
+  Check(mmap(Ptr, Size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1,
+             0) != MAP_FAILED,
+        "Failed to set guarded pool allocator memory as inaccessible.");
+  MaybeSetMappingName(Ptr, Size, Name);
 }
 
 size_t GuardedPoolAllocator::getPlatformPageSize() {
   return sysconf(_SC_PAGESIZE);
 }
 
-struct sigaction PreviousHandler;
-
-static void sigSegvHandler(int sig, siginfo_t *info, void *ucontext) {
-  gwp_asan::GuardedPoolAllocator::reportError(
-      reinterpret_cast<uintptr_t>(info->si_addr));
-
-  // Process any previous handlers.
-  if (PreviousHandler.sa_flags & SA_SIGINFO) {
-    PreviousHandler.sa_sigaction(sig, info, ucontext);
-  } else if (PreviousHandler.sa_handler == SIG_IGN ||
-             PreviousHandler.sa_handler == SIG_DFL) {
-    // If the previous handler was the default handler, or was ignoring this
-    // signal, install the default handler and re-raise the signal in order to
-    // get a core dump and terminate this process.
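// (The replacement handler in optional/segv_handler_posix.cpp keeps this
// unconditional re-raise for SIG_DFL but, for SIG_IGN, re-raises only when
// __gwp_asan_error_is_mine() confirms the fault belongs to GWP-ASan.)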
- signal(SIGSEGV, SIG_DFL); - raise(SIGSEGV); - } else { - PreviousHandler.sa_handler(sig); - } -} - -void GuardedPoolAllocator::installSignalHandlers() { - struct sigaction Action; - Action.sa_sigaction = sigSegvHandler; - Action.sa_flags = SA_SIGINFO; - sigaction(SIGSEGV, &Action, &PreviousHandler); -} - -uint64_t GuardedPoolAllocator::getThreadID() { -#ifdef SYS_gettid - return syscall(SYS_gettid); -#else - return kInvalidThreadID; -#endif +void GuardedPoolAllocator::installAtFork() { + auto Disable = []() { + if (auto *S = getSingleton()) + S->disable(); + }; + auto Enable = []() { + if (auto *S = getSingleton()) + S->enable(); + }; + pthread_atfork(Disable, Enable, Enable); } } // namespace gwp_asan diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp new file mode 100644 index 00000000000..0e605989670 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp @@ -0,0 +1,90 @@ +//===-- utilities_posix.cpp -------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/definitions.h" +#include "gwp_asan/utilities.h" + +#include <assert.h> + +#ifdef __BIONIC__ +#include <stdlib.h> +extern "C" GWP_ASAN_WEAK void android_set_abort_message(const char *); +#else // __BIONIC__ +#include <stdio.h> +#endif + +namespace gwp_asan { + +#ifdef __BIONIC__ +void Check(bool Condition, const char *Message) { + if (Condition) + return; + if (&android_set_abort_message != nullptr) + android_set_abort_message(Message); + abort(); +} +#else // __BIONIC__ +void Check(bool Condition, const char *Message) { + if (Condition) + return; + fprintf(stderr, "%s", Message); + __builtin_trap(); +} +#endif // __BIONIC__ + +// See `bionic/tests/malloc_test.cpp` in the Android source for documentation +// regarding their alignment guarantees. We always round up to the closest +// 8-byte window. As GWP-ASan's malloc(X) can always get exactly an X-sized +// allocation, an allocation that rounds up to 16-bytes will always be given a +// 16-byte aligned allocation. 
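// Illustrative values (they match the AlignmentTest cases added later in this
// import): under BIONIC alignment a 9-byte request is served from a 16-byte
// right-aligned slot and a 17-byte request from a 24-byte one (the next
// multiple of 8), while under POWER_OF_TWO, 9 still rounds up to 16 but 17
// rounds up to 32 (the next multiple of 16 once the request exceeds 16 bytes).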
+static size_t alignBionic(size_t RealAllocationSize) { + if (RealAllocationSize % 8 == 0) + return RealAllocationSize; + return RealAllocationSize + 8 - (RealAllocationSize % 8); +} + +static size_t alignPowerOfTwo(size_t RealAllocationSize) { + if (RealAllocationSize <= 2) + return RealAllocationSize; + if (RealAllocationSize <= 4) + return 4; + if (RealAllocationSize <= 8) + return 8; + if (RealAllocationSize % 16 == 0) + return RealAllocationSize; + return RealAllocationSize + 16 - (RealAllocationSize % 16); +} + +#ifdef __BIONIC__ +static constexpr AlignmentStrategy PlatformDefaultAlignment = + AlignmentStrategy::BIONIC; +#else // __BIONIC__ +static constexpr AlignmentStrategy PlatformDefaultAlignment = + AlignmentStrategy::POWER_OF_TWO; +#endif // __BIONIC__ + +size_t rightAlignedAllocationSize(size_t RealAllocationSize, + AlignmentStrategy Align) { + assert(RealAllocationSize > 0); + if (Align == AlignmentStrategy::DEFAULT) + Align = PlatformDefaultAlignment; + + switch (Align) { + case AlignmentStrategy::BIONIC: + return alignBionic(RealAllocationSize); + case AlignmentStrategy::POWER_OF_TWO: + return alignPowerOfTwo(RealAllocationSize); + case AlignmentStrategy::PERFECT: + return RealAllocationSize; + case AlignmentStrategy::DEFAULT: + __builtin_unreachable(); + } + __builtin_unreachable(); +} + +} // namespace gwp_asan diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/random.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/random.cpp index 90493da7e03..2180f920408 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/random.cpp +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/random.cpp @@ -7,14 +7,22 @@ //===----------------------------------------------------------------------===// #include "gwp_asan/random.h" -#include "gwp_asan/guarded_pool_allocator.h" +#include "gwp_asan/common.h" #include <time.h> +// Initialised to a magic constant so that an uninitialised GWP-ASan won't +// regenerate its sample counter for as long as possible. The xorshift32() +// algorithm used below results in getRandomUnsigned32(0xff82eb50) == +// 0xfffffea4. +GWP_ASAN_TLS_INITIAL_EXEC uint32_t RandomState = 0xff82eb50; + namespace gwp_asan { +void initPRNG() { + RandomState = time(nullptr) + getThreadID(); +} + uint32_t getRandomUnsigned32() { - thread_local uint32_t RandomState = - time(nullptr) + GuardedPoolAllocator::getThreadID(); RandomState ^= RandomState << 13; RandomState ^= RandomState >> 17; RandomState ^= RandomState << 5; diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/random.h b/gnu/llvm/compiler-rt/lib/gwp_asan/random.h index 5fcf30d557e..953b98909e9 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/random.h +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/random.h @@ -12,6 +12,9 @@ #include <stdint.h> namespace gwp_asan { +// Initialise the PRNG, using time and thread ID as the seed. +void initPRNG(); + // xorshift (32-bit output), extremely fast PRNG that uses arithmetic operations // only. Seeded using walltime. 
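// getRandomUnsigned32() implements the classic xorshift32 step
//   x ^= x << 13; x ^= x >> 17; x ^= x << 5;
// over a thread-local state word. As random.cpp notes, the state starts at
// the magic constant 0xff82eb50, for which one step yields 0xfffffea4, so an
// uninitialised GWP-ASan puts off regenerating its sample counter for as long
// as possible; initPRNG() later reseeds from walltime plus the thread ID.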
uint32_t getRandomUnsigned32(); diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt index d6553d2cc5a..feac23df9fe 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt @@ -14,10 +14,15 @@ set(GWP_ASAN_UNITTESTS backtrace.cpp basic.cpp compression.cpp + iterate.cpp + crash_handler_api.cpp driver.cpp mutex_test.cpp slot_reuse.cpp - thread_contention.cpp) + thread_contention.cpp + harness.cpp + enable_disable.cpp + late_init.cpp) set(GWP_ASAN_UNIT_TEST_HEADERS ${GWP_ASAN_HEADERS} @@ -41,6 +46,7 @@ if(COMPILER_RT_DEFAULT_TARGET_ARCH IN_LIST GWP_ASAN_SUPPORTED_ARCH) set(GWP_ASAN_TEST_RUNTIME_OBJECTS $<TARGET_OBJECTS:RTGwpAsan.${arch}> $<TARGET_OBJECTS:RTGwpAsanBacktraceSanitizerCommon.${arch}> + $<TARGET_OBJECTS:RTGwpAsanSegvHandler.${arch}> $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}>) diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/alignment.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/alignment.cpp index 8b1ce8ccd71..bf98f1f5833 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/alignment.cpp +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/alignment.cpp @@ -7,21 +7,38 @@ //===----------------------------------------------------------------------===// #include "gwp_asan/tests/harness.h" +#include "gwp_asan/utilities.h" -TEST_F(DefaultGuardedPoolAllocator, BasicAllocation) { - std::vector<std::pair<int, int>> AllocSizeToAlignment = { +TEST(AlignmentTest, PowerOfTwo) { + std::vector<std::pair<size_t, size_t>> AskedSizeToAlignedSize = { {1, 1}, {2, 2}, {3, 4}, {4, 4}, {5, 8}, {7, 8}, - {8, 8}, {9, 16}, {15, 16}, {16, 16}, {17, 16}, {31, 16}, - {32, 16}, {33, 16}, {4095, 4096}, {4096, 4096}, + {8, 8}, {9, 16}, {15, 16}, {16, 16}, {17, 32}, {31, 32}, + {32, 32}, {33, 48}, {4095, 4096}, {4096, 4096}, }; - for (const auto &KV : AllocSizeToAlignment) { - void *Ptr = GPA.allocate(KV.first); - EXPECT_NE(nullptr, Ptr); + for (const auto &KV : AskedSizeToAlignedSize) { + EXPECT_EQ(KV.second, + gwp_asan::rightAlignedAllocationSize( + KV.first, gwp_asan::AlignmentStrategy::POWER_OF_TWO)); + } +} + +TEST(AlignmentTest, AlignBionic) { + std::vector<std::pair<size_t, size_t>> AskedSizeToAlignedSize = { + {1, 8}, {2, 8}, {3, 8}, {4, 8}, {5, 8}, {7, 8}, + {8, 8}, {9, 16}, {15, 16}, {16, 16}, {17, 24}, {31, 32}, + {32, 32}, {33, 40}, {4095, 4096}, {4096, 4096}, + }; - // Check the alignment of the pointer is as expected. 
- EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(Ptr) % KV.second); + for (const auto &KV : AskedSizeToAlignedSize) { + EXPECT_EQ(KV.second, gwp_asan::rightAlignedAllocationSize( + KV.first, gwp_asan::AlignmentStrategy::BIONIC)); + } +} - GPA.deallocate(Ptr); +TEST(AlignmentTest, PerfectAlignment) { + for (size_t i = 1; i <= 4096; ++i) { + EXPECT_EQ(i, gwp_asan::rightAlignedAllocationSize( + i, gwp_asan::AlignmentStrategy::PERFECT)); } } diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/backtrace.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/backtrace.cpp index 6dccdb8c001..b3d44270bb2 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/backtrace.cpp +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/backtrace.cpp @@ -8,34 +8,77 @@ #include <string> +#include "gwp_asan/crash_handler.h" #include "gwp_asan/tests/harness.h" -TEST_F(BacktraceGuardedPoolAllocator, DoubleFree) { - void *Ptr = GPA.allocate(1); +// Optnone to ensure that the calls to these functions are not optimized away, +// as we're looking for them in the backtraces. +__attribute((optnone)) void * +AllocateMemory(gwp_asan::GuardedPoolAllocator &GPA) { + return GPA.allocate(1); +} +__attribute((optnone)) void +DeallocateMemory(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) { + GPA.deallocate(Ptr); +} +__attribute((optnone)) void +DeallocateMemory2(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) { GPA.deallocate(Ptr); +} +__attribute__((optnone)) void TouchMemory(void *Ptr) { + *(reinterpret_cast<volatile char *>(Ptr)) = 7; +} - std::string DeathRegex = "Double free.*"; - DeathRegex.append("backtrace\\.cpp:25.*"); +TEST_F(BacktraceGuardedPoolAllocator, DoubleFree) { + void *Ptr = AllocateMemory(GPA); + DeallocateMemory(GPA, Ptr); + + std::string DeathRegex = "Double Free.*"; + DeathRegex.append("DeallocateMemory2.*"); DeathRegex.append("was deallocated.*"); - DeathRegex.append("backtrace\\.cpp:15.*"); + DeathRegex.append("DeallocateMemory.*"); DeathRegex.append("was allocated.*"); - DeathRegex.append("backtrace\\.cpp:14.*"); - ASSERT_DEATH(GPA.deallocate(Ptr), DeathRegex); + DeathRegex.append("AllocateMemory.*"); + ASSERT_DEATH(DeallocateMemory2(GPA, Ptr), DeathRegex); } TEST_F(BacktraceGuardedPoolAllocator, UseAfterFree) { - char *Ptr = static_cast<char *>(GPA.allocate(1)); - GPA.deallocate(Ptr); + void *Ptr = AllocateMemory(GPA); + DeallocateMemory(GPA, Ptr); - std::string DeathRegex = "Use after free.*"; - DeathRegex.append("backtrace\\.cpp:40.*"); + std::string DeathRegex = "Use After Free.*"; + DeathRegex.append("TouchMemory.*"); DeathRegex.append("was deallocated.*"); - DeathRegex.append("backtrace\\.cpp:30.*"); + DeathRegex.append("DeallocateMemory.*"); DeathRegex.append("was allocated.*"); - DeathRegex.append("backtrace\\.cpp:29.*"); - ASSERT_DEATH({ *Ptr = 7; }, DeathRegex); + DeathRegex.append("AllocateMemory.*"); + ASSERT_DEATH(TouchMemory(Ptr), DeathRegex); +} + +TEST(Backtrace, Short) { + gwp_asan::AllocationMetadata Meta; + Meta.AllocationTrace.RecordBacktrace( + [](uintptr_t *TraceBuffer, size_t /* Size */) -> size_t { + TraceBuffer[0] = 123u; + TraceBuffer[1] = 321u; + return 2u; + }); + uintptr_t TraceOutput[2] = {}; + EXPECT_EQ(2u, __gwp_asan_get_allocation_trace(&Meta, TraceOutput, 2)); + EXPECT_EQ(TraceOutput[0], 123u); + EXPECT_EQ(TraceOutput[1], 321u); +} + +TEST(Backtrace, ExceedsStorableLength) { + gwp_asan::AllocationMetadata Meta; + Meta.AllocationTrace.RecordBacktrace( + [](uintptr_t * /* TraceBuffer */, size_t /* Size */) -> size_t { + return SIZE_MAX; // Wow, that's big! 
+ }); + uintptr_t TraceOutput; + EXPECT_EQ(1u, __gwp_asan_get_allocation_trace(&Meta, &TraceOutput, 1)); } diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/basic.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/basic.cpp index 663db91b7ef..29f420d3027 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/basic.cpp +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/basic.cpp @@ -24,7 +24,7 @@ TEST_F(DefaultGuardedPoolAllocator, NullptrIsNotMine) { TEST_F(CustomGuardedPoolAllocator, SizedAllocations) { InitNumSlots(1); - std::size_t MaxAllocSize = GPA.maximumAllocationSize(); + std::size_t MaxAllocSize = GPA.getAllocatorState()->maximumAllocationSize(); EXPECT_TRUE(MaxAllocSize > 0); for (unsigned AllocSize = 1; AllocSize <= MaxAllocSize; AllocSize <<= 1) { @@ -37,7 +37,8 @@ TEST_F(CustomGuardedPoolAllocator, SizedAllocations) { } TEST_F(DefaultGuardedPoolAllocator, TooLargeAllocation) { - EXPECT_EQ(nullptr, GPA.allocate(GPA.maximumAllocationSize() + 1)); + EXPECT_EQ(nullptr, + GPA.allocate(GPA.getAllocatorState()->maximumAllocationSize() + 1)); } TEST_F(CustomGuardedPoolAllocator, AllocAllSlots) { diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/crash_handler_api.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/crash_handler_api.cpp new file mode 100644 index 00000000000..10a014ecd4e --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/crash_handler_api.cpp @@ -0,0 +1,208 @@ +//===-- crash_handler_api.cpp -----------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/crash_handler.h" +#include "gwp_asan/guarded_pool_allocator.h" +#include "gwp_asan/stack_trace_compressor.h" +#include "gwp_asan/tests/harness.h" + +using Error = gwp_asan::Error; +using GuardedPoolAllocator = gwp_asan::GuardedPoolAllocator; +using AllocationMetadata = gwp_asan::AllocationMetadata; +using AllocatorState = gwp_asan::AllocatorState; + +class CrashHandlerAPITest : public ::testing::Test { +public: + void SetUp() override { setupState(); } + +protected: + size_t metadata(uintptr_t Addr, uintptr_t Size, bool IsDeallocated) { + // Should only be allocating the 0x3000, 0x5000, 0x7000, 0x9000 pages. + EXPECT_GE(Addr, 0x3000u); + EXPECT_LT(Addr, 0xa000u); + + size_t Slot = State.getNearestSlot(Addr); + + Metadata[Slot].Addr = Addr; + Metadata[Slot].Size = Size; + Metadata[Slot].IsDeallocated = IsDeallocated; + Metadata[Slot].AllocationTrace.ThreadID = 123; + Metadata[Slot].DeallocationTrace.ThreadID = 321; + setupBacktraces(&Metadata[Slot]); + + return Slot; + } + + void setupState() { + State.GuardedPagePool = 0x2000; + State.GuardedPagePoolEnd = 0xb000; + State.MaxSimultaneousAllocations = 4; // 0x3000, 0x5000, 0x7000, 0x9000. 
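    // Assumed layout for these tests: the pool spans [0x2000, 0xb000) in
    // 0x1000-sized pages, with guard pages at 0x2000, 0x4000, 0x6000, 0x8000
    // and 0xa000 and the four allocatable slots in between.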
+ State.PageSize = 0x1000; + } + + void setupBacktraces(AllocationMetadata *Meta) { + Meta->AllocationTrace.TraceSize = gwp_asan::compression::pack( + BacktraceConstants, kNumBacktraceConstants, + Meta->AllocationTrace.CompressedTrace, + AllocationMetadata::kStackFrameStorageBytes); + + if (Meta->IsDeallocated) + Meta->DeallocationTrace.TraceSize = gwp_asan::compression::pack( + BacktraceConstants, kNumBacktraceConstants, + Meta->DeallocationTrace.CompressedTrace, + AllocationMetadata::kStackFrameStorageBytes); + } + + void checkBacktrace(const AllocationMetadata *Meta, bool IsDeallocated) { + uintptr_t Buffer[kNumBacktraceConstants]; + size_t NumBacktraceConstants = kNumBacktraceConstants; + EXPECT_EQ(NumBacktraceConstants, __gwp_asan_get_allocation_trace( + Meta, Buffer, kNumBacktraceConstants)); + for (size_t i = 0; i < kNumBacktraceConstants; ++i) + EXPECT_EQ(Buffer[i], BacktraceConstants[i]); + + if (IsDeallocated) { + EXPECT_EQ(NumBacktraceConstants, + __gwp_asan_get_deallocation_trace(Meta, Buffer, + kNumBacktraceConstants)); + for (size_t i = 0; i < kNumBacktraceConstants; ++i) + EXPECT_EQ(Buffer[i], BacktraceConstants[i]); + } + } + + void checkMetadata(size_t Index, uintptr_t ErrorPtr) { + const AllocationMetadata *Meta = + __gwp_asan_get_metadata(&State, Metadata, ErrorPtr); + EXPECT_NE(nullptr, Meta); + EXPECT_EQ(Metadata[Index].Addr, __gwp_asan_get_allocation_address(Meta)); + EXPECT_EQ(Metadata[Index].Size, __gwp_asan_get_allocation_size(Meta)); + EXPECT_EQ(Metadata[Index].AllocationTrace.ThreadID, + __gwp_asan_get_allocation_thread_id(Meta)); + + bool IsDeallocated = __gwp_asan_is_deallocated(Meta); + EXPECT_EQ(Metadata[Index].IsDeallocated, IsDeallocated); + checkBacktrace(Meta, IsDeallocated); + + if (!IsDeallocated) + return; + + EXPECT_EQ(Metadata[Index].DeallocationTrace.ThreadID, + __gwp_asan_get_deallocation_thread_id(Meta)); + } + + static constexpr size_t kNumBacktraceConstants = 4; + static uintptr_t BacktraceConstants[kNumBacktraceConstants]; + AllocatorState State = {}; + AllocationMetadata Metadata[4] = {}; +}; + +uintptr_t CrashHandlerAPITest::BacktraceConstants[kNumBacktraceConstants] = { + 0xdeadbeef, 0xdeadc0de, 0xbadc0ffe, 0xcafef00d}; + +TEST_F(CrashHandlerAPITest, PointerNotMine) { + uintptr_t UnknownPtr = reinterpret_cast<uintptr_t>(&State); + + EXPECT_FALSE(__gwp_asan_error_is_mine(&State, 0)); + EXPECT_FALSE(__gwp_asan_error_is_mine(&State, UnknownPtr)); + + EXPECT_EQ(Error::UNKNOWN, __gwp_asan_diagnose_error(&State, Metadata, 0)); + EXPECT_EQ(Error::UNKNOWN, + __gwp_asan_diagnose_error(&State, Metadata, UnknownPtr)); + + EXPECT_EQ(nullptr, __gwp_asan_get_metadata(&State, Metadata, 0)); + EXPECT_EQ(nullptr, __gwp_asan_get_metadata(&State, Metadata, UnknownPtr)); +} + +TEST_F(CrashHandlerAPITest, PointerNotAllocated) { + uintptr_t FailureAddress = 0x9000; + + EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress)); + EXPECT_EQ(Error::UNKNOWN, + __gwp_asan_diagnose_error(&State, Metadata, FailureAddress)); + EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State)); + EXPECT_EQ(nullptr, __gwp_asan_get_metadata(&State, Metadata, FailureAddress)); +} + +TEST_F(CrashHandlerAPITest, DoubleFree) { + size_t Index = + metadata(/* Addr */ 0x7000, /* Size */ 0x20, /* IsDeallocated */ true); + uintptr_t FailureAddress = 0x7000; + + State.FailureType = Error::DOUBLE_FREE; + State.FailureAddress = FailureAddress; + + EXPECT_TRUE(__gwp_asan_error_is_mine(&State)); + EXPECT_EQ(Error::DOUBLE_FREE, + __gwp_asan_diagnose_error(&State, Metadata, 0x0)); + 
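  // For errors GWP-ASan detects itself (double/invalid free), the address
  // argument to __gwp_asan_diagnose_error() may be zero: the verdict comes
  // from State.FailureType, and the faulting address is recovered separately
  // via __gwp_asan_get_internal_crash_address() below.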
EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State)); + checkMetadata(Index, FailureAddress); +} + +TEST_F(CrashHandlerAPITest, InvalidFree) { + size_t Index = + metadata(/* Addr */ 0x7000, /* Size */ 0x20, /* IsDeallocated */ false); + uintptr_t FailureAddress = 0x7001; + + State.FailureType = Error::INVALID_FREE; + State.FailureAddress = FailureAddress; + + EXPECT_TRUE(__gwp_asan_error_is_mine(&State)); + EXPECT_EQ(Error::INVALID_FREE, + __gwp_asan_diagnose_error(&State, Metadata, 0x0)); + EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State)); + checkMetadata(Index, FailureAddress); +} + +TEST_F(CrashHandlerAPITest, InvalidFreeNoMetadata) { + uintptr_t FailureAddress = 0x7001; + + State.FailureType = Error::INVALID_FREE; + State.FailureAddress = FailureAddress; + + EXPECT_TRUE(__gwp_asan_error_is_mine(&State)); + EXPECT_EQ(Error::INVALID_FREE, + __gwp_asan_diagnose_error(&State, Metadata, 0x0)); + EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State)); + EXPECT_EQ(nullptr, __gwp_asan_get_metadata(&State, Metadata, FailureAddress)); +} + +TEST_F(CrashHandlerAPITest, UseAfterFree) { + size_t Index = + metadata(/* Addr */ 0x7000, /* Size */ 0x20, /* IsDeallocated */ true); + uintptr_t FailureAddress = 0x7001; + + EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress)); + EXPECT_EQ(Error::USE_AFTER_FREE, + __gwp_asan_diagnose_error(&State, Metadata, FailureAddress)); + EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State)); + checkMetadata(Index, FailureAddress); +} + +TEST_F(CrashHandlerAPITest, BufferOverflow) { + size_t Index = + metadata(/* Addr */ 0x5f00, /* Size */ 0x100, /* IsDeallocated */ false); + uintptr_t FailureAddress = 0x6000; + + EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress)); + EXPECT_EQ(Error::BUFFER_OVERFLOW, + __gwp_asan_diagnose_error(&State, Metadata, FailureAddress)); + EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State)); + checkMetadata(Index, FailureAddress); +} + +TEST_F(CrashHandlerAPITest, BufferUnderflow) { + size_t Index = + metadata(/* Addr */ 0x3000, /* Size */ 0x10, /* IsDeallocated*/ false); + uintptr_t FailureAddress = 0x2fff; + + EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress)); + EXPECT_EQ(Error::BUFFER_UNDERFLOW, + __gwp_asan_diagnose_error(&State, Metadata, FailureAddress)); + EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State)); + checkMetadata(Index, FailureAddress); +} diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/enable_disable.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/enable_disable.cpp new file mode 100644 index 00000000000..2c6ba514f49 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/enable_disable.cpp @@ -0,0 +1,86 @@ +//===-- enable_disable.cpp --------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/tests/harness.h" + +constexpr size_t Size = 100; + +TEST_F(DefaultGuardedPoolAllocator, Fork) { + void *P; + pid_t Pid = fork(); + EXPECT_GE(Pid, 0); + if (Pid == 0) { + P = GPA.allocate(Size); + EXPECT_NE(P, nullptr); + memset(P, 0x42, Size); + GPA.deallocate(P); + _exit(0); + } + waitpid(Pid, nullptr, 0); + P = GPA.allocate(Size); + EXPECT_NE(P, nullptr); + memset(P, 0x42, Size); + GPA.deallocate(P); + + // fork should stall if the allocator has been disabled. + EXPECT_DEATH( + { + GPA.disable(); + alarm(1); + Pid = fork(); + EXPECT_GE(Pid, 0); + }, + ""); +} + +namespace { +pthread_mutex_t Mutex; +pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER; +bool ThreadReady = false; + +void *enableMalloc(void *arg) { + auto &GPA = *reinterpret_cast<gwp_asan::GuardedPoolAllocator *>(arg); + + // Signal the main thread we are ready. + pthread_mutex_lock(&Mutex); + ThreadReady = true; + pthread_cond_signal(&Conditional); + pthread_mutex_unlock(&Mutex); + + // Wait for the malloc_disable & fork, then enable the allocator again. + sleep(1); + GPA.enable(); + + return nullptr; +} + +TEST_F(DefaultGuardedPoolAllocator, DisableForkEnable) { + pthread_t ThreadId; + EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, &GPA), 0); + + // Do not lock the allocator right away, the other thread may need it to start + // up. + pthread_mutex_lock(&Mutex); + while (!ThreadReady) + pthread_cond_wait(&Conditional, &Mutex); + pthread_mutex_unlock(&Mutex); + + // Disable the allocator and fork. fork should succeed after malloc_enable. + GPA.disable(); + pid_t Pid = fork(); + EXPECT_GE(Pid, 0); + if (Pid == 0) { + void *P = GPA.allocate(Size); + EXPECT_NE(P, nullptr); + GPA.deallocate(P); + _exit(0); + } + waitpid(Pid, nullptr, 0); + EXPECT_EQ(pthread_join(ThreadId, 0), 0); +} +} // namespace diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/harness.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/harness.cpp new file mode 100644 index 00000000000..77c25ee5a6e --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/harness.cpp @@ -0,0 +1,10 @@ +#include "harness.h" + +namespace gwp_asan { +namespace test { +bool OnlyOnce() { + static int x = 0; + return !x++; +} +} // namespace test +} // namespace gwp_asan diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/harness.h b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/harness.h index 77f7b5160d2..e47254e13c4 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/harness.h +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/harness.h @@ -15,6 +15,7 @@ #include "gwp_asan/guarded_pool_allocator.h" #include "gwp_asan/optional/backtrace.h" +#include "gwp_asan/optional/segv_handler.h" #include "gwp_asan/options.h" namespace gwp_asan { @@ -23,21 +24,27 @@ namespace test { // their own signal-safe Printf function. In LLVM, we use // `optional/printf_sanitizer_common.cpp` which supplies the __sanitizer::Printf // for this purpose. -options::Printf_t getPrintfFunction(); +crash_handler::Printf_t getPrintfFunction(); + +// First call returns true, all the following calls return false. 
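// The fixtures below feed this into Opts.InstallForkHandlers so that only the
// first GPA.init() in the process registers atfork handlers; POSIX offers no
// way to unregister pthread_atfork() callbacks, so repeated registration
// would stack up duplicates across test cases.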
+bool OnlyOnce(); + }; // namespace test }; // namespace gwp_asan class DefaultGuardedPoolAllocator : public ::testing::Test { public: - DefaultGuardedPoolAllocator() { + void SetUp() override { gwp_asan::options::Options Opts; Opts.setDefaults(); MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations; - Opts.Printf = gwp_asan::test::getPrintfFunction(); + Opts.InstallForkHandlers = gwp_asan::test::OnlyOnce(); GPA.init(Opts); } + void TearDown() override { GPA.uninitTestOnly(); } + protected: gwp_asan::GuardedPoolAllocator GPA; decltype(gwp_asan::options::Options::MaxSimultaneousAllocations) @@ -55,10 +62,12 @@ public: Opts.MaxSimultaneousAllocations = MaxSimultaneousAllocationsArg; MaxSimultaneousAllocations = MaxSimultaneousAllocationsArg; - Opts.Printf = gwp_asan::test::getPrintfFunction(); + Opts.InstallForkHandlers = gwp_asan::test::OnlyOnce(); GPA.init(Opts); } + void TearDown() override { GPA.uninitTestOnly(); } + protected: gwp_asan::GuardedPoolAllocator GPA; decltype(gwp_asan::options::Options::MaxSimultaneousAllocations) @@ -67,14 +76,22 @@ protected: class BacktraceGuardedPoolAllocator : public ::testing::Test { public: - BacktraceGuardedPoolAllocator() { + void SetUp() override { gwp_asan::options::Options Opts; Opts.setDefaults(); - Opts.Printf = gwp_asan::test::getPrintfFunction(); Opts.Backtrace = gwp_asan::options::getBacktraceFunction(); - Opts.PrintBacktrace = gwp_asan::options::getPrintBacktraceFunction(); + Opts.InstallForkHandlers = gwp_asan::test::OnlyOnce(); GPA.init(Opts); + + gwp_asan::crash_handler::installSignalHandlers( + &GPA, gwp_asan::test::getPrintfFunction(), + gwp_asan::options::getPrintBacktraceFunction(), Opts.Backtrace); + } + + void TearDown() override { + GPA.uninitTestOnly(); + gwp_asan::crash_handler::uninstallSignalHandlers(); } protected: diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/iterate.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/iterate.cpp new file mode 100644 index 00000000000..c40df15e09c --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/iterate.cpp @@ -0,0 +1,66 @@ +//===-- iterate.cpp ---------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/tests/harness.h" + +TEST_F(CustomGuardedPoolAllocator, Iterate) { + InitNumSlots(7); + std::vector<std::pair<void *, size_t>> Allocated; + auto alloc = [&](size_t size) { + Allocated.push_back({GPA.allocate(size), size}); + }; + + void *Ptr = GPA.allocate(5); + alloc(2); + alloc(1); + alloc(100); + GPA.deallocate(Ptr); + alloc(42); + std::sort(Allocated.begin(), Allocated.end()); + + GPA.disable(); + void *Base = Allocated[0].first; + size_t Size = reinterpret_cast<size_t>(Allocated.back().first) - + reinterpret_cast<size_t>(Base) + 1; + std::vector<std::pair<void *, size_t>> Found; + GPA.iterate( + Base, Size, + [](uintptr_t Addr, size_t Size, void *Arg) { + reinterpret_cast<std::vector<std::pair<void *, size_t>> *>(Arg) + ->push_back({(void *)Addr, Size}); + }, + reinterpret_cast<void *>(&Found)); + GPA.enable(); + + std::sort(Found.begin(), Found.end()); + EXPECT_EQ(Allocated, Found); + + // Now without the last allocation. 
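  // iterate() reports allocations whose base address lies inside
  // [Base, Base + Size), so shrinking Size by one byte is enough to exclude
  // Allocated.back() from the walk.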
+ GPA.disable(); + Size = reinterpret_cast<size_t>(Allocated.back().first) - + reinterpret_cast<size_t>(Base); // Allocated.back() is out of range. + Found.clear(); + GPA.iterate( + Base, Size, + [](uintptr_t Addr, size_t Size, void *Arg) { + reinterpret_cast<std::vector<std::pair<void *, size_t>> *>(Arg) + ->push_back({(void *)Addr, Size}); + }, + reinterpret_cast<void *>(&Found)); + GPA.enable(); + + // We should have found every allocation but the last. + // Remove it and compare the rest. + std::sort(Found.begin(), Found.end()); + GPA.deallocate(Allocated.back().first); + Allocated.pop_back(); + EXPECT_EQ(Allocated, Found); + + for (auto PS : Allocated) + GPA.deallocate(PS.first); +} diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/late_init.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/late_init.cpp new file mode 100644 index 00000000000..c7d62c8f3c8 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/late_init.cpp @@ -0,0 +1,25 @@ +//===-- late_init.cpp -------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/guarded_pool_allocator.h" +#include "gwp_asan/options.h" +#include "gtest/gtest.h" + +TEST(LateInit, CheckLateInitIsOK) { + gwp_asan::GuardedPoolAllocator GPA; + + for (size_t i = 0; i < 0x100; ++i) + EXPECT_FALSE(GPA.shouldSample()); + + gwp_asan::options::Options Opts; + Opts.Enabled = true; + Opts.SampleRate = 1; + + GPA.init(Opts); + EXPECT_TRUE(GPA.shouldSample()); +} diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/optional/printf_sanitizer_common.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/optional/printf_sanitizer_common.cpp index e823aeb370c..ea7141b46ac 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/optional/printf_sanitizer_common.cpp +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/optional/printf_sanitizer_common.cpp @@ -6,8 +6,8 @@ // //===----------------------------------------------------------------------===// +#include "gwp_asan/optional/segv_handler.h" #include "sanitizer_common/sanitizer_common.h" -#include "gwp_asan/options.h" namespace gwp_asan { namespace test { @@ -15,8 +15,6 @@ namespace test { // their own signal-safe Printf function. In LLVM, we use // `optional/printf_sanitizer_common.cpp` which supplies the __sanitizer::Printf // for this purpose. -options::Printf_t getPrintfFunction() { - return __sanitizer::Printf; -} +crash_handler::Printf_t getPrintfFunction() { return __sanitizer::Printf; } }; // namespace test }; // namespace gwp_asan diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/thread_contention.cpp b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/thread_contention.cpp index 33b57484a1a..0992b97b316 100644 --- a/gnu/llvm/compiler-rt/lib/gwp_asan/tests/thread_contention.cpp +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/tests/thread_contention.cpp @@ -24,7 +24,7 @@ void asyncTask(gwp_asan::GuardedPoolAllocator *GPA, // Get ourselves a new allocation. for (unsigned i = 0; i < NumIterations; ++i) { volatile char *Ptr = reinterpret_cast<volatile char *>( - GPA->allocate(GPA->maximumAllocationSize())); + GPA->allocate(GPA->getAllocatorState()->maximumAllocationSize())); // Do any other threads have access to this page? 
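    // A freshly handed-out slot must read as zero: deallocation re-mmaps the
    // backing page (see markInaccessible()), so a nonzero byte here would
    // mean another thread still has access to this page.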
EXPECT_EQ(*Ptr, 0); diff --git a/gnu/llvm/compiler-rt/lib/gwp_asan/utilities.h b/gnu/llvm/compiler-rt/lib/gwp_asan/utilities.h new file mode 100644 index 00000000000..71d525f9e14 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/gwp_asan/utilities.h @@ -0,0 +1,31 @@ +//===-- utilities.h ---------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/definitions.h" + +#include <stddef.h> +#include <stdint.h> + +namespace gwp_asan { +// Checks that `Condition` is true, otherwise fails in a platform-specific way +// with `Message`. +void Check(bool Condition, const char *Message); + +enum class AlignmentStrategy { + // Default => POWER_OF_TWO on most platforms, BIONIC for Android Bionic. + DEFAULT, + POWER_OF_TWO, + BIONIC, + PERFECT, +}; + +// Returns the real size of a right-aligned allocation. +size_t rightAlignedAllocationSize( + size_t RealAllocationSize, + AlignmentStrategy Align = AlignmentStrategy::DEFAULT); +} // namespace gwp_asan diff --git a/gnu/llvm/compiler-rt/lib/hwasan/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/hwasan/CMakeLists.txt index 03863e4be68..d294579c970 100644 --- a/gnu/llvm/compiler-rt/lib/hwasan/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/hwasan/CMakeLists.txt @@ -6,6 +6,7 @@ set(HWASAN_RTL_SOURCES hwasan_allocator.cpp hwasan_dynamic_shadow.cpp hwasan_exceptions.cpp + hwasan_globals.cpp hwasan_interceptors.cpp hwasan_interceptors_vfork.S hwasan_linux.cpp @@ -29,6 +30,7 @@ set(HWASAN_RTL_HEADERS hwasan_dynamic_shadow.h hwasan_flags.h hwasan_flags.inc + hwasan_globals.h hwasan_interface_internal.h hwasan_malloc_bisect.h hwasan_mapping.h diff --git a/gnu/llvm/compiler-rt/lib/hwasan/hwasan.cpp b/gnu/llvm/compiler-rt/lib/hwasan/hwasan.cpp index 7b5c6c694be..d67a88d455e 100644 --- a/gnu/llvm/compiler-rt/lib/hwasan/hwasan.cpp +++ b/gnu/llvm/compiler-rt/lib/hwasan/hwasan.cpp @@ -12,8 +12,10 @@ //===----------------------------------------------------------------------===// #include "hwasan.h" + #include "hwasan_checks.h" #include "hwasan_dynamic_shadow.h" +#include "hwasan_globals.h" #include "hwasan_poisoning.h" #include "hwasan_report.h" #include "hwasan_thread.h" @@ -36,21 +38,6 @@ using namespace __sanitizer; namespace __hwasan { -void EnterSymbolizer() { - Thread *t = GetCurrentThread(); - CHECK(t); - t->EnterSymbolizer(); -} -void ExitSymbolizer() { - Thread *t = GetCurrentThread(); - CHECK(t); - t->LeaveSymbolizer(); -} -bool IsInSymbolizer() { - Thread *t = GetCurrentThread(); - return t && t->InSymbolizer(); -} - static Flags hwasan_flags; Flags *flags() { @@ -201,108 +188,28 @@ void __sanitizer::BufferedStackTrace::UnwindImpl( uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) { Thread *t = GetCurrentThread(); if (!t) { - // the thread is still being created. + // The thread is still being created, or has already been destroyed. size = 0; return; } - if (!StackTrace::WillUseFastUnwind(request_fast)) { - // Block reports from our interceptors during _Unwind_Backtrace. 
- SymbolizerScope sym_scope; - return Unwind(max_depth, pc, bp, context, 0, 0, request_fast); - } - if (StackTrace::WillUseFastUnwind(request_fast)) - Unwind(max_depth, pc, bp, nullptr, t->stack_top(), t->stack_bottom(), true); - else - Unwind(max_depth, pc, 0, context, 0, 0, false); -} - -struct hwasan_global { - s32 gv_relptr; - u32 info; -}; - -static void InitGlobals(const hwasan_global *begin, const hwasan_global *end) { - for (auto *desc = begin; desc != end; ++desc) { - uptr gv = reinterpret_cast<uptr>(desc) + desc->gv_relptr; - uptr size = desc->info & 0xffffff; - uptr full_granule_size = RoundDownTo(size, 16); - u8 tag = desc->info >> 24; - TagMemoryAligned(gv, full_granule_size, tag); - if (size % 16) - TagMemoryAligned(gv + full_granule_size, 16, size % 16); - } + Unwind(max_depth, pc, bp, context, t->stack_top(), t->stack_bottom(), + request_fast); } -enum { NT_LLVM_HWASAN_GLOBALS = 3 }; - -struct hwasan_global_note { - s32 begin_relptr; - s32 end_relptr; -}; - -// Check that the given library meets the code model requirements for tagged -// globals. These properties are not checked at link time so they need to be -// checked at runtime. -static void CheckCodeModel(ElfW(Addr) base, const ElfW(Phdr) * phdr, - ElfW(Half) phnum) { - ElfW(Addr) min_addr = -1ull, max_addr = 0; - for (unsigned i = 0; i != phnum; ++i) { - if (phdr[i].p_type != PT_LOAD) - continue; - ElfW(Addr) lo = base + phdr[i].p_vaddr, hi = lo + phdr[i].p_memsz; - if (min_addr > lo) - min_addr = lo; - if (max_addr < hi) - max_addr = hi; - } - - if (max_addr - min_addr > 1ull << 32) { - Report("FATAL: HWAddressSanitizer: library size exceeds 2^32\n"); - Die(); - } - if (max_addr > 1ull << 48) { - Report("FATAL: HWAddressSanitizer: library loaded above address 2^48\n"); - Die(); - } -} - -static void InitGlobalsFromPhdrs(ElfW(Addr) base, const ElfW(Phdr) * phdr, - ElfW(Half) phnum) { - for (unsigned i = 0; i != phnum; ++i) { - if (phdr[i].p_type != PT_NOTE) - continue; - const char *note = reinterpret_cast<const char *>(base + phdr[i].p_vaddr); - const char *nend = note + phdr[i].p_memsz; - while (note < nend) { - auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(note); - const char *name = note + sizeof(ElfW(Nhdr)); - const char *desc = name + RoundUpTo(nhdr->n_namesz, 4); - if (nhdr->n_type != NT_LLVM_HWASAN_GLOBALS || - internal_strcmp(name, "LLVM") != 0) { - note = desc + RoundUpTo(nhdr->n_descsz, 4); - continue; - } - - // Only libraries with instrumented globals need to be checked against the - // code model since they use relocations that aren't checked at link time. 
- CheckCodeModel(base, phdr, phnum); - - auto *global_note = reinterpret_cast<const hwasan_global_note *>(desc); - auto *global_begin = reinterpret_cast<const hwasan_global *>( - note + global_note->begin_relptr); - auto *global_end = reinterpret_cast<const hwasan_global *>( - note + global_note->end_relptr); - InitGlobals(global_begin, global_end); - return; - } - } +static bool InitializeSingleGlobal(const hwasan_global &global) { + uptr full_granule_size = RoundDownTo(global.size(), 16); + TagMemoryAligned(global.addr(), full_granule_size, global.tag()); + if (global.size() % 16) + TagMemoryAligned(global.addr() + full_granule_size, 16, global.size() % 16); + return false; } static void InitLoadedGlobals() { dl_iterate_phdr( - [](dl_phdr_info *info, size_t size, void *data) { - InitGlobalsFromPhdrs(info->dlpi_addr, info->dlpi_phdr, - info->dlpi_phnum); + [](dl_phdr_info *info, size_t /* size */, void * /* data */) -> int { + for (const hwasan_global &global : HwasanGlobalsFor( + info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum)) + InitializeSingleGlobal(global); return 0; }, nullptr); @@ -343,11 +250,13 @@ void __hwasan_init_static() { // Fortunately, since this is a statically linked executable we can use the // linker-defined symbol __ehdr_start to find the only relevant set of phdrs. extern ElfW(Ehdr) __ehdr_start; - InitGlobalsFromPhdrs( - 0, - reinterpret_cast<const ElfW(Phdr) *>( - reinterpret_cast<const char *>(&__ehdr_start) + __ehdr_start.e_phoff), - __ehdr_start.e_phnum); + for (const hwasan_global &global : HwasanGlobalsFor( + /* base */ 0, + reinterpret_cast<const ElfW(Phdr) *>( + reinterpret_cast<const char *>(&__ehdr_start) + + __ehdr_start.e_phoff), + __ehdr_start.e_phnum)) + InitializeSingleGlobal(global); } void __hwasan_init() { @@ -387,8 +296,6 @@ void __hwasan_init() { InstallDeadlySignalHandlers(HwasanOnDeadlySignal); InstallAtExitHandler(); // Needs __cxa_atexit interceptor. 
- Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer); - InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); HwasanTSDInit(); @@ -408,7 +315,8 @@ void __hwasan_init() { void __hwasan_library_loaded(ElfW(Addr) base, const ElfW(Phdr) * phdr, ElfW(Half) phnum) { - InitGlobalsFromPhdrs(base, phdr, phnum); + for (const hwasan_global &global : HwasanGlobalsFor(base, phdr, phnum)) + InitializeSingleGlobal(global); } void __hwasan_library_unloaded(ElfW(Addr) base, const ElfW(Phdr) * phdr, diff --git a/gnu/llvm/compiler-rt/lib/hwasan/hwasan.h b/gnu/llvm/compiler-rt/lib/hwasan/hwasan.h index 64cdcf30f5c..8cbd9e74e33 100644 --- a/gnu/llvm/compiler-rt/lib/hwasan/hwasan.h +++ b/gnu/llvm/compiler-rt/lib/hwasan/hwasan.h @@ -72,16 +72,13 @@ extern int hwasan_inited; extern bool hwasan_init_is_running; extern int hwasan_report_count; -bool ProtectRange(uptr beg, uptr end); bool InitShadow(); void InitPrctl(); void InitThreads(); void MadviseShadow(); -char *GetProcSelfMaps(); void InitializeInterceptors(); void HwasanAllocatorInit(); -void HwasanAllocatorThreadFinish(); void *hwasan_malloc(uptr size, StackTrace *stack); void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack); @@ -95,24 +92,8 @@ int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size, StackTrace *stack); void hwasan_free(void *ptr, StackTrace *stack); -void InstallTrapHandler(); void InstallAtExitHandler(); -void EnterSymbolizer(); -void ExitSymbolizer(); -bool IsInSymbolizer(); - -struct SymbolizerScope { - SymbolizerScope() { EnterSymbolizer(); } - ~SymbolizerScope() { ExitSymbolizer(); } -}; - -// Returns a "chained" origin id, pointing to the given stack trace followed by -// the previous origin id. -u32 ChainOrigin(u32 id, StackTrace *stack); - -const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1; - #define GET_MALLOC_STACK_TRACE \ BufferedStackTrace stack; \ if (hwasan_inited) \ @@ -134,16 +115,6 @@ const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1; stack.Print(); \ } -class ScopedThreadLocalStateBackup { - public: - ScopedThreadLocalStateBackup() { Backup(); } - ~ScopedThreadLocalStateBackup() { Restore(); } - void Backup(); - void Restore(); - private: - u64 va_arg_overflow_size_tls; -}; - void HwasanTSDInit(); void HwasanTSDThreadInit(); diff --git a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_allocator.cpp index 81a57d3afd4..1d82db0e394 100644 --- a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_allocator.cpp +++ b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_allocator.cpp @@ -363,7 +363,7 @@ int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size, // OOM error is already taken care of by HwasanAllocate. return errno_ENOMEM; CHECK(IsAligned((uptr)ptr, alignment)); - *memptr = ptr; + *(void **)UntagPtr(memptr) = ptr; return 0; } diff --git a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_flags.inc b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_flags.inc index dffbf56cb15..8e431d9c4ff 100644 --- a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_flags.inc +++ b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_flags.inc @@ -33,7 +33,7 @@ HWASAN_FLAG(bool, disable_allocator_tagging, false, "") HWASAN_FLAG(bool, random_tags, true, "") HWASAN_FLAG( - int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K. + int, max_malloc_fill_size, 0, "HWASan allocator flag. 
max_malloc_fill_size is the maximal amount of " "bytes that will be filled with malloc_fill_byte on malloc.") diff --git a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_globals.cpp b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_globals.cpp new file mode 100644 index 00000000000..d71bcd792e1 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_globals.cpp @@ -0,0 +1,91 @@ +//===-- hwasan_globals.cpp ------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of HWAddressSanitizer. +// +// HWAddressSanitizer globals-specific runtime. +//===----------------------------------------------------------------------===// + +#include "hwasan_globals.h" + +namespace __hwasan { + +enum { NT_LLVM_HWASAN_GLOBALS = 3 }; +struct hwasan_global_note { + s32 begin_relptr; + s32 end_relptr; +}; + +// Check that the given library meets the code model requirements for tagged +// globals. These properties are not checked at link time so they need to be +// checked at runtime. +static void CheckCodeModel(ElfW(Addr) base, const ElfW(Phdr) * phdr, + ElfW(Half) phnum) { + ElfW(Addr) min_addr = -1ull, max_addr = 0; + for (unsigned i = 0; i != phnum; ++i) { + if (phdr[i].p_type != PT_LOAD) + continue; + ElfW(Addr) lo = base + phdr[i].p_vaddr, hi = lo + phdr[i].p_memsz; + if (min_addr > lo) + min_addr = lo; + if (max_addr < hi) + max_addr = hi; + } + + if (max_addr - min_addr > 1ull << 32) { + Report("FATAL: HWAddressSanitizer: library size exceeds 2^32\n"); + Die(); + } + if (max_addr > 1ull << 48) { + Report("FATAL: HWAddressSanitizer: library loaded above address 2^48\n"); + Die(); + } +} + +ArrayRef<const hwasan_global> HwasanGlobalsFor(ElfW(Addr) base, + const ElfW(Phdr) * phdr, + ElfW(Half) phnum) { + // Read the phdrs from this DSO. + for (unsigned i = 0; i != phnum; ++i) { + if (phdr[i].p_type != PT_NOTE) + continue; + + const char *note = reinterpret_cast<const char *>(base + phdr[i].p_vaddr); + const char *nend = note + phdr[i].p_memsz; + + // Traverse all the notes until we find a HWASan note. + while (note < nend) { + auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(note); + const char *name = note + sizeof(ElfW(Nhdr)); + const char *desc = name + RoundUpTo(nhdr->n_namesz, 4); + + // Discard non-HWASan-Globals notes. + if (nhdr->n_type != NT_LLVM_HWASAN_GLOBALS || + internal_strcmp(name, "LLVM") != 0) { + note = desc + RoundUpTo(nhdr->n_descsz, 4); + continue; + } + + // Only libraries with instrumented globals need to be checked against the + // code model since they use relocations that aren't checked at link time. 
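      // Note layout refresher: an ElfW(Nhdr) is followed by the name (padded
      // to 4 bytes) and then the descriptor; begin_relptr/end_relptr inside
      // the descriptor are offsets relative to the start of the note itself.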
+ CheckCodeModel(base, phdr, phnum); + + auto *global_note = reinterpret_cast<const hwasan_global_note *>(desc); + auto *globals_begin = reinterpret_cast<const hwasan_global *>( + note + global_note->begin_relptr); + auto *globals_end = reinterpret_cast<const hwasan_global *>( + note + global_note->end_relptr); + + return {globals_begin, globals_end}; + } + } + + return {}; +} + +} // namespace __hwasan diff --git a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_globals.h b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_globals.h new file mode 100644 index 00000000000..fd7adf7a058 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_globals.h @@ -0,0 +1,49 @@ +//===-- hwasan_globals.h ----------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of HWAddressSanitizer. +// +// Private Hwasan header. +//===----------------------------------------------------------------------===// + +#ifndef HWASAN_GLOBALS_H +#define HWASAN_GLOBALS_H + +#include <link.h> + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_internal_defs.h" + +namespace __hwasan { +// This object should only ever be casted over the global (i.e. not constructed) +// in the ELF PT_NOTE in order for `addr()` to work correctly. +struct hwasan_global { + // The size of this global variable. Note that the size in the descriptor is + // max 1 << 24. Larger globals have multiple descriptors. + uptr size() const { return info & 0xffffff; } + // The fully-relocated address of this global. + uptr addr() const { return reinterpret_cast<uintptr_t>(this) + gv_relptr; } + // The static tag of this global. + u8 tag() const { return info >> 24; }; + + // The relative address between the start of the descriptor for the HWASan + // global (in the PT_NOTE), and the fully relocated address of the global. + s32 gv_relptr; + u32 info; +}; + +// Walk through the specific DSO (as specified by the base, phdr, and phnum), +// and return the range of the [beginning, end) of the HWASan globals descriptor +// array. +ArrayRef<const hwasan_global> HwasanGlobalsFor(ElfW(Addr) base, + const ElfW(Phdr) * phdr, + ElfW(Half) phnum); + +} // namespace __hwasan + +#endif // HWASAN_GLOBALS_H diff --git a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_linux.cpp b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_linux.cpp index ed0f30161b0..f1e830ddf90 100644 --- a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_linux.cpp +++ b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_linux.cpp @@ -354,8 +354,11 @@ void AndroidTestTlsSlot() {} #endif Thread *GetCurrentThread() { - auto *R = (StackAllocationsRingBuffer *)GetCurrentThreadLongPtr(); - return hwasanThreadList().GetThreadByBufferAddress((uptr)(R->Next())); + uptr *ThreadLongPtr = GetCurrentThreadLongPtr(); + if (UNLIKELY(*ThreadLongPtr == 0)) + return nullptr; + auto *R = (StackAllocationsRingBuffer *)ThreadLongPtr; + return hwasanThreadList().GetThreadByBufferAddress((uptr)R->Next()); } struct AccessInfo { diff --git a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_report.cpp b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_report.cpp index 5df8c0ac910..206aa601903 100644 --- a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_report.cpp +++ b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_report.cpp @@ -11,10 +11,14 @@ // Error reporting. 
//===----------------------------------------------------------------------===// +#include "hwasan_report.h" + +#include <dlfcn.h> + #include "hwasan.h" #include "hwasan_allocator.h" +#include "hwasan_globals.h" #include "hwasan_mapping.h" -#include "hwasan_report.h" #include "hwasan_thread.h" #include "hwasan_thread_list.h" #include "sanitizer_common/sanitizer_allocator_internal.h" @@ -122,21 +126,43 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator { const char *Thread() { return Green(); } }; -// Returns the index of the rb element that matches tagged_addr (plus one), -// or zero if found nothing. -uptr FindHeapAllocation(HeapAllocationsRingBuffer *rb, - uptr tagged_addr, - HeapAllocationRecord *har) { - if (!rb) return 0; +static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr, + HeapAllocationRecord *har, uptr *ring_index, + uptr *num_matching_addrs, + uptr *num_matching_addrs_4b) { + if (!rb) return false; + + *num_matching_addrs = 0; + *num_matching_addrs_4b = 0; for (uptr i = 0, size = rb->size(); i < size; i++) { auto h = (*rb)[i]; if (h.tagged_addr <= tagged_addr && h.tagged_addr + h.requested_size > tagged_addr) { *har = h; - return i + 1; + *ring_index = i; + return true; + } + + // Measure the number of heap ring buffer entries that would have matched + // if we had only one entry per address (e.g. if the ring buffer data was + // stored at the address itself). This will help us tune the allocator + // implementation for MTE. + if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) && + UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) { + ++*num_matching_addrs; + } + + // Measure the number of heap ring buffer entries that would have matched + // if we only had 4 tag bits, which is the case for MTE. + auto untag_4b = [](uptr p) { + return p & ((1ULL << 60) - 1); + }; + if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) && + untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) { + ++*num_matching_addrs_4b; } } - return 0; + return false; } static void PrintStackAllocations(StackAllocationsRingBuffer *sa, @@ -221,6 +247,42 @@ static bool TagsEqual(tag_t tag, tag_t *tag_ptr) { return tag == inline_tag; } +// HWASan globals store the size of the global in the descriptor. In cases where +// we don't have a binary with symbols, we can't grab the size of the global +// from the debug info - but we might be able to retrieve it from the +// descriptor. Returns zero if the lookup failed. +static uptr GetGlobalSizeFromDescriptor(uptr ptr) { + // Find the ELF object that this global resides in. + Dl_info info; + dladdr(reinterpret_cast<void *>(ptr), &info); + auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase); + auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>( + reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff); + + // Get the load bias. This is normally the same as the dli_fbase address on + // position-independent code, but can be different on non-PIE executables, + // binaries using LLD's partitioning feature, or binaries compiled with a + // linker script. + ElfW(Addr) load_bias = 0; + for (const auto &phdr : + ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) { + if (phdr.p_type != PT_LOAD || phdr.p_offset != 0) + continue; + load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr; + break; + } + + // Walk all globals in this ELF object, looking for the one we're interested + // in. 
Once we find it, we can stop iterating and return the size of the + // global we're interested in. + for (const hwasan_global &global : + HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum)) + if (global.addr() <= ptr && ptr < global.addr() + global.size()) + return global.size(); + + return 0; +} + void PrintAddressDescription( uptr tagged_addr, uptr access_size, StackAllocationsRingBuffer *current_stack_allocations) { @@ -297,9 +359,19 @@ void PrintAddressDescription( candidate == left ? "right" : "left", info.size, info.name, info.start, info.start + info.size, module_name); } else { - Printf("%p is located to the %s of a global variable in (%s+0x%x)\n", - untagged_addr, candidate == left ? "right" : "left", - module_name, module_address); + uptr size = GetGlobalSizeFromDescriptor(mem); + if (size == 0) + // We couldn't find the size of the global from the descriptors. + Printf( + "%p is located to the %s of a global variable in (%s+0x%x)\n", + untagged_addr, candidate == left ? "right" : "left", + module_name, module_address); + else + Printf( + "%p is located to the %s of a %zd-byte global variable in " + "(%s+0x%x)\n", + untagged_addr, candidate == left ? "right" : "left", size, + module_name, module_address); } num_descriptions_printed++; } @@ -309,7 +381,10 @@ void PrintAddressDescription( hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { // Scan all threads' ring buffers to find if it's a heap-use-after-free. HeapAllocationRecord har; - if (uptr D = FindHeapAllocation(t->heap_allocations(), tagged_addr, &har)) { + uptr ring_index, num_matching_addrs, num_matching_addrs_4b; + if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har, + &ring_index, &num_matching_addrs, + &num_matching_addrs_4b)) { Printf("%s", d.Location()); Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n", untagged_addr, untagged_addr - UntagAddr(har.tagged_addr), @@ -327,8 +402,11 @@ void PrintAddressDescription( // Print a developer note: the index of this heap object // in the thread's deallocation ring buffer. - Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", D, + Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1, flags()->heap_history_size); + Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs); + Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n", + num_matching_addrs_4b); t->Announce(); num_descriptions_printed++; diff --git a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_thread.cpp b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_thread.cpp index cabf614c005..b81a6350c05 100644 --- a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_thread.cpp +++ b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_thread.cpp @@ -90,6 +90,12 @@ void Thread::Destroy() { if (heap_allocations_) heap_allocations_->Delete(); DTLS_Destroy(); + // Unregister this as the current thread. + // Instrumented code can not run on this thread from this point onwards, but + // malloc/free can still be served. Glibc may call free() very late, after all + // TSD destructors are done. 
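The comment above is the crux of this hunk: a pthread TSD destructor runs after the thread body has already returned, and glibc's own destructors may call free() even later, so the runtime must keep serving malloc/free for a thread it has logically torn down. A minimal demonstration of that ordering, not part of the import, assuming POSIX threads (build with -pthread):

    // Sketch only: show that a TSD destructor executes after the thread
    // body returns, i.e. code still runs on a "finished" thread.
    #include <pthread.h>
    #include <cstdio>
    #include <cstdlib>

    static pthread_key_t key;

    static void destructor(void *p) {
      // We are past the thread's return statement here; glibc can equally
      // well call free() from its own late TSD destructors.
      printf("destructor runs after thread body, value=%p\n", p);
      free(p);
    }

    static void *thread_body(void *) {
      pthread_setspecific(key, malloc(16));  // Non-null => destructor fires.
      printf("thread body returning\n");
      return nullptr;
    }

    int main() {
      pthread_key_create(&key, destructor);
      pthread_t t;
      pthread_create(&t, nullptr, thread_body, nullptr);
      pthread_join(t, nullptr);
    }

Hence the two added lines that follow: the thread unregisters itself by zeroing the thread-long slot, and GetCurrentThread() (patched earlier in hwasan_linux.cpp) now returns nullptr for that zeroed slot instead of dereferencing through it.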
+ CHECK_EQ(GetCurrentThread(), this); + *GetCurrentThreadLongPtr() = 0; } void Thread::Print(const char *Prefix) { diff --git a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_thread.h b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_thread.h index 42c1e9e124b..ebcdb791fb3 100644 --- a/gnu/llvm/compiler-rt/lib/hwasan/hwasan_thread.h +++ b/gnu/llvm/compiler-rt/lib/hwasan/hwasan_thread.h @@ -38,14 +38,6 @@ class Thread { return addr >= stack_bottom_ && addr < stack_top_; } - bool InSignalHandler() { return in_signal_handler_; } - void EnterSignalHandler() { in_signal_handler_++; } - void LeaveSignalHandler() { in_signal_handler_--; } - - bool InSymbolizer() { return in_symbolizer_; } - void EnterSymbolizer() { in_symbolizer_++; } - void LeaveSymbolizer() { in_symbolizer_--; } - AllocatorCache *allocator_cache() { return &allocator_cache_; } HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; } StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; } @@ -54,7 +46,6 @@ class Thread { void DisableTagging() { tagging_disabled_++; } void EnableTagging() { tagging_disabled_--; } - bool TaggingIsDisabled() const { return tagging_disabled_; } u64 unique_id() const { return unique_id_; } void Announce() { @@ -76,9 +67,6 @@ class Thread { uptr tls_begin_; uptr tls_end_; - unsigned in_signal_handler_; - unsigned in_symbolizer_; - u32 random_state_; u32 random_buffer_; @@ -86,8 +74,6 @@ class Thread { HeapAllocationsRingBuffer *heap_allocations_; StackAllocationsRingBuffer *stack_allocations_; - static void InsertIntoThreadList(Thread *t); - static void RemoveFromThreadList(Thread *t); Thread *next_; // All live threads form a linked list. u64 unique_id_; // counting from zero. diff --git a/gnu/llvm/compiler-rt/lib/hwasan/scripts/hwasan_symbolize b/gnu/llvm/compiler-rt/lib/hwasan/scripts/hwasan_symbolize index f77e36fbd62..dd5f859561e 100755 --- a/gnu/llvm/compiler-rt/lib/hwasan/scripts/hwasan_symbolize +++ b/gnu/llvm/compiler-rt/lib/hwasan/scripts/hwasan_symbolize @@ -28,6 +28,7 @@ class Symbolizer: self.__binary_prefixes = binary_prefixes self.__paths_to_cut = paths_to_cut self.__log = False + self.__warnings = set() def enable_logging(self, enable): self.__log = enable @@ -73,7 +74,9 @@ class Symbolizer: full_path = os.path.join(p, os.path.basename(name)) if os.path.exists(full_path): return full_path - print >>sys.stderr, "Could not find symbols for", name + if name not in self.__warnings: + print >>sys.stderr, "Could not find symbols for", name + self.__warnings.add(name) return None def iter_locals(self, binary, addr): @@ -116,7 +119,7 @@ class Symbolizer: def symbolize_line(line, symbolizer_path): #0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45) - match = re.match(r'^(.*?)#([0-9]+)( *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)', line, re.UNICODE) + match = re.match(r'^(.*?)#([0-9]+)( *)(0x[0-9a-f]*) *\((.*)\+(0x[0-9a-f]+)\)', line, re.UNICODE) if match: frameno = match.group(2) binary = match.group(5) @@ -141,7 +144,7 @@ def save_access_address(line): match = re.match(r'^(.*?)HWAddressSanitizer: tag-mismatch on address (0x[0-9a-f]+) ', line, re.UNICODE) if match: last_access_address = int(match.group(2), 16) - match = re.match(r'^(.*?) of size [0-9]+ at 0x[0-9a-f]+ tags: ([0-9a-f]+)/[0-9a-f]+ \(ptr/mem\)', line, re.UNICODE) + match = re.match(r'^(.*?) 
of size [0-9]+ at 0x[0-9a-f]* tags: ([0-9a-f]+)/[0-9a-f]+ \(ptr/mem\)', line, re.UNICODE) if match: last_access_tag = int(match.group(2), 16) diff --git a/gnu/llvm/compiler-rt/lib/lsan/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/lsan/CMakeLists.txt index 65d47476939..ff8d38d8484 100644 --- a/gnu/llvm/compiler-rt/lib/lsan/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/lsan/CMakeLists.txt @@ -5,6 +5,7 @@ append_rtti_flag(OFF LSAN_CFLAGS) set(LSAN_COMMON_SOURCES lsan_common.cpp + lsan_common_fuchsia.cpp lsan_common_linux.cpp lsan_common_mac.cpp ) @@ -12,10 +13,12 @@ set(LSAN_COMMON_SOURCES set(LSAN_SOURCES lsan.cpp lsan_allocator.cpp - lsan_linux.cpp + lsan_fuchsia.cpp lsan_interceptors.cpp + lsan_linux.cpp lsan_mac.cpp lsan_malloc_mac.cpp + lsan_posix.cpp lsan_preinit.cpp lsan_thread.cpp ) diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan.cpp b/gnu/llvm/compiler-rt/lib/lsan/lsan.cpp index 4ce03046ffb..80a6e2fa701 100644 --- a/gnu/llvm/compiler-rt/lib/lsan/lsan.cpp +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan.cpp @@ -15,7 +15,6 @@ #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flag_parser.h" -#include "sanitizer_common/sanitizer_stacktrace.h" #include "lsan_allocator.h" #include "lsan_common.h" #include "lsan_thread.h" @@ -87,17 +86,6 @@ static void InitializeFlags() { __sanitizer_set_report_path(common_flags()->log_path); } -static void OnStackUnwind(const SignalContext &sig, const void *, - BufferedStackTrace *stack) { - stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context, - common_flags()->fast_unwind_on_fatal); -} - -static void LsanOnDeadlySignal(int signo, void *siginfo, void *context) { - HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind, - nullptr); -} - extern "C" void __lsan_init() { CHECK(!lsan_init_is_running); if (lsan_inited) @@ -114,10 +102,7 @@ extern "C" void __lsan_init() { InitializeInterceptors(); InitializeThreadRegistry(); InstallDeadlySignalHandlers(LsanOnDeadlySignal); - u32 tid = ThreadCreate(0, 0, true); - CHECK_EQ(tid, 0); - ThreadStart(tid, GetTid()); - SetCurrentThread(tid); + InitializeMainThread(); if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) Atexit(DoLeakCheck); diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan.h b/gnu/llvm/compiler-rt/lib/lsan/lsan.h index 9904ada4bb3..1e82ad72f00 100644 --- a/gnu/llvm/compiler-rt/lib/lsan/lsan.h +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan.h @@ -12,6 +12,11 @@ //===----------------------------------------------------------------------===// #include "lsan_thread.h" +#if SANITIZER_POSIX +#include "lsan_posix.h" +#elif SANITIZER_FUCHSIA +#include "lsan_fuchsia.h" +#endif #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_stacktrace.h" @@ -33,6 +38,7 @@ namespace __lsan { void InitializeInterceptors(); void ReplaceSystemMalloc(); +void LsanOnDeadlySignal(int signo, void *siginfo, void *context); #define ENSURE_LSAN_INITED do { \ CHECK(!lsan_init_is_running); \ diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_allocator.h b/gnu/llvm/compiler-rt/lib/lsan/lsan_allocator.h index e1397099767..17e13cd014b 100644 --- a/gnu/llvm/compiler-rt/lib/lsan/lsan_allocator.h +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_allocator.h @@ -65,10 +65,16 @@ struct AP32 { template <typename AddressSpaceView> using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>; using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>; -#elif defined(__x86_64__) || defined(__powerpc64__) -# if defined(__powerpc64__) 
+#elif defined(__x86_64__) || defined(__powerpc64__) || defined(__s390x__) +# if SANITIZER_FUCHSIA +const uptr kAllocatorSpace = ~(uptr)0; +const uptr kAllocatorSize = 0x40000000000ULL; // 4T. +# elif defined(__powerpc64__) const uptr kAllocatorSpace = 0xa0000000000ULL; const uptr kAllocatorSize = 0x20000000000ULL; // 2T. +#elif defined(__s390x__) +const uptr kAllocatorSpace = 0x40000000000ULL; +const uptr kAllocatorSize = 0x40000000000ULL; // 4T. # else const uptr kAllocatorSpace = 0x600000000000ULL; const uptr kAllocatorSize = 0x40000000000ULL; // 4T. diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_common.cpp b/gnu/llvm/compiler-rt/lib/lsan/lsan_common.cpp index 9ff9f4c5d1c..67f85f2f31d 100644 --- a/gnu/llvm/compiler-rt/lib/lsan/lsan_common.cpp +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_common.cpp @@ -25,6 +25,8 @@ #include "sanitizer_common/sanitizer_thread_registry.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" +extern "C" const char *__lsan_current_stage = "unknown"; + #if CAN_SANITIZE_LEAKS namespace __lsan { @@ -34,6 +36,7 @@ BlockingMutex global_mutex(LINKER_INITIALIZED); Flags lsan_flags; + void DisableCounterUnderflow() { if (common_flags()->detect_leaks) { Report("Unmatched call to __lsan_enable().\n"); @@ -211,6 +214,13 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) { ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable); } +#if SANITIZER_FUCHSIA + +// Fuchsia handles all threads together with its own callback. +static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {} + +#else + // Scans thread data (stacks and TLS) for heap pointers. static void ProcessThreads(SuspendedThreadsList const &suspended_threads, Frontier *frontier) { @@ -308,6 +318,8 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads, } } +#endif // SANITIZER_FUCHSIA + void ScanRootRegion(Frontier *frontier, const RootRegion &root_region, uptr region_begin, uptr region_end, bool is_readable) { uptr intersection_begin = Max(root_region.begin, region_begin); @@ -354,6 +366,7 @@ static void FloodFillTag(Frontier *frontier, ChunkTag tag) { // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks // which are reachable from it as indirectly leaked. static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) { + __lsan_current_stage = "MarkIndirectlyLeakedCb"; chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (m.allocated() && m.tag() != kReachable) { @@ -366,6 +379,7 @@ static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) { // frontier. static void CollectIgnoredCb(uptr chunk, void *arg) { CHECK(arg); + __lsan_current_stage = "CollectIgnoredCb"; chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (m.allocated() && m.tag() == kIgnored) { @@ -395,6 +409,7 @@ struct InvalidPCParam { static void MarkInvalidPCCb(uptr chunk, void *arg) { CHECK(arg); InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg); + __lsan_current_stage = "MarkInvalidPCCb"; chunk = GetUserBegin(chunk); LsanMetadata m(chunk); if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) { @@ -443,25 +458,23 @@ void ProcessPC(Frontier *frontier) { } // Sets the appropriate tag on each chunk. -static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { - // Holds the flood fill frontier. 
-  Frontier frontier;
+static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
+                              Frontier *frontier) {
+  ForEachChunk(CollectIgnoredCb, frontier);
+  ProcessGlobalRegions(frontier);
+  ProcessThreads(suspended_threads, frontier);
+  ProcessRootRegions(frontier);
+  FloodFillTag(frontier, kReachable);
 
-  ForEachChunk(CollectIgnoredCb, &frontier);
-  ProcessGlobalRegions(&frontier);
-  ProcessThreads(suspended_threads, &frontier);
-  ProcessRootRegions(&frontier);
-  FloodFillTag(&frontier, kReachable);
-
-  CHECK_EQ(0, frontier.size());
-  ProcessPC(&frontier);
+  CHECK_EQ(0, frontier->size());
+  ProcessPC(frontier);
 
   // The check here is relatively expensive, so we do this in a separate flood
   // fill. That way we can skip the check for chunks that are reachable
   // otherwise.
   LOG_POINTERS("Processing platform-specific allocations.\n");
-  ProcessPlatformSpecificAllocations(&frontier);
-  FloodFillTag(&frontier, kReachable);
+  ProcessPlatformSpecificAllocations(frontier);
+  FloodFillTag(frontier, kReachable);
 
   // Iterate over leaked chunks and mark those that are reachable from other
   // leaked chunks.
@@ -472,6 +485,7 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
 // ForEachChunk callback. Resets the tags to pre-leak-check state.
 static void ResetTagsCb(uptr chunk, void *arg) {
   (void)arg;
+  __lsan_current_stage = "ResetTagsCb";
   chunk = GetUserBegin(chunk);
   LsanMetadata m(chunk);
   if (m.allocated() && m.tag() != kIgnored)
@@ -488,6 +502,7 @@ static void PrintStackTraceById(u32 stack_trace_id) {
 static void CollectLeaksCb(uptr chunk, void *arg) {
   CHECK(arg);
   LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+  __lsan_current_stage = "CollectLeaksCb";
   chunk = GetUserBegin(chunk);
   LsanMetadata m(chunk);
   if (!m.allocated()) return;
@@ -521,11 +536,6 @@ static void PrintMatchedSuppressions() {
   Printf("%s\n\n", line);
 }
 
-struct CheckForLeaksParam {
-  bool success;
-  LeakReport leak_report;
-};
-
 static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
   const InternalMmapVector<tid_t> &suspended_threads =
       *(const InternalMmapVector<tid_t> *)arg;
@@ -538,6 +548,14 @@ static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
   }
 }
 
+#if SANITIZER_FUCHSIA
+
+// Fuchsia provides a libc interface that guarantees all threads are
+// covered, and SuspendedThreadList is never really used.
+static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
+
+#else // !SANITIZER_FUCHSIA
+
 static void ReportUnsuspendedThreads(
     const SuspendedThreadsList &suspended_threads) {
   InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
@@ -550,13 +568,15 @@ static void ReportUnsuspendedThreads(
       &ReportIfNotSuspended, &threads);
 }
 
+#endif // !SANITIZER_FUCHSIA
+
 static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                   void *arg) {
   CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
   CHECK(param);
   CHECK(!param->success);
   ReportUnsuspendedThreads(suspended_threads);
-  ClassifyAllChunks(suspended_threads);
+  ClassifyAllChunks(suspended_threads, &param->frontier);
   ForEachChunk(CollectLeaksCb, &param->leak_report);
   // Clean up for subsequent leak checks. This assumes we did not overwrite any
   // kIgnored tags.
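ClassifyAllChunks() above is a textbook frontier-based flood fill: seed the frontier with the roots (ignored chunks, globals, thread stacks and registers, root regions), drain it while tagging, and whatever stays untagged is a leak candidate. A toy rendering of that shape, not part of the import; Chunk and the index-based heap are illustrative stand-ins for LSan's real metadata:

    // Sketch only: worklist reachability over a toy heap.
    #include <cstdio>
    #include <vector>

    struct Chunk {
      std::vector<int> points_to;  // Indices of chunks this chunk references.
      bool reachable = false;
    };

    // Seed the frontier with the roots, then drain it, tagging every chunk
    // reached; whatever stays untagged is a leak candidate.
    static void FloodFill(std::vector<Chunk> &heap, std::vector<int> frontier) {
      while (!frontier.empty()) {
        int idx = frontier.back();
        frontier.pop_back();
        Chunk &c = heap[idx];
        if (c.reachable)
          continue;
        c.reachable = true;
        for (int succ : c.points_to)
          frontier.push_back(succ);
      }
    }

    int main() {
      std::vector<Chunk> heap(4);
      heap[0].points_to = {1};  // Root chunk references chunk 1.
      heap[1].points_to = {0};  // A cycle back; both stay reachable.
      heap[2].points_to = {3};  // 2 and 3 are unreferenced: leaked.
      FloodFill(heap, /*roots=*/{0});
      for (int i = 0; i < 4; ++i)
        printf("chunk %d: %s\n", i, heap[i].reachable ? "reachable" : "leaked");
    }

The change in this hunk is only plumbing: the frontier now lives inside CheckForLeaksParam, so a platform whose OS enumerates roots through callbacks (Fuchsia, below) can start collecting into it before the stop-the-world callback runs.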
@@ -569,7 +589,6 @@ static bool CheckForLeaks() {
     return false;
   EnsureMainThreadIDIsCorrect();
   CheckForLeaksParam param;
-  param.success = false;
   LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
 
   if (!param.success) {
diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_common.h b/gnu/llvm/compiler-rt/lib/lsan/lsan_common.h
index d24abe31b71..3434beede82 100644
--- a/gnu/llvm/compiler-rt/lib/lsan/lsan_common.h
+++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_common.h
@@ -29,10 +29,10 @@
 // To enable LeakSanitizer on a new architecture, one needs to implement the
 // internal_clone function as well as (probably) adjust the TLS machinery for
 // the new architecture inside the sanitizer library.
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
-    (SANITIZER_WORDSIZE == 64) && \
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
+    (SANITIZER_WORDSIZE == 64) &&                               \
     (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
-     defined(__powerpc64__))
+     defined(__powerpc64__) || defined(__s390x__))
 #define CAN_SANITIZE_LEAKS 1
 #elif defined(__i386__) && \
     (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
 #define CAN_SANITIZE_LEAKS 1
 #elif defined(__arm__) && \
     SANITIZER_LINUX && !SANITIZER_ANDROID
 #define CAN_SANITIZE_LEAKS 1
-#elif SANITIZER_NETBSD
+#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
 #define CAN_SANITIZE_LEAKS 1
 #else
 #define CAN_SANITIZE_LEAKS 0
@@ -126,12 +126,24 @@ struct RootRegion {
   uptr size;
 };
 
+// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
+// this Frontier vector before the StopTheWorldCallback actually runs.
+// This is used when the OS has a unified callback API for suspending
+// threads and enumerating roots.
+struct CheckForLeaksParam {
+  Frontier frontier;
+  LeakReport leak_report;
+  bool success = false;
+};
+
 InternalMmapVector<RootRegion> const *GetRootRegions();
 void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                     uptr region_begin, uptr region_end, bool is_readable);
+void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg);
 // Run stoptheworld while holding any platform-specific locks, as well as the
 // allocator and thread registry locks.
-void LockStuffAndStopTheWorld(StopTheWorldCallback callback, void* argument);
+void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
+                              CheckForLeaksParam* argument);
 
 void ScanRangeForPointers(uptr begin, uptr end,
                           Frontier *frontier,
@@ -211,6 +223,7 @@ ThreadRegistry *GetThreadRegistryLocked();
 bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                            uptr *cache_end, DTLS **dtls);
+void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
 void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                             void *arg);
 // If called from the main thread, updates the main thread's TID in the thread
diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp b/gnu/llvm/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
new file mode 100644
index 00000000000..caedbf15596
--- /dev/null
+++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
@@ -0,0 +1,166 @@
+//=-- lsan_common_fuchsia.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Fuchsia-specific code.
+//
+//===---------------------------------------------------------------------===//
+
+#include "lsan_common.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_FUCHSIA
+#include <zircon/sanitizer.h>
+
+#include "lsan_allocator.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+// Ensure that the Zircon system ABI is linked in.
+#pragma comment(lib, "zircon")
+
+namespace __lsan {
+
+void InitializePlatformSpecificModules() {}
+
+LoadedModule *GetLinker() { return nullptr; }
+
+__attribute__((tls_model("initial-exec"))) THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+  if (disable_counter == 0) {
+    DisableCounterUnderflow();
+  }
+  disable_counter--;
+}
+
+// There is nothing left to do after the globals callbacks.
+void ProcessGlobalRegions(Frontier *frontier) {}
+
+// Nothing to do here.
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
+
+// On Fuchsia, we can intercept _Exit gracefully, and return a failing exit
+// code if required at that point. Calling Die() here is undefined
+// behavior and causes rare race conditions.
+void HandleLeaks() {}
+
+int ExitHook(int status) {
+  return status == 0 && HasReportedLeaks() ? common_flags()->exitcode : status;
+}
+
+void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
+                              CheckForLeaksParam *argument) {
+  LockThreadRegistry();
+  LockAllocator();
+
+  struct Params {
+    InternalMmapVector<uptr> allocator_caches;
+    StopTheWorldCallback callback;
+    CheckForLeaksParam *argument;
+  } params = {{}, callback, argument};
+
+  // Callback from libc for globals (data/bss modulo relro), when enabled.
+  auto globals = +[](void *chunk, size_t size, void *data) {
+    auto params = static_cast<const Params *>(data);
+    uptr begin = reinterpret_cast<uptr>(chunk);
+    uptr end = begin + size;
+    ScanGlobalRange(begin, end, &params->argument->frontier);
+  };
+
+  // Callback from libc for thread stacks.
+  auto stacks = +[](void *chunk, size_t size, void *data) {
+    auto params = static_cast<const Params *>(data);
+    uptr begin = reinterpret_cast<uptr>(chunk);
+    uptr end = begin + size;
+    ScanRangeForPointers(begin, end, &params->argument->frontier, "STACK",
+                         kReachable);
+  };
+
+  // Callback from libc for thread registers.
+  auto registers = +[](void *chunk, size_t size, void *data) {
+    auto params = static_cast<const Params *>(data);
+    uptr begin = reinterpret_cast<uptr>(chunk);
+    uptr end = begin + size;
+    ScanRangeForPointers(begin, end, &params->argument->frontier, "REGISTERS",
+                         kReachable);
+  };
+
+  if (flags()->use_tls) {
+    // Collect the allocator cache range from each thread so these
+    // can all be excluded from the reported TLS ranges.
+    GetAllThreadAllocatorCachesLocked(&params.allocator_caches);
+    __sanitizer::Sort(params.allocator_caches.data(),
+                      params.allocator_caches.size());
+  }
+
+  // Callback from libc for TLS regions. This includes thread_local
+  // variables as well as C11 tss_set and POSIX pthread_setspecific.
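The tls callback that follows has one subtlety: each thread's AllocatorCache itself lives in TLS, and scanning it would make every cached free chunk look reachable, so the cache range is carved out of the scanned TLS range using a sorted vector and a lower-bound search. A simplified portable rendering, not part of the import; kCacheSize stands in for sizeof(AllocatorCache), and unlike the patch (which splits when the range's end falls within the cache) this sketch splits only when the cache sits wholly inside the range:

    // Sketch only: scan a TLS range while skipping an allocator cache.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    constexpr uintptr_t kCacheSize = 0x100;  // Stand-in for sizeof(AllocatorCache).

    static void Scan(uintptr_t b, uintptr_t e, const char *what) {
      if (b < e)
        printf("scan [%#llx,%#llx) as %s\n", (unsigned long long)b,
               (unsigned long long)e, what);
    }

    static void ScanTlsMinusCache(uintptr_t begin, uintptr_t end,
                                  const std::vector<uintptr_t> &cache_starts) {
      // First cache whose start is >= begin; if it lies wholly inside the
      // range, scan around it, otherwise scan the range as-is.
      auto it = std::lower_bound(cache_starts.begin(), cache_starts.end(), begin);
      if (it != cache_starts.end() && *it + kCacheSize <= end) {
        Scan(begin, *it, "TLS");
        Scan(*it + kCacheSize, end, "TLS");
      } else {
        Scan(begin, end, "TLS");
      }
    }

    int main() {
      std::vector<uintptr_t> caches = {0x1000, 0x5000};  // Sorted, as in the patch.
      ScanTlsMinusCache(0x4f00, 0x5f00, caches);  // Splits around 0x5000.
      ScanTlsMinusCache(0x8000, 0x9000, caches);  // No cache inside: one scan.
    }

The lambda below is the real version of this carve-out, built on the runtime's own InternalLowerBound: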
+  auto tls = +[](void *chunk, size_t size, void *data) {
+    auto params = static_cast<const Params *>(data);
+    uptr begin = reinterpret_cast<uptr>(chunk);
+    uptr end = begin + size;
+    auto i = __sanitizer::InternalLowerBound(params->allocator_caches, 0,
+                                             params->allocator_caches.size(),
+                                             begin, CompareLess<uptr>());
+    if (i < params->allocator_caches.size() &&
+        params->allocator_caches[i] >= begin &&
+        end - params->allocator_caches[i] <= sizeof(AllocatorCache)) {
+      // Split the range in two and omit the allocator cache within.
+      ScanRangeForPointers(begin, params->allocator_caches[i],
+                           &params->argument->frontier, "TLS", kReachable);
+      uptr begin2 = params->allocator_caches[i] + sizeof(AllocatorCache);
+      ScanRangeForPointers(begin2, end, &params->argument->frontier, "TLS",
+                           kReachable);
+    } else {
+      ScanRangeForPointers(begin, end, &params->argument->frontier, "TLS",
+                           kReachable);
+    }
+  };
+
+  // This stops the world and then makes callbacks for various memory regions.
+  // The final callback is the last thing before the world starts up again.
+  __sanitizer_memory_snapshot(
+      flags()->use_globals ? globals : nullptr,
+      flags()->use_stacks ? stacks : nullptr,
+      flags()->use_registers ? registers : nullptr,
+      flags()->use_tls ? tls : nullptr,
+      [](zx_status_t, void *data) {
+        auto params = static_cast<const Params *>(data);
+
+        // We don't use the thread registry at all for enumerating the threads
+        // and their stacks, registers, and TLS regions. So use it separately
+        // just for the allocator cache, and to call ForEachExtraStackRange,
+        // which ASan needs.
+        if (flags()->use_stacks) {
+          GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+              [](ThreadContextBase *tctx, void *arg) {
+                ForEachExtraStackRange(tctx->os_id, ForEachExtraStackRangeCb,
+                                       arg);
+              },
+              &params->argument->frontier);
+        }
+
+        params->callback({}, params->argument);
+      },
+      &params);
+
+  UnlockAllocator();
+  UnlockThreadRegistry();
+}
+
+}  // namespace __lsan
+
+// This is declared (in extern "C") by <zircon/sanitizer.h>.
+// _Exit calls this directly to intercept and change the status value.
+int __sanitizer_process_exit_hook(int status) {
+  return __lsan::ExitHook(status);
+}
+
+#endif
diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_common_linux.cpp b/gnu/llvm/compiler-rt/lib/lsan/lsan_common_linux.cpp
index ea1a4a2f569..c97ef31593d 100644
--- a/gnu/llvm/compiler-rt/lib/lsan/lsan_common_linux.cpp
+++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_common_linux.cpp
@@ -134,7 +134,8 @@ static int LockStuffAndStopTheWorldCallback(struct dl_phdr_info *info,
 // while holding the libdl lock in the parent thread, we can safely reenter it
 // in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
 // callback in the parent thread.
-void LockStuffAndStopTheWorld(StopTheWorldCallback callback, void *argument) {
+void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
+                              CheckForLeaksParam *argument) {
   DoStopTheWorldParam param = {callback, argument};
   dl_iterate_phdr(LockStuffAndStopTheWorldCallback, &param);
 }
diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_common_mac.cpp b/gnu/llvm/compiler-rt/lib/lsan/lsan_common_mac.cpp
index c1804e93c11..8516a176eb4 100644
--- a/gnu/llvm/compiler-rt/lib/lsan/lsan_common_mac.cpp
+++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_common_mac.cpp
@@ -193,7 +193,8 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
 // causes rare race conditions.
void HandleLeaks() {} -void LockStuffAndStopTheWorld(StopTheWorldCallback callback, void *argument) { +void LockStuffAndStopTheWorld(StopTheWorldCallback callback, + CheckForLeaksParam *argument) { LockThreadRegistry(); LockAllocator(); StopTheWorld(callback, argument); diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_fuchsia.cpp b/gnu/llvm/compiler-rt/lib/lsan/lsan_fuchsia.cpp new file mode 100644 index 00000000000..40e65c6fb72 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_fuchsia.cpp @@ -0,0 +1,123 @@ +//=-- lsan_fuchsia.cpp ---------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Standalone LSan RTL code specific to Fuchsia. +// +//===---------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" + +#if SANITIZER_FUCHSIA +#include <zircon/sanitizer.h> + +#include "lsan.h" +#include "lsan_allocator.h" + +using namespace __lsan; + +namespace __lsan { + +void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {} + +ThreadContext::ThreadContext(int tid) : ThreadContextLsanBase(tid) {} + +struct OnCreatedArgs { + uptr stack_begin, stack_end; +}; + +// On Fuchsia, the stack bounds of a new thread are available before +// the thread itself has started running. +void ThreadContext::OnCreated(void *arg) { + // Stack bounds passed through from __sanitizer_before_thread_create_hook + // or InitializeMainThread. + auto args = reinterpret_cast<const OnCreatedArgs *>(arg); + stack_begin_ = args->stack_begin; + stack_end_ = args->stack_end; +} + +struct OnStartedArgs { + uptr cache_begin, cache_end; +}; + +void ThreadContext::OnStarted(void *arg) { + auto args = reinterpret_cast<const OnStartedArgs *>(arg); + cache_begin_ = args->cache_begin; + cache_end_ = args->cache_end; +} + +void ThreadStart(u32 tid) { + OnStartedArgs args; + GetAllocatorCacheRange(&args.cache_begin, &args.cache_end); + CHECK_EQ(args.cache_end - args.cache_begin, sizeof(AllocatorCache)); + ThreadContextLsanBase::ThreadStart(tid, GetTid(), ThreadType::Regular, &args); +} + +void InitializeMainThread() { + OnCreatedArgs args; + __sanitizer::GetThreadStackTopAndBottom(true, &args.stack_end, + &args.stack_begin); + u32 tid = ThreadCreate(0, GetThreadSelf(), true, &args); + CHECK_EQ(tid, 0); + ThreadStart(tid); +} + +void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) { + GetThreadRegistryLocked()->RunCallbackForEachThreadLocked( + [](ThreadContextBase *tctx, void *arg) { + auto ctx = static_cast<ThreadContext *>(tctx); + static_cast<decltype(caches)>(arg)->push_back(ctx->cache_begin()); + }, + caches); +} + +} // namespace __lsan + +// These are declared (in extern "C") by <zircon/sanitizer.h>. +// The system runtime will call our definitions directly. + +// This is called before each thread creation is attempted. So, in +// its first call, the calling thread is the initial and sole thread. 
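All four hooks below share one convention: the small integer tid produced at creation time is smuggled through the opaque void * cookie that Fuchsia's libc passes from hook to hook. Reduced to its core (names are illustrative, not part of the import):

    // Sketch only: round-trip a small integer id through a void* cookie.
    #include <cstdint>
    #include <cstdio>

    static void *EncodeTid(uint32_t tid) {
      // Widen to uintptr_t first so the cast to a pointer is well defined.
      return reinterpret_cast<void *>(static_cast<uintptr_t>(tid));
    }

    static uint32_t DecodeTid(void *hook) {
      return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(hook));
    }

    int main() {
      void *cookie = EncodeTid(42);  // What the "create" hook would return.
      printf("decoded tid = %u\n", DecodeTid(cookie));  // Prints 42.
    }

This is safe here because tids are bounded by kMaxThreads (1 << 13 in lsan_thread.cpp), so the value always fits in a pointer. The hooks that follow implement exactly this convention: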
+void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached, + const char *name, void *stack_base, + size_t stack_size) { + uptr user_id = reinterpret_cast<uptr>(thread); + ENSURE_LSAN_INITED; + EnsureMainThreadIDIsCorrect(); + OnCreatedArgs args; + args.stack_begin = reinterpret_cast<uptr>(stack_base); + args.stack_end = args.stack_begin + stack_size; + u32 parent_tid = GetCurrentThread(); + u32 tid = ThreadCreate(parent_tid, user_id, detached, &args); + return reinterpret_cast<void *>(static_cast<uptr>(tid)); +} + +// This is called after creating a new thread (in the creating thread), +// with the pointer returned by __sanitizer_before_thread_create_hook (above). +void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) { + u32 tid = static_cast<u32>(reinterpret_cast<uptr>(hook)); + // On success, there is nothing to do here. + if (error != thrd_success) { + // Clean up the thread registry for the thread creation that didn't happen. + GetThreadRegistryLocked()->FinishThread(tid); + } +} + +// This is called in the newly-created thread before it runs anything else, +// with the pointer returned by __sanitizer_before_thread_create_hook (above). +void __sanitizer_thread_start_hook(void *hook, thrd_t self) { + u32 tid = static_cast<u32>(reinterpret_cast<uptr>(hook)); + ThreadStart(tid); +} + +// Each thread runs this just before it exits, +// with the pointer returned by BeforeThreadCreateHook (above). +// All per-thread destructors have already been called. +void __sanitizer_thread_exit_hook(void *hook, thrd_t self) { ThreadFinish(); } + +#endif // SANITIZER_FUCHSIA diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_fuchsia.h b/gnu/llvm/compiler-rt/lib/lsan/lsan_fuchsia.h new file mode 100644 index 00000000000..65d20ea2114 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_fuchsia.h @@ -0,0 +1,35 @@ +//=-- lsan_fuchsia.h ---------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Standalone LSan RTL code specific to Fuchsia. 
+// +//===---------------------------------------------------------------------===// + +#ifndef LSAN_FUCHSIA_H +#define LSAN_FUCHSIA_H + +#include "lsan_thread.h" +#include "sanitizer_common/sanitizer_platform.h" + +#if !SANITIZER_FUCHSIA +#error "lsan_fuchsia.h is used only on Fuchsia systems (SANITIZER_FUCHSIA)" +#endif + +namespace __lsan { + +class ThreadContext : public ThreadContextLsanBase { + public: + explicit ThreadContext(int tid); + void OnCreated(void *arg) override; + void OnStarted(void *arg) override; +}; + +} // namespace __lsan + +#endif // LSAN_FUCHSIA_H diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_interceptors.cpp b/gnu/llvm/compiler-rt/lib/lsan/lsan_interceptors.cpp index f642bb807bc..9ce9b78c5a5 100644 --- a/gnu/llvm/compiler-rt/lib/lsan/lsan_interceptors.cpp +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_interceptors.cpp @@ -22,7 +22,9 @@ #include "sanitizer_common/sanitizer_platform_interceptors.h" #include "sanitizer_common/sanitizer_platform_limits_netbsd.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" +#if SANITIZER_POSIX #include "sanitizer_common/sanitizer_posix.h" +#endif #include "sanitizer_common/sanitizer_tls_get_addr.h" #include "lsan.h" #include "lsan_allocator.h" @@ -61,6 +63,9 @@ INTERCEPTOR(void, free, void *p) { } INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { + // This hack is not required for Fuchsia because there are no dlsym calls + // involved in setting up interceptors. +#if !SANITIZER_FUCHSIA if (lsan_init_is_running) { // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. const uptr kCallocPoolSize = 1024; @@ -72,6 +77,7 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { CHECK(allocated < kCallocPoolSize); return mem; } +#endif // !SANITIZER_FUCHSIA ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; return lsan_calloc(nmemb, size, stack); @@ -100,7 +106,7 @@ INTERCEPTOR(void*, valloc, uptr size) { GET_STACK_TRACE_MALLOC; return lsan_valloc(size, stack); } -#endif +#endif // !SANITIZER_MAC #if SANITIZER_INTERCEPT_MEMALIGN INTERCEPTOR(void*, memalign, uptr alignment, uptr size) { @@ -307,7 +313,7 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) ///// Thread initialization and finalization. ///// -#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD +#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD && !SANITIZER_FUCHSIA static unsigned g_thread_finalize_key; static void thread_finalize(void *v) { @@ -394,6 +400,8 @@ INTERCEPTOR(char *, strerror, int errnum) { #define LSAN_MAYBE_INTERCEPT_STRERROR #endif +#if SANITIZER_POSIX + struct ThreadParam { void *(*callback)(void *arg); void *param; @@ -416,7 +424,6 @@ extern "C" void *__lsan_thread_start_func(void *arg) { int tid = 0; while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) internal_sched_yield(); - SetCurrentThread(tid); ThreadStart(tid, GetTid()); atomic_store(&p->tid, 0, memory_order_release); return callback(param); @@ -477,9 +484,13 @@ INTERCEPTOR(void, _exit, int status) { #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name) #include "sanitizer_common/sanitizer_signal_interceptors.inc" +#endif // SANITIZER_POSIX + namespace __lsan { void InitializeInterceptors() { + // Fuchsia doesn't use interceptors that require any setup. 
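The calloc hunk earlier in this file guards LSan's classic bootstrap hack: while dlsym() is still resolving REAL(calloc), interceptor setup itself may call calloc, so those early requests are served from a small static pool. A self-contained sketch of the idea, not part of the import; sizes and names are illustrative, it ignores nmemb*size overflow for brevity, and the real code hands out word-sized slots and CHECK-fails on exhaustion rather than returning null:

    // Sketch only: serve calloc from a static zeroed pool during startup.
    #include <cstddef>
    #include <cstdio>

    static bool init_is_running = true;  // Stands in for lsan_init_is_running.

    static void *BootstrapCalloc(size_t nmemb, size_t size) {
      static const size_t kCallocPoolSize = 1024;
      static char calloc_pool[kCallocPoolSize];  // Zero-initialized by the loader.
      static size_t allocated;
      size_t bytes = nmemb * size;  // Overflow check omitted for brevity.
      if (allocated + bytes > kCallocPoolSize)
        return nullptr;
      void *mem = calloc_pool + allocated;  // Bump-pointer allocation; never freed.
      allocated += bytes;
      return mem;
    }

    int main() {
      if (init_is_running) {
        void *p = BootstrapCalloc(4, 8);  // 32 zeroed bytes from the pool.
        printf("pool alloc at %p, first byte %d\n", p, ((char *)p)[0]);
      }
    }

Fuchsia resolves interceptors without dlsym, so the guard on the next line compiles the whole dance out there.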
+#if !SANITIZER_FUCHSIA InitializeSignalInterceptors(); INTERCEPT_FUNCTION(malloc); @@ -515,6 +526,8 @@ void InitializeInterceptors() { Die(); } #endif + +#endif // !SANITIZER_FUCHSIA } } // namespace __lsan diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_linux.cpp b/gnu/llvm/compiler-rt/lib/lsan/lsan_linux.cpp index 14a42b75d2a..47c2f21b5a6 100644 --- a/gnu/llvm/compiler-rt/lib/lsan/lsan_linux.cpp +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_linux.cpp @@ -6,13 +6,13 @@ // //===----------------------------------------------------------------------===// // -// This file is a part of LeakSanitizer. Linux/NetBSD-specific code. +// This file is a part of LeakSanitizer. Linux/NetBSD/Fuchsia-specific code. // //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_LINUX || SANITIZER_NETBSD +#if SANITIZER_LINUX || SANITIZER_NETBSD || SANITIZER_FUCHSIA #include "lsan_allocator.h" @@ -29,4 +29,4 @@ void ReplaceSystemMalloc() {} } // namespace __lsan -#endif // SANITIZER_LINUX || SANITIZER_NETBSD +#endif // SANITIZER_LINUX || SANITIZER_NETBSD || SANITIZER_FUCHSIA diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_posix.cpp b/gnu/llvm/compiler-rt/lib/lsan/lsan_posix.cpp new file mode 100644 index 00000000000..8e05915dd1b --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_posix.cpp @@ -0,0 +1,96 @@ +//=-- lsan_posix.cpp -----------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Standalone LSan RTL code common to POSIX-like systems. 
+// +//===---------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" + +#if SANITIZER_POSIX +#include "lsan.h" +#include "lsan_allocator.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "sanitizer_common/sanitizer_tls_get_addr.h" + +namespace __lsan { + +ThreadContext::ThreadContext(int tid) : ThreadContextLsanBase(tid) {} + +struct OnStartedArgs { + uptr stack_begin; + uptr stack_end; + uptr cache_begin; + uptr cache_end; + uptr tls_begin; + uptr tls_end; + DTLS *dtls; +}; + +void ThreadContext::OnStarted(void *arg) { + auto args = reinterpret_cast<const OnStartedArgs *>(arg); + stack_begin_ = args->stack_begin; + stack_end_ = args->stack_end; + tls_begin_ = args->tls_begin; + tls_end_ = args->tls_end; + cache_begin_ = args->cache_begin; + cache_end_ = args->cache_end; + dtls_ = args->dtls; +} + +void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) { + OnStartedArgs args; + uptr stack_size = 0; + uptr tls_size = 0; + GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size, + &args.tls_begin, &tls_size); + args.stack_end = args.stack_begin + stack_size; + args.tls_end = args.tls_begin + tls_size; + GetAllocatorCacheRange(&args.cache_begin, &args.cache_end); + args.dtls = DTLS_Get(); + ThreadContextLsanBase::ThreadStart(tid, os_id, thread_type, &args); +} + +bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, + uptr *tls_begin, uptr *tls_end, uptr *cache_begin, + uptr *cache_end, DTLS **dtls) { + ThreadContext *context = static_cast<ThreadContext *>( + GetThreadRegistryLocked()->FindThreadContextByOsIDLocked(os_id)); + if (!context) + return false; + *stack_begin = context->stack_begin(); + *stack_end = context->stack_end(); + *tls_begin = context->tls_begin(); + *tls_end = context->tls_end(); + *cache_begin = context->cache_begin(); + *cache_end = context->cache_end(); + *dtls = context->dtls(); + return true; +} + +void InitializeMainThread() { + u32 tid = ThreadCreate(0, 0, true); + CHECK_EQ(tid, 0); + ThreadStart(tid, GetTid()); +} + +static void OnStackUnwind(const SignalContext &sig, const void *, + BufferedStackTrace *stack) { + stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context, + common_flags()->fast_unwind_on_fatal); +} + +void LsanOnDeadlySignal(int signo, void *siginfo, void *context) { + HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind, + nullptr); +} + +} // namespace __lsan + +#endif // SANITIZER_POSIX diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_posix.h b/gnu/llvm/compiler-rt/lib/lsan/lsan_posix.h new file mode 100644 index 00000000000..840e427c55e --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_posix.h @@ -0,0 +1,49 @@ +//=-- lsan_posix.h -----------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Standalone LSan RTL code common to POSIX-like systems. 
+// +//===---------------------------------------------------------------------===// + +#ifndef LSAN_POSIX_H +#define LSAN_POSIX_H + +#include "lsan_thread.h" +#include "sanitizer_common/sanitizer_platform.h" + +#if !SANITIZER_POSIX +#error "lsan_posix.h is used only on POSIX-like systems (SANITIZER_POSIX)" +#endif + +namespace __sanitizer { +struct DTLS; +} + +namespace __lsan { + +class ThreadContext : public ThreadContextLsanBase { + public: + explicit ThreadContext(int tid); + void OnStarted(void *arg) override; + uptr tls_begin() { return tls_begin_; } + uptr tls_end() { return tls_end_; } + DTLS *dtls() { return dtls_; } + + private: + uptr tls_begin_ = 0; + uptr tls_end_ = 0; + DTLS *dtls_ = nullptr; +}; + +void ThreadStart(u32 tid, tid_t os_id, + ThreadType thread_type = ThreadType::Regular); + +} // namespace __lsan + +#endif // LSAN_POSIX_H diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_thread.cpp b/gnu/llvm/compiler-rt/lib/lsan/lsan_thread.cpp index 84e7ce61b97..40bdc254bb6 100644 --- a/gnu/llvm/compiler-rt/lib/lsan/lsan_thread.cpp +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_thread.cpp @@ -13,12 +13,13 @@ #include "lsan_thread.h" +#include "lsan.h" +#include "lsan_allocator.h" +#include "lsan_common.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_thread_registry.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" -#include "lsan_allocator.h" -#include "lsan_common.h" namespace __lsan { @@ -26,7 +27,7 @@ static ThreadRegistry *thread_registry; static ThreadContextBase *CreateThreadContext(u32 tid) { void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext"); - return new(mem) ThreadContext(tid); + return new (mem) ThreadContext(tid); } static const uptr kMaxThreads = 1 << 13; @@ -34,59 +35,26 @@ static const uptr kThreadQuarantineSize = 64; void InitializeThreadRegistry() { static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)]; - thread_registry = new(thread_registry_placeholder) - ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize); + thread_registry = new (thread_registry_placeholder) + ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize); } -ThreadContext::ThreadContext(int tid) - : ThreadContextBase(tid), - stack_begin_(0), - stack_end_(0), - cache_begin_(0), - cache_end_(0), - tls_begin_(0), - tls_end_(0), - dtls_(nullptr) {} - -struct OnStartedArgs { - uptr stack_begin, stack_end, - cache_begin, cache_end, - tls_begin, tls_end; - DTLS *dtls; -}; - -void ThreadContext::OnStarted(void *arg) { - OnStartedArgs *args = reinterpret_cast<OnStartedArgs *>(arg); - stack_begin_ = args->stack_begin; - stack_end_ = args->stack_end; - tls_begin_ = args->tls_begin; - tls_end_ = args->tls_end; - cache_begin_ = args->cache_begin; - cache_end_ = args->cache_end; - dtls_ = args->dtls; -} +ThreadContextLsanBase::ThreadContextLsanBase(int tid) + : ThreadContextBase(tid) {} -void ThreadContext::OnFinished() { +void ThreadContextLsanBase::OnFinished() { AllocatorThreadFinish(); DTLS_Destroy(); } -u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) { - return thread_registry->CreateThread(user_id, detached, parent_tid, - /* arg */ nullptr); +u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached, void *arg) { + return thread_registry->CreateThread(user_id, detached, parent_tid, arg); } -void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) { - OnStartedArgs args; - uptr stack_size = 0; - uptr tls_size = 0; - 
GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size, - &args.tls_begin, &tls_size); - args.stack_end = args.stack_begin + stack_size; - args.tls_end = args.tls_begin + tls_size; - GetAllocatorCacheRange(&args.cache_begin, &args.cache_end); - args.dtls = DTLS_Get(); - thread_registry->StartThread(tid, os_id, thread_type, &args); +void ThreadContextLsanBase::ThreadStart(u32 tid, tid_t os_id, + ThreadType thread_type, void *arg) { + thread_registry->StartThread(tid, os_id, thread_type, arg); + SetCurrentThread(tid); } void ThreadFinish() { @@ -95,7 +63,8 @@ void ThreadFinish() { } ThreadContext *CurrentThreadContext() { - if (!thread_registry) return nullptr; + if (!thread_registry) + return nullptr; if (GetCurrentThread() == kInvalidTid) return nullptr; // No lock needed when getting current thread. @@ -111,12 +80,12 @@ static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) { } u32 ThreadTid(uptr uid) { - return thread_registry->FindThread(FindThreadByUid, (void*)uid); + return thread_registry->FindThread(FindThreadByUid, (void *)uid); } void ThreadJoin(u32 tid) { CHECK_NE(tid, kInvalidTid); - thread_registry->JoinThread(tid, /* arg */nullptr); + thread_registry->JoinThread(tid, /* arg */ nullptr); } void EnsureMainThreadIDIsCorrect() { @@ -126,37 +95,16 @@ void EnsureMainThreadIDIsCorrect() { ///// Interface to the common LSan module. ///// -bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, - uptr *tls_begin, uptr *tls_end, uptr *cache_begin, - uptr *cache_end, DTLS **dtls) { - ThreadContext *context = static_cast<ThreadContext *>( - thread_registry->FindThreadContextByOsIDLocked(os_id)); - if (!context) return false; - *stack_begin = context->stack_begin(); - *stack_end = context->stack_end(); - *tls_begin = context->tls_begin(); - *tls_end = context->tls_end(); - *cache_begin = context->cache_begin(); - *cache_end = context->cache_end(); - *dtls = context->dtls(); - return true; -} - void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, - void *arg) { -} + void *arg) {} -void LockThreadRegistry() { - thread_registry->Lock(); -} +void LockThreadRegistry() { thread_registry->Lock(); } -void UnlockThreadRegistry() { - thread_registry->Unlock(); -} +void UnlockThreadRegistry() { thread_registry->Unlock(); } ThreadRegistry *GetThreadRegistryLocked() { thread_registry->CheckLocked(); return thread_registry; } -} // namespace __lsan +} // namespace __lsan diff --git a/gnu/llvm/compiler-rt/lib/lsan/lsan_thread.h b/gnu/llvm/compiler-rt/lib/lsan/lsan_thread.h index b869d066d9d..0ab1582de66 100644 --- a/gnu/llvm/compiler-rt/lib/lsan/lsan_thread.h +++ b/gnu/llvm/compiler-rt/lib/lsan/lsan_thread.h @@ -16,38 +16,36 @@ #include "sanitizer_common/sanitizer_thread_registry.h" -namespace __sanitizer { -struct DTLS; -} - namespace __lsan { -class ThreadContext : public ThreadContextBase { +class ThreadContextLsanBase : public ThreadContextBase { public: - explicit ThreadContext(int tid); - void OnStarted(void *arg) override; + explicit ThreadContextLsanBase(int tid); void OnFinished() override; uptr stack_begin() { return stack_begin_; } uptr stack_end() { return stack_end_; } - uptr tls_begin() { return tls_begin_; } - uptr tls_end() { return tls_end_; } uptr cache_begin() { return cache_begin_; } uptr cache_end() { return cache_end_; } - DTLS *dtls() { return dtls_; } - private: - uptr stack_begin_, stack_end_, - cache_begin_, cache_end_, - tls_begin_, tls_end_; - DTLS *dtls_; + // The argument is passed on to the subclass's OnStarted member 
function. + static void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type, + void *onstarted_arg); + + protected: + uptr stack_begin_ = 0; + uptr stack_end_ = 0; + uptr cache_begin_ = 0; + uptr cache_end_ = 0; }; +// This subclass of ThreadContextLsanBase is declared in an OS-specific header. +class ThreadContext; + void InitializeThreadRegistry(); +void InitializeMainThread(); -void ThreadStart(u32 tid, tid_t os_id, - ThreadType thread_type = ThreadType::Regular); +u32 ThreadCreate(u32 tid, uptr uid, bool detached, void *arg = nullptr); void ThreadFinish(); -u32 ThreadCreate(u32 tid, uptr uid, bool detached); void ThreadJoin(u32 tid); u32 ThreadTid(uptr uid); @@ -55,6 +53,7 @@ u32 GetCurrentThread(); void SetCurrentThread(u32 tid); ThreadContext *CurrentThreadContext(); void EnsureMainThreadIDIsCorrect(); + } // namespace __lsan #endif // LSAN_THREAD_H diff --git a/gnu/llvm/compiler-rt/lib/msan/msan.cpp b/gnu/llvm/compiler-rt/lib/msan/msan.cpp index 7095ee1bf20..9afc7b026a8 100644 --- a/gnu/llvm/compiler-rt/lib/msan/msan.cpp +++ b/gnu/llvm/compiler-rt/lib/msan/msan.cpp @@ -380,6 +380,28 @@ void __msan_warning_noreturn() { Die(); } +void __msan_warning_with_origin(u32 origin) { + GET_CALLER_PC_BP_SP; + (void)sp; + PrintWarningWithOrigin(pc, bp, origin); + if (__msan::flags()->halt_on_error) { + if (__msan::flags()->print_stats) + ReportStats(); + Printf("Exiting\n"); + Die(); + } +} + +void __msan_warning_with_origin_noreturn(u32 origin) { + GET_CALLER_PC_BP_SP; + (void)sp; + PrintWarningWithOrigin(pc, bp, origin); + if (__msan::flags()->print_stats) + ReportStats(); + Printf("Exiting\n"); + Die(); +} + static void OnStackUnwind(const SignalContext &sig, const void *, BufferedStackTrace *stack) { stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context, @@ -617,34 +639,41 @@ u32 __msan_get_umr_origin() { } u16 __sanitizer_unaligned_load16(const uu16 *p) { - *(uu16 *)&__msan_retval_tls[0] = *(uu16 *)MEM_TO_SHADOW((uptr)p); + internal_memcpy(&__msan_retval_tls[0], (void *)MEM_TO_SHADOW((uptr)p), + sizeof(uu16)); if (__msan_get_track_origins()) __msan_retval_origin_tls = GetOriginIfPoisoned((uptr)p, sizeof(*p)); return *p; } u32 __sanitizer_unaligned_load32(const uu32 *p) { - *(uu32 *)&__msan_retval_tls[0] = *(uu32 *)MEM_TO_SHADOW((uptr)p); + internal_memcpy(&__msan_retval_tls[0], (void *)MEM_TO_SHADOW((uptr)p), + sizeof(uu32)); if (__msan_get_track_origins()) __msan_retval_origin_tls = GetOriginIfPoisoned((uptr)p, sizeof(*p)); return *p; } u64 __sanitizer_unaligned_load64(const uu64 *p) { - __msan_retval_tls[0] = *(uu64 *)MEM_TO_SHADOW((uptr)p); + internal_memcpy(&__msan_retval_tls[0], (void *)MEM_TO_SHADOW((uptr)p), + sizeof(uu64)); if (__msan_get_track_origins()) __msan_retval_origin_tls = GetOriginIfPoisoned((uptr)p, sizeof(*p)); return *p; } void __sanitizer_unaligned_store16(uu16 *p, u16 x) { - u16 s = *(uu16 *)&__msan_param_tls[1]; - *(uu16 *)MEM_TO_SHADOW((uptr)p) = s; + static_assert(sizeof(uu16) == sizeof(u16), "incompatible types"); + u16 s; + internal_memcpy(&s, &__msan_param_tls[1], sizeof(uu16)); + internal_memcpy((void *)MEM_TO_SHADOW((uptr)p), &s, sizeof(uu16)); if (s && __msan_get_track_origins()) if (uu32 o = __msan_param_origin_tls[2]) SetOriginIfPoisoned((uptr)p, (uptr)&s, sizeof(s), o); *p = x; } void __sanitizer_unaligned_store32(uu32 *p, u32 x) { - u32 s = *(uu32 *)&__msan_param_tls[1]; - *(uu32 *)MEM_TO_SHADOW((uptr)p) = s; + static_assert(sizeof(uu32) == sizeof(u32), "incompatible types"); + u32 s; + internal_memcpy(&s, &__msan_param_tls[1], 
sizeof(uu32)); + internal_memcpy((void *)MEM_TO_SHADOW((uptr)p), &s, sizeof(uu32)); if (s && __msan_get_track_origins()) if (uu32 o = __msan_param_origin_tls[2]) SetOriginIfPoisoned((uptr)p, (uptr)&s, sizeof(s), o); diff --git a/gnu/llvm/compiler-rt/lib/msan/msan.h b/gnu/llvm/compiler-rt/lib/msan/msan.h index 12aeaa43519..e794c7c15f8 100644 --- a/gnu/llvm/compiler-rt/lib/msan/msan.h +++ b/gnu/llvm/compiler-rt/lib/msan/msan.h @@ -181,6 +181,20 @@ const MappingDesc kMemoryLayout[] = { #define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x080000000000ULL) #define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x140000000000ULL) +#elif SANITIZER_LINUX && SANITIZER_S390_64 +const MappingDesc kMemoryLayout[] = { + {0x000000000000ULL, 0x040000000000ULL, MappingDesc::APP, "low memory"}, + {0x040000000000ULL, 0x080000000000ULL, MappingDesc::INVALID, "invalid"}, + {0x080000000000ULL, 0x180000000000ULL, MappingDesc::SHADOW, "shadow"}, + {0x180000000000ULL, 0x1C0000000000ULL, MappingDesc::INVALID, "invalid"}, + {0x1C0000000000ULL, 0x2C0000000000ULL, MappingDesc::ORIGIN, "origin"}, + {0x2C0000000000ULL, 0x440000000000ULL, MappingDesc::INVALID, "invalid"}, + {0x440000000000ULL, 0x500000000000ULL, MappingDesc::APP, "high memory"}}; + +#define MEM_TO_SHADOW(mem) \ + ((((uptr)(mem)) & ~0xC00000000000ULL) + 0x080000000000ULL) +#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x140000000000ULL) + #elif SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 64 // Low memory: main binary, MAP_32BIT mappings and modules diff --git a/gnu/llvm/compiler-rt/lib/msan/msan_allocator.cpp b/gnu/llvm/compiler-rt/lib/msan/msan_allocator.cpp index a08c1a00d2e..68be794106b 100644 --- a/gnu/llvm/compiler-rt/lib/msan/msan_allocator.cpp +++ b/gnu/llvm/compiler-rt/lib/msan/msan_allocator.cpp @@ -93,6 +93,20 @@ struct AP64 { // Allocator64 parameters. Deliberately using a short name. }; typedef SizeClassAllocator64<AP64> PrimaryAllocator; +#elif defined(__s390x__) +static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G + +struct AP64 { // Allocator64 parameters. Deliberately using a short name. + static const uptr kSpaceBeg = 0x440000000000; + static const uptr kSpaceSize = 0x020000000000; // 2T. 
+ static const uptr kMetadataSize = sizeof(Metadata); + typedef DefaultSizeClassMap SizeClassMap; + typedef MsanMapUnmapCallback MapUnmapCallback; + static const uptr kFlags = 0; + using AddressSpaceView = LocalAddressSpaceView; +}; + +typedef SizeClassAllocator64<AP64> PrimaryAllocator; #elif defined(__aarch64__) static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G diff --git a/gnu/llvm/compiler-rt/lib/msan/msan_interceptors.cpp b/gnu/llvm/compiler-rt/lib/msan/msan_interceptors.cpp index 1c6956eca0f..6459c7a593e 100644 --- a/gnu/llvm/compiler-rt/lib/msan/msan_interceptors.cpp +++ b/gnu/llvm/compiler-rt/lib/msan/msan_interceptors.cpp @@ -824,30 +824,6 @@ INTERCEPTOR(int, prlimit64, int pid, int resource, void *new_rlimit, #define MSAN_MAYBE_INTERCEPT_PRLIMIT64 #endif -#if SANITIZER_FREEBSD -// FreeBSD's <sys/utsname.h> define uname() as -// static __inline int uname(struct utsname *name) { -// return __xuname(SYS_NMLN, (void*)name); -// } -INTERCEPTOR(int, __xuname, int size, void *utsname) { - ENSURE_MSAN_INITED(); - int res = REAL(__xuname)(size, utsname); - if (!res) - __msan_unpoison(utsname, __sanitizer::struct_utsname_sz); - return res; -} -#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(__xuname) -#else -INTERCEPTOR(int, uname, struct utsname *utsname) { - ENSURE_MSAN_INITED(); - int res = REAL(uname)(utsname); - if (!res) - __msan_unpoison(utsname, __sanitizer::struct_utsname_sz); - return res; -} -#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(uname) -#endif - INTERCEPTOR(int, gethostname, char *name, SIZE_T len) { ENSURE_MSAN_INITED(); int res = REAL(gethostname)(name, len); @@ -953,7 +929,9 @@ void __sanitizer_dtor_callback(const void *data, uptr size) { template <class Mmap> static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length, int prot, int flags, int fd, OFF64_T offset) { - if (addr && !MEM_IS_APP(addr)) { + SIZE_T rounded_length = RoundUpTo(length, GetPageSize()); + void *end_addr = (char *)addr + (rounded_length - 1); + if (addr && (!MEM_IS_APP(addr) || !MEM_IS_APP(end_addr))) { if (flags & map_fixed) { errno = errno_EINVAL; return (void *)-1; @@ -962,7 +940,18 @@ static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length, } } void *res = real_mmap(addr, length, prot, flags, fd, offset); - if (res != (void *)-1) __msan_unpoison(res, RoundUpTo(length, GetPageSize())); + if (res != (void *)-1) { + void *end_res = (char *)res + (rounded_length - 1); + if (MEM_IS_APP(res) && MEM_IS_APP(end_res)) { + __msan_unpoison(res, rounded_length); + } else { + // Application has attempted to map more memory than is supported by + // MSAN. Act as if we ran out of memory. 
+ internal_munmap(res, length); + errno = errno_ENOMEM; + return (void *)-1; + } + } return res; } @@ -1315,6 +1304,8 @@ int OnExit() { ForEachMappedRegion(map, __msan_unpoison); \ } while (false) +#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!msan_inited) + #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ if (MsanThread *t = GetCurrentThread()) { \ *begin = t->tls_begin(); \ @@ -1692,7 +1683,6 @@ void InitializeInterceptors() { MSAN_MAYBE_INTERCEPT_GETRLIMIT64; MSAN_MAYBE_INTERCEPT_PRLIMIT; MSAN_MAYBE_INTERCEPT_PRLIMIT64; - MSAN_INTERCEPT_UNAME; INTERCEPT_FUNCTION(gethostname); MSAN_MAYBE_INTERCEPT_EPOLL_WAIT; MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT; diff --git a/gnu/llvm/compiler-rt/lib/msan/msan_interface_internal.h b/gnu/llvm/compiler-rt/lib/msan/msan_interface_internal.h index 1abbba018c9..9e3db06bd64 100644 --- a/gnu/llvm/compiler-rt/lib/msan/msan_interface_internal.h +++ b/gnu/llvm/compiler-rt/lib/msan/msan_interface_internal.h @@ -46,6 +46,12 @@ using __sanitizer::u32; using __sanitizer::u16; using __sanitizer::u8; +// Versions of the above which take Origin as a parameter +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_warning_with_origin(u32 origin); +SANITIZER_INTERFACE_ATTRIBUTE __attribute__((noreturn)) void +__msan_warning_with_origin_noreturn(u32 origin); + SANITIZER_INTERFACE_ATTRIBUTE void __msan_maybe_warning_1(u8 s, u32 o); SANITIZER_INTERFACE_ATTRIBUTE diff --git a/gnu/llvm/compiler-rt/lib/msan/msan_origin.h b/gnu/llvm/compiler-rt/lib/msan/msan_origin.h index 26a4e7eb90c..e291f538cbd 100644 --- a/gnu/llvm/compiler-rt/lib/msan/msan_origin.h +++ b/gnu/llvm/compiler-rt/lib/msan/msan_origin.h @@ -57,7 +57,7 @@ class Origin { u32 raw_id() const { return raw_id_; } bool isHeapOrigin() const { - // 1xxx xxxx xxxx xxxx + // 0xxx xxxx xxxx xxxx return raw_id_ >> kHeapShift == 0; } bool isStackOrigin() const { diff --git a/gnu/llvm/compiler-rt/lib/msan/tests/msan_test.cpp b/gnu/llvm/compiler-rt/lib/msan/tests/msan_test.cpp index 2d67b6ff645..53b9a3e563e 100644 --- a/gnu/llvm/compiler-rt/lib/msan/tests/msan_test.cpp +++ b/gnu/llvm/compiler-rt/lib/msan/tests/msan_test.cpp @@ -3275,11 +3275,9 @@ static void *SmallStackThread_threadfn(void* data) { } #ifdef PTHREAD_STACK_MIN -# define SMALLSTACKSIZE PTHREAD_STACK_MIN -# define SMALLPRESTACKSIZE PTHREAD_STACK_MIN +constexpr int kThreadStackMin = PTHREAD_STACK_MIN; #else -# define SMALLSTACKSIZE 64 * 1024 -# define SMALLPRESTACKSIZE 16 * 1024 +constexpr int kThreadStackMin = 0; #endif TEST(MemorySanitizer, SmallStackThread) { @@ -3289,7 +3287,7 @@ TEST(MemorySanitizer, SmallStackThread) { int res; res = pthread_attr_init(&attr); ASSERT_EQ(0, res); - res = pthread_attr_setstacksize(&attr, SMALLSTACKSIZE); + res = pthread_attr_setstacksize(&attr, std::max(kThreadStackMin, 64 * 1024)); ASSERT_EQ(0, res); res = pthread_create(&t, &attr, SmallStackThread_threadfn, NULL); ASSERT_EQ(0, res); @@ -3306,7 +3304,7 @@ TEST(MemorySanitizer, SmallPreAllocatedStackThread) { res = pthread_attr_init(&attr); ASSERT_EQ(0, res); void *stack; - const size_t kStackSize = SMALLPRESTACKSIZE; + const size_t kStackSize = std::max(kThreadStackMin, 32 * 1024); res = posix_memalign(&stack, 4096, kStackSize); ASSERT_EQ(0, res); res = pthread_attr_setstack(&attr, stack, kStackSize); diff --git a/gnu/llvm/compiler-rt/lib/profile/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/profile/CMakeLists.txt index 955d0bf7293..29c6c02f2d0 100644 --- a/gnu/llvm/compiler-rt/lib/profile/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/profile/CMakeLists.txt @@ -1,11 +1,11 @@ 
CHECK_CXX_SOURCE_COMPILES(" -#ifdef _MSC_VER -#include <Intrin.h> /* Workaround for PR19898. */ +#ifdef _WIN32 +#include <intrin.h> /* Workaround for PR19898. */ #include <windows.h> #endif int main() { -#ifdef _MSC_VER +#ifdef _WIN32 volatile LONG val = 1; MemoryBarrier(); InterlockedCompareExchange(&val, 0, 1); @@ -51,7 +51,9 @@ add_compiler_rt_component(profile) set(PROFILE_SOURCES GCDAProfiling.c InstrProfiling.c + InstrProfilingInternal.c InstrProfilingValue.c + InstrProfilingBiasVar.c InstrProfilingBuffer.c InstrProfilingFile.c InstrProfilingMerge.c diff --git a/gnu/llvm/compiler-rt/lib/profile/GCDAProfiling.c b/gnu/llvm/compiler-rt/lib/profile/GCDAProfiling.c index 124be3c13af..82369357e98 100644 --- a/gnu/llvm/compiler-rt/lib/profile/GCDAProfiling.c +++ b/gnu/llvm/compiler-rt/lib/profile/GCDAProfiling.c @@ -66,6 +66,16 @@ typedef unsigned long long uint64_t; /* #define DEBUG_GCDAPROFILING */ +enum { + GCOV_DATA_MAGIC = 0x67636461, // "gcda" + + GCOV_TAG_FUNCTION = 0x01000000, + GCOV_TAG_COUNTER_ARCS = 0x01a10000, + // GCOV_TAG_OBJECT_SUMMARY superseded GCOV_TAG_PROGRAM_SUMMARY in GCC 9. + GCOV_TAG_OBJECT_SUMMARY = 0xa1000000, + GCOV_TAG_PROGRAM_SUMMARY = 0xa3000000, +}; + /* * --- GCOV file format I/O primitives --- */ @@ -89,6 +99,7 @@ static uint64_t cur_buffer_size = 0; static uint64_t cur_pos = 0; static uint64_t file_size = 0; static int new_file = 0; +static int gcov_version; #if defined(_WIN32) static HANDLE mmap_handle = NULL; #endif @@ -199,17 +210,6 @@ static void write_64bit_value(uint64_t i) { write_32bit_value(hi); } -static uint32_t length_of_string(const char *s) { - return (strlen(s) / 4) + 1; -} - -static void write_string(const char *s) { - uint32_t len = length_of_string(s); - write_32bit_value(len); - write_bytes(s, strlen(s)); - write_bytes("\0\0\0\0", 4 - (strlen(s) % 4)); -} - static uint32_t read_32bit_value() { uint32_t val; @@ -221,18 +221,6 @@ static uint32_t read_32bit_value() { return val; } -static uint32_t read_le_32bit_value() { - uint32_t val = 0; - int i; - - if (new_file) - return (uint32_t)-1; - - for (i = 0; i < 4; i++) - val |= write_buffer[cur_pos++] << (8*i); - return val; -} - static uint64_t read_64bit_value() { // GCOV uses a lo-/hi-word format even on big-endian systems. // See also GCOVBuffer::readInt64 in LLVM. @@ -261,8 +249,8 @@ static int map_file() { fseek(output_file, 0L, SEEK_END); file_size = ftell(output_file); - /* A size of 0 is invalid to `mmap'. Return a fail here, but don't issue an - * error message because it should "just work" for the user. */ + /* A size of 0 means the file has been created just now (possibly by another + * process in lock-after-open race condition). No need to mmap. */ if (file_size == 0) return -1; @@ -345,30 +333,36 @@ static void unmap_file() { * started at a time. */ COMPILER_RT_VISIBILITY -void llvm_gcda_start_file(const char *orig_filename, const char version[4], +void llvm_gcda_start_file(const char *orig_filename, uint32_t version, uint32_t checksum) { const char *mode = "r+b"; filename = mangle_filename(orig_filename); /* Try just opening the file. */ - new_file = 0; fd = open(filename, O_RDWR | O_BINARY); if (fd == -1) { - /* Try opening the file, creating it if necessary. */ - new_file = 1; - mode = "w+b"; - fd = open(filename, O_RDWR | O_CREAT | O_BINARY, 0644); - if (fd == -1) { + /* Try creating the file. */ + fd = open(filename, O_RDWR | O_CREAT | O_EXCL | O_BINARY, 0644); + if (fd != -1) { + mode = "w+b"; + } else { /* Try creating the directories first then opening the file. 
*/ __llvm_profile_recursive_mkdir(filename); - fd = open(filename, O_RDWR | O_CREAT | O_BINARY, 0644); - if (fd == -1) { - /* Bah! It's hopeless. */ - int errnum = errno; - fprintf(stderr, "profiling: %s: cannot open: %s\n", filename, - strerror(errnum)); - return; + fd = open(filename, O_RDWR | O_CREAT | O_EXCL | O_BINARY, 0644); + if (fd != -1) { + mode = "w+b"; + } else { + /* Another process may have created the file just now. + * Try opening it without O_CREAT and O_EXCL. */ + fd = open(filename, O_RDWR | O_BINARY); + if (fd == -1) { + /* Bah! It's hopeless. */ + int errnum = errno; + fprintf(stderr, "profiling: %s: cannot open: %s\n", filename, + strerror(errnum)); + return; + } } } } @@ -381,27 +375,30 @@ void llvm_gcda_start_file(const char *orig_filename, const char version[4], output_file = fdopen(fd, mode); /* Initialize the write buffer. */ + new_file = 0; write_buffer = NULL; cur_buffer_size = 0; cur_pos = 0; - if (new_file) { + if (map_file() == -1) { + /* The file has been created just now (file_size == 0) or mmap failed + * unexpectedly. In the latter case, try to recover by clobbering. */ + new_file = 1; + write_buffer = NULL; resize_write_buffer(WRITE_BUFFER_SIZE); memset(write_buffer, 0, WRITE_BUFFER_SIZE); - } else { - if (map_file() == -1) { - /* mmap failed, try to recover by clobbering */ - new_file = 1; - write_buffer = NULL; - cur_buffer_size = 0; - resize_write_buffer(WRITE_BUFFER_SIZE); - memset(write_buffer, 0, WRITE_BUFFER_SIZE); - } } /* gcda file, version, stamp checksum. */ - write_bytes("adcg", 4); - write_bytes(version, 4); + { + uint8_t c3 = version >> 24; + uint8_t c2 = (version >> 16) & 255; + uint8_t c1 = (version >> 8) & 255; + gcov_version = c3 >= 'A' ? (c3 - 'A') * 100 + (c2 - '0') * 10 + c1 - '0' + : (c3 - '0') * 10 + c1 - '0'; + } + write_32bit_value(GCOV_DATA_MAGIC); + write_32bit_value(version); write_32bit_value(checksum); #ifdef DEBUG_GCDAPROFILING @@ -436,30 +433,25 @@ void llvm_gcda_increment_indirect_counter(uint32_t *predecessor, } COMPILER_RT_VISIBILITY -void llvm_gcda_emit_function(uint32_t ident, const char *function_name, - uint32_t func_checksum, uint8_t use_extra_checksum, +void llvm_gcda_emit_function(uint32_t ident, uint32_t func_checksum, uint32_t cfg_checksum) { uint32_t len = 2; + int use_extra_checksum = gcov_version >= 47; if (use_extra_checksum) len++; #ifdef DEBUG_GCDAPROFILING - fprintf(stderr, "llvmgcda: function id=0x%08x name=%s\n", ident, - function_name ? function_name : "NULL"); + fprintf(stderr, "llvmgcda: function id=0x%08x\n", ident); #endif if (!output_file) return; /* function tag */ - write_bytes("\0\0\0\1", 4); - if (function_name) - len += 1 + length_of_string(function_name); + write_32bit_value(GCOV_TAG_FUNCTION); write_32bit_value(len); write_32bit_value(ident); write_32bit_value(func_checksum); if (use_extra_checksum) write_32bit_value(cfg_checksum); - if (function_name) - write_string(function_name); } COMPILER_RT_VISIBILITY @@ -471,11 +463,11 @@ void llvm_gcda_emit_arcs(uint32_t num_counters, uint64_t *counters) { if (!output_file) return; - val = read_le_32bit_value(); + val = read_32bit_value(); if (val != (uint32_t)-1) { /* There are counters present in the file. Merge them. 
*/ - if (val != 0x01a10000) { + if (val != GCOV_TAG_COUNTER_ARCS) { fprintf(stderr, "profiling: %s: cannot merge previous GCDA file: " "corrupt arc tag (0x%08x)\n", filename, val); @@ -498,7 +490,7 @@ void llvm_gcda_emit_arcs(uint32_t num_counters, uint64_t *counters) { cur_pos = save_cur_pos; /* Counter #1 (arcs) tag */ - write_bytes("\0\0\xa1\1", 4); + write_32bit_value(GCOV_TAG_COUNTER_ARCS); write_32bit_value(num_counters * 2); for (i = 0; i < num_counters; ++i) { counters[i] += (old_ctrs ? old_ctrs[i] : 0); @@ -516,8 +508,6 @@ void llvm_gcda_emit_arcs(uint32_t num_counters, uint64_t *counters) { COMPILER_RT_VISIBILITY void llvm_gcda_summary_info() { - const uint32_t obj_summary_len = 9; /* Length for gcov compatibility. */ - uint32_t i; uint32_t runs = 1; static uint32_t run_counted = 0; // We only want to increase the run count once. uint32_t val = 0; @@ -525,46 +515,52 @@ void llvm_gcda_summary_info() { if (!output_file) return; - val = read_le_32bit_value(); + val = read_32bit_value(); if (val != (uint32_t)-1) { /* There are counters present in the file. Merge them. */ - if (val != 0xa1000000) { - fprintf(stderr, "profiling: %s: cannot merge previous run count: " - "corrupt object tag (0x%08x)\n", + if (val != (gcov_version >= 90 ? GCOV_TAG_OBJECT_SUMMARY + : GCOV_TAG_PROGRAM_SUMMARY)) { + fprintf(stderr, + "profiling: %s: cannot merge previous run count: " + "corrupt object tag (0x%08x)\n", filename, val); return; } val = read_32bit_value(); /* length */ - if (val != obj_summary_len) { - fprintf(stderr, "profiling: %s: cannot merge previous run count: " - "mismatched object length (%d)\n", - filename, val); - return; + uint32_t prev_runs; + if (gcov_version < 90) { + read_32bit_value(); + read_32bit_value(); + prev_runs = read_32bit_value(); + } else { + prev_runs = read_32bit_value(); + read_32bit_value(); } - - read_32bit_value(); /* checksum, unused */ - read_32bit_value(); /* num, unused */ - uint32_t prev_runs = read_32bit_value(); + for (uint32_t i = gcov_version < 90 ? 3 : 2; i < val; ++i) + read_32bit_value(); /* Add previous run count to new counter, if not already counted before. */ runs = run_counted ? prev_runs : prev_runs + 1; } cur_pos = save_cur_pos; - /* Object summary tag */ - write_bytes("\0\0\0\xa1", 4); - write_32bit_value(obj_summary_len); - write_32bit_value(0); /* checksum, unused */ - write_32bit_value(0); /* num, unused */ - write_32bit_value(runs); - for (i = 3; i < obj_summary_len; ++i) + if (gcov_version >= 90) { + write_32bit_value(GCOV_TAG_OBJECT_SUMMARY); + write_32bit_value(2); + write_32bit_value(runs); + write_32bit_value(0); // sum_max + } else { + // Before gcov 4.8 (r190952), GCOV_TAG_SUMMARY_LENGTH was 9. r190952 set + // GCOV_TAG_SUMMARY_LENGTH to 22. We simply use the smallest length which + // can make gcov read "Runs:". + write_32bit_value(GCOV_TAG_PROGRAM_SUMMARY); + write_32bit_value(3); write_32bit_value(0); - - /* Program summary tag */ - write_bytes("\0\0\0\xa3", 4); /* tag indicates 1 program */ - write_32bit_value(0); /* 0 length */ + write_32bit_value(0); + write_32bit_value(runs); + } run_counted = 1; @@ -616,8 +612,17 @@ void llvm_writeout_files(void) { } } -COMPILER_RT_VISIBILITY -void llvm_delete_writeout_function_list(void) { +#ifndef _WIN32 +// __attribute__((destructor)) and destructors whose priorities are greater than +// 100 run before this function and can thus be tracked. The priority is +// compatible with GCC 7 onwards. 
+#if __GNUC__ >= 9
+#pragma GCC diagnostic ignored "-Wprio-ctor-dtor"
+#endif
+__attribute__((destructor(100)))
+#endif
+static void llvm_writeout_and_clear(void) {
+ llvm_writeout_files();
 fn_list_remove(&writeout_fn_list);
 }
@@ -698,8 +703,9 @@ void llvm_gcov_init(fn_ptr wfn, fn_ptr ffn, fn_ptr rfn) {
 /* Make sure we write out the data and delete the data structures. */
 atexit(llvm_delete_reset_function_list);
 atexit(llvm_delete_flush_function_list);
- atexit(llvm_delete_writeout_function_list);
- atexit(llvm_writeout_files);
+#ifdef _WIN32
+ atexit(llvm_writeout_and_clear);
+#endif
 }
 }
diff --git a/gnu/llvm/compiler-rt/lib/profile/InstrProfiling.c b/gnu/llvm/compiler-rt/lib/profile/InstrProfiling.c
index 087d1cdd2ef..31a9fe99629 100644
--- a/gnu/llvm/compiler-rt/lib/profile/InstrProfiling.c
+++ b/gnu/llvm/compiler-rt/lib/profile/InstrProfiling.c
@@ -25,18 +25,8 @@ COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_magic(void) {
 : (INSTR_PROF_RAW_MAGIC_32);
 }
-static unsigned ProfileDumped = 0;
-
-COMPILER_RT_VISIBILITY unsigned lprofProfileDumped() {
- return ProfileDumped;
-}
-
-COMPILER_RT_VISIBILITY void lprofSetProfileDumped() {
- ProfileDumped = 1;
-}
-
 COMPILER_RT_VISIBILITY void __llvm_profile_set_dumped() {
- lprofSetProfileDumped();
+ lprofSetProfileDumped(1);
 }
 /* Return the number of bytes needed to add to SizeInBytes to make it
@@ -80,5 +70,5 @@ COMPILER_RT_VISIBILITY void __llvm_profile_reset_counters(void) {
 }
 }
 }
- ProfileDumped = 0;
+ lprofSetProfileDumped(0);
 }
diff --git a/gnu/llvm/compiler-rt/lib/profile/InstrProfiling.h b/gnu/llvm/compiler-rt/lib/profile/InstrProfiling.h
index 3a3bab3d0b4..d7a7c32332c 100644
--- a/gnu/llvm/compiler-rt/lib/profile/InstrProfiling.h
+++ b/gnu/llvm/compiler-rt/lib/profile/InstrProfiling.h
@@ -218,6 +218,9 @@ int __llvm_profile_register_write_file_atexit(void);
 /*! \brief Initialize file handling. */
 void __llvm_profile_initialize_file(void);
+/*! \brief Initialize the profile runtime. */
+void __llvm_profile_initialize(void);
+
 /*!
 * \brief Return path prefix (excluding the base filename) of the profile data.
 * This is useful for users using \c -fprofile-generate=./path_prefix who do
@@ -307,4 +310,11 @@ extern uint64_t INSTR_PROF_RAW_VERSION_VAR; /* __llvm_profile_raw_version */
 */
 extern char INSTR_PROF_PROFILE_NAME_VAR[1]; /* __llvm_profile_filename. */
+/*!
+ * This variable is a weak symbol defined in InstrProfilingBiasVar.c. It
+ * allows compiler instrumentation to provide an overriding definition with a
+ * value from the compiler command line. This variable has hidden visibility.
+ */
+COMPILER_RT_VISIBILITY extern intptr_t __llvm_profile_counter_bias;
+
 #endif /* PROFILE_INSTRPROFILING_H_ */
diff --git a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingBiasVar.c b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingBiasVar.c
new file mode 100644
index 00000000000..05745fd858d
--- /dev/null
+++ b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingBiasVar.c
@@ -0,0 +1,15 @@
+/*===- InstrProfilingBiasVar.c - profile counter bias variable setup ------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#include "InstrProfiling.h"
+
+/* The runtime should only provide its own definition of this symbol when the
+ * user has not specified one.
Set this up by moving the runtime's copy of this + * symbol to an object file within the archive. + */ +COMPILER_RT_WEAK intptr_t __llvm_profile_counter_bias = -1; diff --git a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingBuffer.c b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingBuffer.c index 174280fd4b5..5ee44785a7a 100644 --- a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingBuffer.c +++ b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingBuffer.c @@ -62,7 +62,8 @@ void __llvm_profile_get_padding_sizes_for_counters( uint64_t DataSize, uint64_t CountersSize, uint64_t NamesSize, uint64_t *PaddingBytesBeforeCounters, uint64_t *PaddingBytesAfterCounters, uint64_t *PaddingBytesAfterNames) { - if (!__llvm_profile_is_continuous_mode_enabled()) { + if (!__llvm_profile_is_continuous_mode_enabled() || + lprofRuntimeCounterRelocation()) { *PaddingBytesBeforeCounters = 0; *PaddingBytesAfterCounters = 0; *PaddingBytesAfterNames = __llvm_profile_get_num_padding_bytes(NamesSize); diff --git a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingFile.c b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingFile.c index 7f3727eed92..9e1a54a0c37 100644 --- a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingFile.c +++ b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingFile.c @@ -448,6 +448,99 @@ static void unlockProfile(int *ProfileRequiresUnlock, FILE *File) { } #endif // !defined(__Fuchsia__) && !defined(_WIN32) +static int writeMMappedFile(FILE *OutputFile, char **Profile) { + if (!OutputFile) + return -1; + + /* Write the data into a file. */ + setupIOBuffer(); + ProfDataWriter fileWriter; + initFileWriter(&fileWriter, OutputFile); + if (lprofWriteData(&fileWriter, NULL, 0)) { + PROF_ERR("Failed to write profile: %s\n", strerror(errno)); + return -1; + } + fflush(OutputFile); + + /* Get the file size. */ + uint64_t FileSize = ftell(OutputFile); + + /* Map the profile. */ + *Profile = (char *)mmap( + NULL, FileSize, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(OutputFile), 0); + if (*Profile == MAP_FAILED) { + PROF_ERR("Unable to mmap profile: %s\n", strerror(errno)); + return -1; + } + + return 0; +} + +static void relocateCounters(void) { + if (!__llvm_profile_is_continuous_mode_enabled() || + !lprofRuntimeCounterRelocation()) + return; + + /* Get the sizes of various profile data sections. Taken from + * __llvm_profile_get_size_for_buffer(). */ + const __llvm_profile_data *DataBegin = __llvm_profile_begin_data(); + const __llvm_profile_data *DataEnd = __llvm_profile_end_data(); + uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd); + const uint64_t CountersOffset = sizeof(__llvm_profile_header) + + (DataSize * sizeof(__llvm_profile_data)); + + int Length = getCurFilenameLength(); + char *FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1); + const char *Filename = getCurFilename(FilenameBuf, 0); + if (!Filename) + return; + + FILE *File = NULL; + char *Profile = NULL; + + if (!doMerging()) { + File = fopen(Filename, "w+b"); + if (!File) + return; + + if (writeMMappedFile(File, &Profile) == -1) { + fclose(File); + return; + } + } else { + File = lprofOpenFileEx(Filename); + if (!File) + return; + + uint64_t ProfileFileSize = 0; + if (getProfileFileSizeForMerging(File, &ProfileFileSize) == -1) { + lprofUnlockFileHandle(File); + fclose(File); + return; + } + + if (!ProfileFileSize) { + if (writeMMappedFile(File, &Profile) == -1) { + fclose(File); + return; + } + } else { + /* The merged profile has a non-zero length. Check that it is compatible + * with the data in this process. 
*/
+ if (mmapProfileForMerging(File, ProfileFileSize, &Profile) == -1) {
+ fclose(File);
+ return;
+ }
+ }
+
+ lprofUnlockFileHandle(File);
+ }
+
+ /* Update the profile fields based on the current mapping. */
+ __llvm_profile_counter_bias = (intptr_t)Profile -
+ (uintptr_t)__llvm_profile_begin_counters() + CountersOffset;
+}
+
 static void initializeProfileForContinuousMode(void) {
 if (!__llvm_profile_is_continuous_mode_enabled())
 return;
@@ -715,7 +808,12 @@ static void parseAndSetFilename(const char *FilenamePat,
 }
 truncateCurrentFile();
- initializeProfileForContinuousMode();
+ if (__llvm_profile_is_continuous_mode_enabled()) {
+ if (lprofRuntimeCounterRelocation())
+ relocateCounters();
+ else
+ initializeProfileForContinuousMode();
+ }
 }
 /* Return buffer length that is required to store the current profile
@@ -854,10 +952,10 @@ const char *__llvm_profile_get_filename(void) {
 return FilenameBuf;
 }
-/* This method is invoked by the runtime initialization hook
- * InstrProfilingRuntime.o if it is linked in. Both user specified
+/* This API initializes the file handling; both the user-specified
 * profile path via -fprofile-instr-generate= and LLVM_PROFILE_FILE
- * environment variable can override this default value. */
+ * environment variable can override this default value.
+ */
 COMPILER_RT_VISIBILITY
 void __llvm_profile_initialize_file(void) {
 const char *EnvFilenamePat;
@@ -865,6 +963,9 @@ void __llvm_profile_initialize_file(void) {
 ProfileNameSpecifier PNS = PNS_unknown;
 int hasCommandLineOverrider = (INSTR_PROF_PROFILE_NAME_VAR[0] != 0);
+ if (__llvm_profile_counter_bias != -1)
+ lprofSetRuntimeCounterRelocation(1);
+
 EnvFilenamePat = getFilenamePatFromEnv();
 if (EnvFilenamePat) {
 /* Pass CopyFilenamePat = 1, to ensure that the filename would be valid
@@ -882,6 +983,16 @@ void __llvm_profile_initialize_file(void) {
 parseAndSetFilename(SelectedPat, PNS, 0);
 }
+/* This method is invoked by the runtime initialization hook
+ * InstrProfilingRuntime.o if it is linked in.
+ */
+COMPILER_RT_VISIBILITY
+void __llvm_profile_initialize(void) {
+ __llvm_profile_initialize_file();
+ if (!__llvm_profile_is_continuous_mode_enabled())
+ __llvm_profile_register_write_file_atexit();
+}
+
 /* This API is directly called by the user application code. It has the
 * highest precedence compared with LLVM_PROFILE_FILE environment variable
 * and command line option -fprofile-instr-generate=<profile_name>.
@@ -951,7 +1062,7 @@ int __llvm_profile_dump(void) {
 "in profile name or change profile name before dumping.\n",
 "online profile merging is not on");
 int rc = __llvm_profile_write_file();
- lprofSetProfileDumped();
+ lprofSetProfileDumped(1);
 return rc;
 }
diff --git a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingInternal.c b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingInternal.c
new file mode 100644
index 00000000000..d58bc19ad11
--- /dev/null
+++ b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingInternal.c
@@ -0,0 +1,33 @@
+/*===- InstrProfilingInternal.c - Support library for PGO instrumentation -===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +|* +\*===----------------------------------------------------------------------===*/ + +#if !defined(__Fuchsia__) + +#include "InstrProfilingInternal.h" + +static unsigned ProfileDumped = 0; + +COMPILER_RT_VISIBILITY unsigned lprofProfileDumped() { + return ProfileDumped; +} + +COMPILER_RT_VISIBILITY void lprofSetProfileDumped(unsigned Value) { + ProfileDumped = Value; +} + +static unsigned RuntimeCounterRelocation = 0; + +COMPILER_RT_VISIBILITY unsigned lprofRuntimeCounterRelocation(void) { + return RuntimeCounterRelocation; +} + +COMPILER_RT_VISIBILITY void lprofSetRuntimeCounterRelocation(unsigned Value) { + RuntimeCounterRelocation = Value; +} + +#endif diff --git a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingInternal.h b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingInternal.h index 0cea4876f0a..904bd394592 100644 --- a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingInternal.h +++ b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingInternal.h @@ -181,8 +181,12 @@ uint64_t lprofGetLoadModuleSignature(); * Return non zero value if the profile data has already been * dumped to the file. */ -unsigned lprofProfileDumped(); -void lprofSetProfileDumped(); +unsigned lprofProfileDumped(void); +void lprofSetProfileDumped(unsigned); + +/* Return non zero value if counters are being relocated at runtime. */ +unsigned lprofRuntimeCounterRelocation(void); +void lprofSetRuntimeCounterRelocation(unsigned); COMPILER_RT_VISIBILITY extern void (*FreeHook)(void *); COMPILER_RT_VISIBILITY extern uint8_t *DynamicBufferIOBuffer; diff --git a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c index 23b7efbe672..d8b7fa21d25 100644 --- a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c +++ b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c @@ -34,16 +34,15 @@ #include "InstrProfilingInternal.h" #include "InstrProfilingUtil.h" -/* VMO that contains the coverage data shared across all modules. This symbol - * has default visibility and is exported in each module (executable or DSO) - * that statically links in the profiling runtime. - */ -zx_handle_t __llvm_profile_vmo; -/* Current offset within the VMO where data should be written next. This symbol - * has default visibility and is exported in each module (executable or DSO) - * that statically links in the profiling runtime. - */ -uint64_t __llvm_profile_offset; +COMPILER_RT_VISIBILITY unsigned lprofProfileDumped() { + return 1; +} +COMPILER_RT_VISIBILITY void lprofSetProfileDumped(unsigned Value) {} + +COMPILER_RT_VISIBILITY unsigned lprofRuntimeCounterRelocation(void) { + return 1; +} +COMPILER_RT_VISIBILITY void lprofSetRuntimeCounterRelocation(unsigned Value) {} static const char ProfileSinkName[] = "llvm-profile"; @@ -58,65 +57,24 @@ static inline void lprofWrite(const char *fmt, ...) { __sanitizer_log_write(s, ret + 1); } -static void createVMO() { - /* Don't create VMO if it has been alread created. */ - if (__llvm_profile_vmo != ZX_HANDLE_INVALID) - return; - - /* Get information about the current process. */ - zx_info_handle_basic_t Info; - zx_status_t Status = - _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &Info, - sizeof(Info), NULL, NULL); - if (Status != ZX_OK) { - lprofWrite("LLVM Profile: cannot get info about current process: %s\n", - _zx_status_get_string(Status)); - return; - } - - /* Create VMO to hold the profile data. 
*/ - Status = _zx_vmo_create(0, ZX_VMO_RESIZABLE, &__llvm_profile_vmo); - if (Status != ZX_OK) { - lprofWrite("LLVM Profile: cannot create VMO: %s\n", - _zx_status_get_string(Status)); - return; - } - - /* Give the VMO a name including our process KOID so it's easy to spot. */ - char VmoName[ZX_MAX_NAME_LEN]; - snprintf(VmoName, sizeof(VmoName), "%s.%" PRIu64, ProfileSinkName, Info.koid); - _zx_object_set_property(__llvm_profile_vmo, ZX_PROP_NAME, VmoName, - strlen(VmoName)); - - /* Duplicate the handle since __sanitizer_publish_data consumes it. */ - zx_handle_t Handle; - Status = - _zx_handle_duplicate(__llvm_profile_vmo, ZX_RIGHT_SAME_RIGHTS, &Handle); - if (Status != ZX_OK) { - lprofWrite("LLVM Profile: cannot duplicate VMO handle: %s\n", - _zx_status_get_string(Status)); - _zx_handle_close(__llvm_profile_vmo); - __llvm_profile_vmo = ZX_HANDLE_INVALID; - return; - } - - /* Publish the VMO which contains profile data to the system. */ - __sanitizer_publish_data(ProfileSinkName, Handle); - - /* Use the dumpfile symbolizer markup element to write the name of VMO. */ - lprofWrite("LLVM Profile: {{{dumpfile:%s:%s}}}\n", ProfileSinkName, VmoName); -} +struct lprofVMOWriterCtx { + /* VMO that contains the profile data for this module. */ + zx_handle_t Vmo; + /* Current offset within the VMO where data should be written next. */ + uint64_t Offset; +}; static uint32_t lprofVMOWriter(ProfDataWriter *This, ProfDataIOVec *IOVecs, uint32_t NumIOVecs) { + struct lprofVMOWriterCtx *Ctx = (struct lprofVMOWriterCtx *)This->WriterCtx; + /* Compute the total length of data to be written. */ size_t Length = 0; for (uint32_t I = 0; I < NumIOVecs; I++) Length += IOVecs[I].ElmSize * IOVecs[I].NumElm; /* Resize the VMO to ensure there's sufficient space for the data. */ - zx_status_t Status = - _zx_vmo_set_size(__llvm_profile_vmo, __llvm_profile_offset + Length); + zx_status_t Status = _zx_vmo_set_size(Ctx->Vmo, Ctx->Offset + Length); if (Status != ZX_OK) return -1; @@ -124,74 +82,112 @@ static uint32_t lprofVMOWriter(ProfDataWriter *This, ProfDataIOVec *IOVecs, for (uint32_t I = 0; I < NumIOVecs; I++) { size_t Length = IOVecs[I].ElmSize * IOVecs[I].NumElm; if (IOVecs[I].Data) { - Status = _zx_vmo_write(__llvm_profile_vmo, IOVecs[I].Data, - __llvm_profile_offset, Length); + Status = _zx_vmo_write(Ctx->Vmo, IOVecs[I].Data, Ctx->Offset, Length); if (Status != ZX_OK) return -1; } else if (IOVecs[I].UseZeroPadding) { /* Resizing the VMO should zero fill. */ } - __llvm_profile_offset += Length; + Ctx->Offset += Length; } + /* Record the profile size as a property of the VMO. */ + _zx_object_set_property(Ctx->Vmo, ZX_PROP_VMO_CONTENT_SIZE, &Ctx->Offset, + sizeof(Ctx->Offset)); + return 0; } -static void initVMOWriter(ProfDataWriter *This) { +static void initVMOWriter(ProfDataWriter *This, struct lprofVMOWriterCtx *Ctx) { This->Write = lprofVMOWriter; - This->WriterCtx = NULL; + This->WriterCtx = Ctx; } -static int dump(void) { - if (lprofProfileDumped()) { - lprofWrite("LLVM Profile: data not published: already written.\n"); - return 0; - } - +/* This method is invoked by the runtime initialization hook + * InstrProfilingRuntime.o if it is linked in. */ +COMPILER_RT_VISIBILITY +void __llvm_profile_initialize(void) { /* Check if there is llvm/runtime version mismatch. 
*/
 if (GET_VERSION(__llvm_profile_get_version()) != INSTR_PROF_RAW_VERSION) {
 lprofWrite("LLVM Profile: runtime and instrumentation version mismatch: "
 "expected %d, but got %d\n",
 INSTR_PROF_RAW_VERSION,
 (int)GET_VERSION(__llvm_profile_get_version()));
- return -1;
+ return;
 }
- /* Write the profile data into the mapped region. */
- ProfDataWriter VMOWriter;
- initVMOWriter(&VMOWriter);
- if (lprofWriteData(&VMOWriter, lprofGetVPDataReader(), 0) != 0)
- return -1;
+ /* This symbol is defined as weak and initialized to -1 by the runtime, but
+ * the compiler will generate a strong definition initialized to 0 when
+ * runtime counter relocation is used. */
+ if (__llvm_profile_counter_bias == -1) {
+ lprofWrite("LLVM Profile: counter relocation at runtime is required\n");
+ return;
+ }
- return 0;
-}
+ const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
+ const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
+ const uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
+ const uint64_t CountersOffset =
+ sizeof(__llvm_profile_header) + (DataSize * sizeof(__llvm_profile_data));
-COMPILER_RT_VISIBILITY
-int __llvm_profile_dump(void) {
- int rc = dump();
- lprofSetProfileDumped();
- return rc;
-}
+ zx_status_t Status;
-static void dumpWithoutReturn(void) { dump(); }
+ /* Create VMO to hold the profile data. */
+ zx_handle_t Vmo = ZX_HANDLE_INVALID;
+ Status = _zx_vmo_create(0, ZX_VMO_RESIZABLE, &Vmo);
+ if (Status != ZX_OK) {
+ lprofWrite("LLVM Profile: cannot create VMO: %s\n",
+ _zx_status_get_string(Status));
+ return;
+ }
-/* This method is invoked by the runtime initialization hook
- * InstrProfilingRuntime.o if it is linked in.
- */
-COMPILER_RT_VISIBILITY
-void __llvm_profile_initialize_file(void) { createVMO(); }
+ /* Give the VMO a name that includes the module signature. */
+ char VmoName[ZX_MAX_NAME_LEN];
+ snprintf(VmoName, sizeof(VmoName), "%" PRIu64 ".profraw",
+ lprofGetLoadModuleSignature());
+ _zx_object_set_property(Vmo, ZX_PROP_NAME, VmoName, strlen(VmoName));
-COMPILER_RT_VISIBILITY
-int __llvm_profile_register_write_file_atexit(void) {
- static bool HasBeenRegistered = false;
+ /* Write the profile data into the mapped region. */
+ ProfDataWriter VMOWriter;
+ struct lprofVMOWriterCtx Ctx = {.Vmo = Vmo, .Offset = 0};
+ initVMOWriter(&VMOWriter, &Ctx);
+ if (lprofWriteData(&VMOWriter, 0, 0) != 0) {
+ lprofWrite("LLVM Profile: failed to write data\n");
+ _zx_handle_close(Vmo);
+ return;
+ }
- if (HasBeenRegistered)
- return 0;
+ uint64_t Len = 0;
+ Status = _zx_vmo_get_size(Vmo, &Len);
+ if (Status != ZX_OK) {
+ lprofWrite("LLVM Profile: failed to get the VMO size: %s\n",
+ _zx_status_get_string(Status));
+ _zx_handle_close(Vmo);
+ return;
+ }
- lprofSetupValueProfiler();
+ uintptr_t Mapping;
+ Status =
+ _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
+ Vmo, 0, Len, &Mapping);
+ if (Status != ZX_OK) {
+ lprofWrite("LLVM Profile: failed to map the VMO: %s\n",
+ _zx_status_get_string(Status));
+ _zx_handle_close(Vmo);
+ return;
+ }
+
+ /* Publish the VMO which contains profile data to the system. Note that this
+ * also consumes the VMO handle. */
+ __sanitizer_publish_data(ProfileSinkName, Vmo);
+
+ /* Use the dumpfile symbolizer markup element to write the name of the VMO. */
+ lprofWrite("LLVM Profile: {{{dumpfile:%s:%s}}}\n", ProfileSinkName, VmoName);
- HasBeenRegistered = true;
- return atexit(dumpWithoutReturn);
+ /* Update the profile fields based on the current mapping.
*/ + __llvm_profile_counter_bias = (intptr_t)Mapping - + (uintptr_t)__llvm_profile_begin_counters() + + CountersOffset; } #endif diff --git a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingPort.h b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingPort.h index 20cf5d660c6..4493dd512ff 100644 --- a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingPort.h +++ b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingPort.h @@ -53,9 +53,9 @@ #endif #if COMPILER_RT_HAS_ATOMICS == 1 -#ifdef _MSC_VER +#ifdef _WIN32 #include <windows.h> -#if _MSC_VER < 1900 +#if defined(_MSC_VER) && _MSC_VER < 1900 #define snprintf _snprintf #endif #if defined(_WIN64) @@ -73,7 +73,7 @@ (DomType *)InterlockedExchangeAdd((LONG volatile *)&PtrVar, \ (LONG)sizeof(DomType) * PtrIncr) #endif -#else /* !defined(_MSC_VER) */ +#else /* !defined(_WIN32) */ #define COMPILER_RT_BOOL_CMPXCHG(Ptr, OldV, NewV) \ __sync_bool_compare_and_swap(Ptr, OldV, NewV) #define COMPILER_RT_PTR_FETCH_ADD(DomType, PtrVar, PtrIncr) \ diff --git a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingRuntime.cpp b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingRuntime.cpp index 5dff09d7063..4ea2bb263f5 100644 --- a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingRuntime.cpp +++ b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingRuntime.cpp @@ -19,9 +19,7 @@ namespace { class RegisterRuntime { public: RegisterRuntime() { - __llvm_profile_initialize_file(); - if (!__llvm_profile_is_continuous_mode_enabled()) - __llvm_profile_register_write_file_atexit(); + __llvm_profile_initialize(); } }; diff --git a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingUtil.h b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingUtil.h index f0e29a8803a..5f5c85091fe 100644 --- a/gnu/llvm/compiler-rt/lib/profile/InstrProfilingUtil.h +++ b/gnu/llvm/compiler-rt/lib/profile/InstrProfilingUtil.h @@ -30,11 +30,13 @@ int lprofUnlockFileHandle(FILE *F); * lock for exclusive access. The caller will block * if the lock is already held by another process. */ FILE *lprofOpenFileEx(const char *Filename); -/* PS4 doesn't have setenv/getenv. Define a shim. */ +/* PS4 doesn't have setenv/getenv/fork. Define a shim. 
*/
 #if __ORBIS__
+#include <sys/types.h>
 static inline char *getenv(const char *name) { return NULL; }
 static inline int setenv(const char *name, const char *value, int overwrite)
 { return 0; }
+static pid_t fork() { return -1; }
 #endif /* #if __ORBIS__ */
 /* GCOV_PREFIX and GCOV_PREFIX_STRIP support */
diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/sanitizer_common/CMakeLists.txt
index 0b9c4dc1a3f..97e6b1ac921 100644
--- a/gnu/llvm/compiler-rt/lib/sanitizer_common/CMakeLists.txt
+++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/CMakeLists.txt
@@ -29,11 +29,13 @@ set(SANITIZER_SOURCES_NOTERMINATION
 sanitizer_printf.cpp
 sanitizer_procmaps_common.cpp
 sanitizer_procmaps_bsd.cpp
+ sanitizer_procmaps_fuchsia.cpp
 sanitizer_procmaps_linux.cpp
 sanitizer_procmaps_mac.cpp
 sanitizer_procmaps_solaris.cpp
 sanitizer_rtems.cpp
 sanitizer_solaris.cpp
+ sanitizer_stoptheworld_fuchsia.cpp
 sanitizer_stoptheworld_mac.cpp
 sanitizer_suppressions.cpp
 sanitizer_tls_get_addr.cpp
@@ -162,6 +164,7 @@ set(SANITIZER_IMPL_HEADERS
 sanitizer_platform_limits_solaris.h
 sanitizer_posix.h
 sanitizer_procmaps.h
+ sanitizer_ptrauth.h
 sanitizer_quarantine.h
 sanitizer_report_decorator.h
 sanitizer_ring_buffer.h
diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
index 906d4af7f5e..ec77b9cbfee 100644
--- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -25,7 +25,7 @@ const char *PrimaryAllocatorName = "SizeClassAllocator";
 const char *SecondaryAllocatorName = "LargeMmapAllocator";
 // ThreadSanitizer for Go uses libc malloc/free.
-#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
+#if defined(SANITIZER_USE_MALLOC)
 # if SANITIZER_LINUX && !SANITIZER_ANDROID
 extern "C" void *__libc_malloc(uptr size);
 # if !SANITIZER_GO
diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 90603280e7c..1d9a29c70f3 100644
--- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -72,11 +72,15 @@ class SizeClassAllocator64 {
 void Init(s32 release_to_os_interval_ms) {
 uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
 if (kUsingConstantSpaceBeg) {
+ CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
 CHECK_EQ(kSpaceBeg,
 address_range.Init(TotalSpaceSize, PrimaryAllocatorName, kSpaceBeg));
 } else {
- NonConstSpaceBeg = address_range.Init(TotalSpaceSize,
- PrimaryAllocatorName);
+ // Combined allocator expects that a 2^N allocation is always aligned to
+ // 2^N. For this to work, the start of the space needs to be aligned as
+ // high as the largest size class (which also needs to be a power of 2).
+ NonConstSpaceBeg = address_range.InitAligned(
+ TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
 CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
 }
 SetReleaseToOSIntervalMs(release_to_os_interval_ms);
@@ -220,7 +224,7 @@ class SizeClassAllocator64 {
 // Test-only.
void TestOnlyUnmap() { - UnmapWithCallbackOrDie(SpaceBeg(), kSpaceSize + AdditionalSize()); + UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size()); } static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats, diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp index f5f9f49d8cf..87efda5bd37 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp @@ -274,6 +274,7 @@ uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) { return name_len; } +#if !SANITIZER_GO void PrintCmdline() { char **argv = GetArgv(); if (!argv) return; @@ -282,6 +283,7 @@ void PrintCmdline() { Printf("%s ", argv[i]); Printf("\n\n"); } +#endif // Malloc hooks. static const int kMaxMallocFreeHooks = 5; diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common.h index 3b52172c483..07b307a602c 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common.h @@ -143,6 +143,7 @@ void RunFreeHooks(const void *ptr); class ReservedAddressRange { public: uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0); + uptr InitAligned(uptr size, uptr align, const char *name = nullptr); uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr); uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr); void Unmap(uptr addr, uptr size); @@ -855,7 +856,7 @@ INLINE uptr GetPthreadDestructorIterations() { #endif } -void *internal_start_thread(void(*func)(void*), void *arg); +void *internal_start_thread(void *(*func)(void*), void *arg); void internal_join_thread(void *th); void MaybeStartBackgroudThread(); @@ -977,6 +978,20 @@ INLINE u32 GetNumberOfCPUsCached() { return NumberOfCPUsCached; } +template <typename T> +class ArrayRef { + public: + ArrayRef() {} + ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {} + + T *begin() { return begin_; } + T *end() { return end_; } + + private: + T *begin_ = nullptr; + T *end_ = nullptr; +}; + } // namespace __sanitizer inline void *operator new(__sanitizer::operator_new_size_type size, diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc index 2a4ab7e67a5..d7e0bba7629 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc @@ -113,6 +113,7 @@ #define setitimer __setitimer50 #define setlocale __setlocale50 #define shmctl __shmctl50 +#define sigaltstack __sigaltstack14 #define sigemptyset __sigemptyset14 #define sigfillset __sigfillset14 #define sigpending __sigpending14 @@ -133,11 +134,7 @@ extern const short *_tolower_tab_; // Platform-specific options. 
#if SANITIZER_MAC -namespace __sanitizer { -bool PlatformHasDifferentMemcpyAndMemmove(); -} -#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \ - (__sanitizer::PlatformHasDifferentMemcpyAndMemmove()) +#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false #elif SANITIZER_WINDOWS64 #define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false #else @@ -2202,6 +2199,24 @@ INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) { #define INIT_CLOCK_GETTIME #endif +#if SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID +INTERCEPTOR(int, clock_getcpuclockid, pid_t pid, + __sanitizer_clockid_t *clockid) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, clock_getcpuclockid, pid, clockid); + int res = REAL(clock_getcpuclockid)(pid, clockid); + if (!res && clockid) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, clockid, sizeof *clockid); + } + return res; +} + +#define INIT_CLOCK_GETCPUCLOCKID \ + COMMON_INTERCEPT_FUNCTION(clock_getcpuclockid); +#else +#define INIT_CLOCK_GETCPUCLOCKID +#endif + #if SANITIZER_INTERCEPT_GETITIMER INTERCEPTOR(int, getitimer, int which, void *curr_value) { void *ctx; @@ -3095,6 +3110,34 @@ INTERCEPTOR(int, sendmmsg, int fd, struct __sanitizer_mmsghdr *msgvec, #define INIT_SENDMMSG #endif +#if SANITIZER_INTERCEPT_SYSMSG +INTERCEPTOR(int, msgsnd, int msqid, const void *msgp, SIZE_T msgsz, + int msgflg) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, msgsnd, msqid, msgp, msgsz, msgflg); + if (msgp) + COMMON_INTERCEPTOR_READ_RANGE(ctx, msgp, sizeof(long) + msgsz); + int res = REAL(msgsnd)(msqid, msgp, msgsz, msgflg); + return res; +} + +INTERCEPTOR(SSIZE_T, msgrcv, int msqid, void *msgp, SIZE_T msgsz, + long msgtyp, int msgflg) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, msgrcv, msqid, msgp, msgsz, msgtyp, msgflg); + SSIZE_T len = REAL(msgrcv)(msqid, msgp, msgsz, msgtyp, msgflg); + if (len != -1) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msgp, sizeof(long) + len); + return len; +} + +#define INIT_SYSMSG \ + COMMON_INTERCEPT_FUNCTION(msgsnd); \ + COMMON_INTERCEPT_FUNCTION(msgrcv); +#else +#define INIT_SYSMSG +#endif + #if SANITIZER_INTERCEPT_GETPEERNAME INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) { void *ctx; @@ -6432,12 +6475,11 @@ INTERCEPTOR(SSIZE_T, recvfrom, int fd, void *buf, SIZE_T len, int flags, if (srcaddr) srcaddr_sz = *addrlen; (void)srcaddr_sz; // prevent "set but not used" warning SSIZE_T res = REAL(recvfrom)(fd, buf, len, flags, srcaddr, addrlen); - if (res > 0) { + if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len)); - if (srcaddr) - COMMON_INTERCEPTOR_INITIALIZE_RANGE(srcaddr, - Min((SIZE_T)*addrlen, srcaddr_sz)); - } + if (res >= 0 && srcaddr) + COMMON_INTERCEPTOR_INITIALIZE_RANGE(srcaddr, + Min((SIZE_T)*addrlen, srcaddr_sz)); return res; } #define INIT_RECV_RECVFROM \ @@ -7275,23 +7317,26 @@ INTERCEPTOR(int, setttyentpath, char *path) { #endif #if SANITIZER_INTERCEPT_PROTOENT -INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) { - void *ctx; - COMMON_INTERCEPTOR_ENTER(ctx, getprotoent); - struct __sanitizer_protoent *p = REAL(getprotoent)(); - if (p) { - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); +static void write_protoent(void *ctx, struct __sanitizer_protoent *p) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1); - SIZE_T pp_size = 1; // One handles the trailing \0 + SIZE_T pp_size = 1; // One handles the trailing \0 - for (char **pp = 
p->p_aliases; *pp; ++pp, ++pp_size) - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1); + for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1); - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, - pp_size * sizeof(char **)); - } + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, + pp_size * sizeof(char **)); +} + +INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getprotoent); + struct __sanitizer_protoent *p = REAL(getprotoent)(); + if (p) + write_protoent(ctx, p); return p; } @@ -7301,19 +7346,8 @@ INTERCEPTOR(struct __sanitizer_protoent *, getprotobyname, const char *name) { if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); struct __sanitizer_protoent *p = REAL(getprotobyname)(name); - if (p) { - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); - - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1); - - SIZE_T pp_size = 1; // One handles the trailing \0 - - for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size) - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1); - - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, - pp_size * sizeof(char **)); - } + if (p) + write_protoent(ctx, p); return p; } @@ -7321,19 +7355,8 @@ INTERCEPTOR(struct __sanitizer_protoent *, getprotobynumber, int proto) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber, proto); struct __sanitizer_protoent *p = REAL(getprotobynumber)(proto); - if (p) { - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); - - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1); - - SIZE_T pp_size = 1; // One handles the trailing \0 - - for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size) - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1); - - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, - pp_size * sizeof(char **)); - } + if (p) + write_protoent(ctx, p); return p; } #define INIT_PROTOENT \ @@ -7344,6 +7367,58 @@ INTERCEPTOR(struct __sanitizer_protoent *, getprotobynumber, int proto) { #define INIT_PROTOENT #endif +#if SANITIZER_INTERCEPT_PROTOENT_R +INTERCEPTOR(int, getprotoent_r, struct __sanitizer_protoent *result_buf, + char *buf, SIZE_T buflen, struct __sanitizer_protoent **result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getprotoent_r, result_buf, buf, buflen, + result); + int res = REAL(getprotoent_r)(result_buf, buf, buflen, result); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result); + if (!res && *result) + write_protoent(ctx, *result); + return res; +} + +INTERCEPTOR(int, getprotobyname_r, const char *name, + struct __sanitizer_protoent *result_buf, char *buf, SIZE_T buflen, + struct __sanitizer_protoent **result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname_r, name, result_buf, buf, + buflen, result); + if (name) + COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); + int res = REAL(getprotobyname_r)(name, result_buf, buf, buflen, result); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result); + if (!res && *result) + write_protoent(ctx, *result); + return res; +} + +INTERCEPTOR(int, getprotobynumber_r, int num, + struct __sanitizer_protoent *result_buf, char *buf, + SIZE_T buflen, struct __sanitizer_protoent **result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber_r, num, result_buf, buf, + buflen, result); + int res = REAL(getprotobynumber_r)(num, result_buf, buf, buflen, result); + + 
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result); + if (!res && *result) + write_protoent(ctx, *result); + return res; +} + +#define INIT_PROTOENT_R \ + COMMON_INTERCEPT_FUNCTION(getprotoent_r); \ + COMMON_INTERCEPT_FUNCTION(getprotobyname_r); \ + COMMON_INTERCEPT_FUNCTION(getprotobynumber_r); +#else +#define INIT_PROTOENT_R +#endif + #if SANITIZER_INTERCEPT_NETENT INTERCEPTOR(struct __sanitizer_netent *, getnetent) { void *ctx; @@ -9731,6 +9806,59 @@ INTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size, #define INIT_QSORT_R #endif +#if SANITIZER_INTERCEPT_SIGALTSTACK +INTERCEPTOR(int, sigaltstack, void *ss, void *oss) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, sigaltstack, ss, oss); + int r = REAL(sigaltstack)(ss, oss); + if (r == 0 && oss != nullptr) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oss, struct_stack_t_sz); + } + return r; +} +#define INIT_SIGALTSTACK COMMON_INTERCEPT_FUNCTION(sigaltstack) +#else +#define INIT_SIGALTSTACK +#endif + +#if SANITIZER_INTERCEPT_UNAME +INTERCEPTOR(int, uname, struct utsname *utsname) { +#if SANITIZER_LINUX + if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) + return internal_uname(utsname); +#endif + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, uname, utsname); + int res = REAL(uname)(utsname); + if (!res) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, utsname, + __sanitizer::struct_utsname_sz); + return res; +} +#define INIT_UNAME COMMON_INTERCEPT_FUNCTION(uname) +#else +#define INIT_UNAME +#endif + +#if SANITIZER_INTERCEPT___XUNAME +// FreeBSD's <sys/utsname.h> define uname() as +// static __inline int uname(struct utsname *name) { +// return __xuname(SYS_NMLN, (void*)name); +// } +INTERCEPTOR(int, __xuname, int size, void *utsname) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, __xuname, size, utsname); + int res = REAL(__xuname)(size, utsname); + if (!res) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, utsname, + __sanitizer::struct_utsname_sz); + return res; +} +#define INIT___XUNAME COMMON_INTERCEPT_FUNCTION(__xuname) +#else +#define INIT___XUNAME +#endif + #include "sanitizer_common_interceptors_netbsd_compat.inc" static void InitializeCommonInterceptors() { @@ -9804,6 +9932,7 @@ static void InitializeCommonInterceptors() { INIT_FGETGRENT_R; INIT_SETPWENT; INIT_CLOCK_GETTIME; + INIT_CLOCK_GETCPUCLOCKID; INIT_GETITIMER; INIT_TIME; INIT_GLOB; @@ -9830,6 +9959,7 @@ static void InitializeCommonInterceptors() { INIT_SENDMSG; INIT_RECVMMSG; INIT_SENDMMSG; + INIT_SYSMSG; INIT_GETPEERNAME; INIT_IOCTL; INIT_INET_ATON; @@ -9993,6 +10123,7 @@ static void InitializeCommonInterceptors() { INIT_STRMODE; INIT_TTYENT; INIT_PROTOENT; + INIT_PROTOENT_R; INIT_NETENT; INIT_GETMNTINFO; INIT_MI_VECTOR_HASH; @@ -10036,6 +10167,9 @@ static void InitializeCommonInterceptors() { INIT_GETENTROPY; INIT_QSORT; INIT_QSORT_R; + INIT_SIGALTSTACK; + INIT_UNAME; + INIT___XUNAME; INIT___PRINTF_CHK; } diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp index 27d6a177760..0c918ebb4a9 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp @@ -30,7 +30,7 @@ SANITIZER_WEAK_ATTRIBUTE StackDepotStats *StackDepotGetStats() { return nullptr; } -void BackgroundThread(void *arg) { +void *BackgroundThread(void *arg) { const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb; const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb; const bool heap_profile = 
common_flags()->heap_profile; @@ -129,6 +129,16 @@ void SetSandboxingCallback(void (*f)()) { sandboxing_callback = f; } +uptr ReservedAddressRange::InitAligned(uptr size, uptr align, + const char *name) { + CHECK(IsPowerOfTwo(align)); + if (align <= GetPageSizeCached()) + return Init(size, name); + uptr start = Init(size + align, name); + start += align - (start & (align - 1)); + return start; +} + } // namespace __sanitizer SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify, diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc index 31ff48cfd2c..532ac9ead34 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc @@ -2885,6 +2885,23 @@ POST_SYSCALL(getrandom)(long res, void *buf, uptr count, long flags) { POST_WRITE(buf, res); } } + +PRE_SYSCALL(sigaltstack)(const void *ss, void *oss) { + if (ss != nullptr) { + PRE_READ(ss, struct_stack_t_sz); + } + if (oss != nullptr) { + PRE_WRITE(oss, struct_stack_t_sz); + } +} + +POST_SYSCALL(sigaltstack)(long res, void *ss, void *oss) { + if (res == 0) { + if (oss != nullptr) { + POST_WRITE(oss, struct_stack_t_sz); + } + } +} } // extern "C" #undef PRE_SYSCALL diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp index f18cee66b84..a52db08433e 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp @@ -27,15 +27,15 @@ #include "sanitizer_platform.h" #if SANITIZER_FUCHSIA +#include <zircon/process.h> +#include <zircon/sanitizer.h> +#include <zircon/syscalls.h> + #include "sanitizer_atomic.h" #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" #include "sanitizer_symbolizer_fuchsia.h" -#include <zircon/process.h> -#include <zircon/sanitizer.h> -#include <zircon/syscalls.h> - using namespace __sanitizer; namespace __sancov { @@ -82,7 +82,8 @@ class TracePcGuardController final { void TracePcGuard(u32 *guard, uptr pc) { atomic_uint32_t *guard_ptr = reinterpret_cast<atomic_uint32_t *>(guard); u32 idx = atomic_exchange(guard_ptr, 0, memory_order_relaxed); - if (idx > 0) array_[idx] = pc; + if (idx > 0) + array_[idx] = pc; } void Dump() { @@ -140,6 +141,10 @@ class TracePcGuardController final { internal_getpid()); _zx_object_set_property(vmo_, ZX_PROP_NAME, vmo_name_, internal_strlen(vmo_name_)); + uint64_t size = DataSize(); + status = _zx_object_set_property(vmo_, ZX_PROP_VMO_CONTENT_SIZE, &size, + sizeof(size)); + CHECK_EQ(status, ZX_OK); // Map the largest possible view we might need into the VMO. 
Later // we might need to increase the VMO's size before we can use larger @@ -172,6 +177,10 @@ class TracePcGuardController final { zx_status_t status = _zx_vmo_set_size(vmo_, DataSize()); CHECK_EQ(status, ZX_OK); + uint64_t size = DataSize(); + status = _zx_object_set_property(vmo_, ZX_PROP_VMO_CONTENT_SIZE, &size, + sizeof(size)); + CHECK_EQ(status, ZX_OK); return first_index; } @@ -204,13 +213,15 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(const uptr *pcs, } SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *guard) { - if (!*guard) return; + if (!*guard) + return; __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1); } SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init, u32 *start, u32 *end) { - if (start == end || *start) return; + if (start == end || *start) + return; __sancov::pc_guard_controller.InitTracePcGuard(start, end); } diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_interface.inc b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_interface.inc index 7beeff7e8af..d7ab0c3d98c 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_interface.inc +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_interface.inc @@ -29,4 +29,5 @@ INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard_init) INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_indir) INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_switch) INTERFACE_WEAK_FUNCTION(__sanitizer_cov_8bit_counters_init) +INTERFACE_WEAK_FUNCTION(__sanitizer_cov_bool_flag_init) INTERFACE_WEAK_FUNCTION(__sanitizer_cov_pcs_init) diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp index 6a75792f926..73ebeb5fa14 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp @@ -207,6 +207,7 @@ SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_8bit_counters_init, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_bool_flag_init, void) {} SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, void) {} } // extern "C" // Weak definition for code instrumented with -fsanitize-coverage=stack-depth diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_file.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_file.h index 4a78a0e0ac8..26681f0493d 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_file.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_file.h @@ -87,8 +87,8 @@ bool IsAbsolutePath(const char *path); // The child process will close all fds after STDERR_FILENO // before passing control to a program. pid_t StartSubprocess(const char *filename, const char *const argv[], - fd_t stdin_fd = kInvalidFd, fd_t stdout_fd = kInvalidFd, - fd_t stderr_fd = kInvalidFd); + const char *const envp[], fd_t stdin_fd = kInvalidFd, + fd_t stdout_fd = kInvalidFd, fd_t stderr_fd = kInvalidFd); // Checks if specified process is still running bool IsProcessRunning(pid_t pid); // Waits for the process to finish and returns its exit code. 
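The interceptors introduced above (the protoent family, sigaltstack, uname, __xuname) all share one shape: announce the call to the tool, read-check any caller-supplied inputs, forward to the uninstrumented libc function, and on success mark the returned memory as written so the sanitizer treats it as initialized. A minimal sketch of that shape follows; `foo` and `struct_foo_sz` are hypothetical stand-ins, and the COMMON_INTERCEPTOR_* hooks are supplied by whichever tool (ASan, MSan, TSan) includes the .inc file, so this is a pattern illustration rather than a standalone program:

    // Sketch only -- `foo` and `struct_foo_sz` are placeholders.
    #if SANITIZER_INTERCEPT_FOO
    INTERCEPTOR(int, foo, void *out) {
      void *ctx;
      COMMON_INTERCEPTOR_ENTER(ctx, foo, out);  // let the tool see the call
      int res = REAL(foo)(out);                 // call the real libc function
      if (!res && out)                          // success: mark output defined
        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, out, struct_foo_sz);
      return res;
    }
    #define INIT_FOO COMMON_INTERCEPT_FUNCTION(foo)
    #else
    #define INIT_FOO
    #endif

The matching INIT_FOO macro is then invoked from InitializeCommonInterceptors(), which is exactly what the INIT_PROTOENT_R, INIT_SIGALTSTACK, INIT_UNAME and INIT___XUNAME additions above do.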
diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_freebsd.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_freebsd.h index 64cb21f1c3d..82b227eab6d 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_freebsd.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_freebsd.h @@ -19,11 +19,11 @@ // x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in // 32-bit mode. #if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) -# include <osreldate.h> -# if __FreeBSD_version <= 902001 // v9.2 -# include <link.h> -# include <sys/param.h> -# include <ucontext.h> +#include <osreldate.h> +#if __FreeBSD_version <= 902001 // v9.2 +#include <link.h> +#include <sys/param.h> +#include <ucontext.h> namespace __sanitizer { @@ -68,8 +68,8 @@ typedef struct __xmcontext { } xmcontext_t; typedef struct __xucontext { - sigset_t uc_sigmask; - xmcontext_t uc_mcontext; + sigset_t uc_sigmask; + xmcontext_t uc_mcontext; struct __ucontext *uc_link; stack_t uc_stack; @@ -122,15 +122,16 @@ struct xdl_phdr_info { void *dlpi_tls_data; }; -typedef int (*__xdl_iterate_hdr_callback)(struct xdl_phdr_info*, size_t, void*); -typedef int xdl_iterate_phdr_t(__xdl_iterate_hdr_callback, void*); +typedef int (*__xdl_iterate_hdr_callback)(struct xdl_phdr_info *, size_t, + void *); +typedef int xdl_iterate_phdr_t(__xdl_iterate_hdr_callback, void *); #define xdl_iterate_phdr(callback, param) \ - (((xdl_iterate_phdr_t*) dl_iterate_phdr)((callback), (param))) + (((xdl_iterate_phdr_t *)dl_iterate_phdr)((callback), (param))) } // namespace __sanitizer -# endif // __FreeBSD_version <= 902001 +#endif // __FreeBSD_version <= 902001 #endif // SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) #endif // SANITIZER_FREEBSD_H diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp index 6e2c6137f0c..6d1ad794677 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp @@ -66,6 +66,10 @@ uptr internal_getpid() { return pid; } +int internal_dlinfo(void *handle, int request, void *p) { + UNIMPLEMENTED(); +} + uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); } tid_t GetTid() { return GetThreadSelf(); } diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.h index 5a2ad32b411..96f9cde7ef1 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.h @@ -18,12 +18,18 @@ #include "sanitizer_common.h" #include <zircon/sanitizer.h> +#include <zircon/syscalls/object.h> namespace __sanitizer { extern uptr MainThreadStackBase, MainThreadStackSize; extern sanitizer_shadow_bounds_t ShadowBounds; +struct MemoryMappingLayoutData { + InternalMmapVector<zx_info_maps_t> data; + size_t current; // Current index into the vector. 
+}; + } // namespace __sanitizer #endif // SANITIZER_FUCHSIA diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc index d0cc4da9755..576807ea3a6 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc @@ -446,9 +446,6 @@ static void ioctl_table_fill() { _(STICIO_STOPQ, NONE, 0); /* Entries from file: dev/usb/ukyopon.h */ _(UKYOPON_IDENTIFY, WRITE, struct_ukyopon_identify_sz); - /* Entries from file: dev/usb/urio.h */ - _(URIO_SEND_COMMAND, READWRITE, struct_urio_command_sz); - _(URIO_RECV_COMMAND, READWRITE, struct_urio_command_sz); /* Entries from file: dev/usb/usb.h */ _(USB_REQUEST, READWRITE, struct_usb_ctl_request_sz); _(USB_SETDEBUG, READ, sizeof(int)); @@ -1405,6 +1402,9 @@ static void ioctl_table_fill() { /* Entries from file: dev/filemon/filemon.h (compat <= 9.99.26) */ _(FILEMON_SET_FD, READWRITE, sizeof(int)); _(FILEMON_SET_PID, READWRITE, sizeof(int)); + /* Entries from file: dev/usb/urio.h (compat <= 9.99.43) */ + _(URIO_SEND_COMMAND, READWRITE, struct_urio_command_sz); + _(URIO_RECV_COMMAND, READWRITE, struct_urio_command_sz); #undef _ } // NOLINT diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h index c110eff130f..be8023e9e16 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h @@ -109,8 +109,10 @@ extern "C" { __sanitizer::u32*); SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void __sanitizer_cov_8bit_counters_init(); - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_cov_pcs_init(); + SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void + __sanitizer_cov_bool_flag_init(); + SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void + __sanitizer_cov_pcs_init(); } // extern "C" #endif // SANITIZER_INTERFACE_INTERNAL_H diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_libc.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_libc.h index 3d5db35d68b..ec0a6ded009 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_libc.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_libc.h @@ -72,6 +72,8 @@ unsigned int internal_sleep(unsigned int seconds); uptr internal_getpid(); uptr internal_getppid(); +int internal_dlinfo(void *handle, int request, void *p); + // Threading uptr internal_sched_yield(); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp index 84453f1bd30..470f4b70f05 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp @@ -26,7 +26,7 @@ #include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" -#if SANITIZER_LINUX +#if SANITIZER_LINUX && !SANITIZER_GO #include <asm/param.h> #endif @@ -552,13 +552,14 @@ const char *GetEnv(const char *name) { #endif } -#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_OPENBSD +#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_OPENBSD && \ + !SANITIZER_GO extern "C" { SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end; } #endif -#if !SANITIZER_GO && !SANITIZER_FREEBSD && !SANITIZER_NETBSD 
&& \ +#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && \ !SANITIZER_OPENBSD static void ReadNullSepFileToArray(const char *path, char ***arr, int arr_size) { @@ -604,16 +605,21 @@ static void GetArgsAndEnv(char ***argv, char ***envp) { #else // SANITIZER_FREEBSD #if !SANITIZER_GO if (&__libc_stack_end) { -#endif // !SANITIZER_GO uptr* stack_end = (uptr*)__libc_stack_end; - int argc = *stack_end; + // Normally argc can be obtained from *stack_end, however, on ARM glibc's + // _start clobbers it: + // https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/arm/start.S;hb=refs/heads/release/2.31/master#l75 + // Do not special-case ARM and infer argc from argv everywhere. + int argc = 0; + while (stack_end[argc + 1]) argc++; *argv = (char**)(stack_end + 1); *envp = (char**)(stack_end + argc + 2); -#if !SANITIZER_GO } else { +#endif // !SANITIZER_GO static const int kMaxArgv = 2000, kMaxEnvp = 2000; ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv); ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp); +#if !SANITIZER_GO } #endif // !SANITIZER_GO #endif // SANITIZER_FREEBSD @@ -735,6 +741,14 @@ uptr internal_getppid() { return internal_syscall(SYSCALL(getppid)); } +int internal_dlinfo(void *handle, int request, void *p) { +#if SANITIZER_FREEBSD + return dlinfo(handle, request, p); +#else + UNIMPLEMENTED(); +#endif +} + uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL); @@ -847,9 +861,8 @@ uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set, #else __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set; __sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset; - return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how, - (uptr)&k_set->sig[0], (uptr)&k_oldset->sig[0], - sizeof(__sanitizer_kernel_sigset_t)); + return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how, (uptr)k_set, + (uptr)k_oldset, sizeof(__sanitizer_kernel_sigset_t)); #endif } @@ -1006,9 +1019,8 @@ static uptr GetKernelAreaSize() { // is modified (e.g. under schroot) so check this as well. struct utsname uname_info; int pers = personality(0xffffffffUL); - if (!(pers & PER_MASK) - && uname(&uname_info) == 0 - && internal_strstr(uname_info.machine, "64")) + if (!(pers & PER_MASK) && internal_uname(&uname_info) == 0 && + internal_strstr(uname_info.machine, "64")) return 0; #endif // SANITIZER_ANDROID @@ -1063,7 +1075,8 @@ uptr GetMaxUserVirtualAddress() { #if !SANITIZER_ANDROID uptr GetPageSize() { -#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__)) +#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__)) && \ + defined(EXEC_PAGESIZE) return EXEC_PAGESIZE; #elif SANITIZER_FREEBSD || SANITIZER_NETBSD // Use sysctl as sysconf can trigger interceptors internally. 
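The GetArgsAndEnv change above leans on the System V ABI layout of the initial process stack: __libc_stack_end points at a word holding argc, followed by argc argv pointers, a NULL, the envp pointers, and another NULL. Because glibc's ARM _start clobbers the argc slot, the patch recovers argc by scanning argv for its NULL terminator instead. A self-contained sketch of that walk, using a fake stack image as illustrative data rather than the live process stack:

    #include <cstdio>

    int main() {
      // Fake initial-stack image: [argc][argv0][argv1][NULL][env0][NULL]
      const char *a0 = "prog", *a1 = "-v", *e0 = "HOME=/root";
      void *stack_end[] = {(void *)99 /* clobbered argc slot */,
                           (void *)a0, (void *)a1, nullptr,
                           (void *)e0, nullptr};
      // Infer argc by counting argv entries up to their NULL terminator,
      // exactly as the patched GetArgsAndEnv does.
      int argc = 0;
      while (stack_end[argc + 1]) argc++;
      char **argv = (char **)(stack_end + 1);
      char **envp = (char **)(stack_end + argc + 2);
      printf("argc=%d argv[0]=%s envp[0]=%s\n", argc, argv[0], envp[0]);
      // Prints: argc=2 argv[0]=prog envp[0]=HOME=/root
    }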
@@ -1619,6 +1632,12 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, } #endif // defined(__x86_64__) && SANITIZER_LINUX +#if SANITIZER_LINUX +int internal_uname(struct utsname *buf) { + return internal_syscall(SYSCALL(uname), buf); +} +#endif + #if SANITIZER_ANDROID #if __ANDROID_API__ < 21 extern "C" __attribute__((weak)) int dl_iterate_phdr( @@ -1701,7 +1720,7 @@ HandleSignalMode GetHandleSignalMode(int signum) { } #if !SANITIZER_GO -void *internal_start_thread(void(*func)(void *arg), void *arg) { +void *internal_start_thread(void *(*func)(void *arg), void *arg) { // Start the thread with signals blocked, otherwise it can steal user signals. __sanitizer_sigset_t set, old; internal_sigfillset(&set); @@ -1712,7 +1731,7 @@ void *internal_start_thread(void(*func)(void *arg), void *arg) { #endif internal_sigprocmask(SIG_SETMASK, &set, &old); void *th; - real_pthread_create(&th, nullptr, (void*(*)(void *arg))func, arg); + real_pthread_create(&th, nullptr, func, arg); internal_sigprocmask(SIG_SETMASK, &old, nullptr); return th; } @@ -1721,7 +1740,7 @@ void internal_join_thread(void *th) { real_pthread_join(th, nullptr); } #else -void *internal_start_thread(void (*func)(void *), void *arg) { return 0; } +void *internal_start_thread(void *(*func)(void *), void *arg) { return 0; } void internal_join_thread(void *th) {} #endif @@ -1846,6 +1865,105 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { #endif u32 instr = *(u32 *)pc; return (instr >> 21) & 1 ? WRITE: READ; +#elif defined(__riscv) + unsigned long pc = ucontext->uc_mcontext.__gregs[REG_PC]; + unsigned faulty_instruction = *(uint16_t *)pc; + +#if defined(__riscv_compressed) + if ((faulty_instruction & 0x3) != 0x3) { // it's a compressed instruction + // set op_bits to the instruction bits [1, 0, 15, 14, 13] + unsigned op_bits = + ((faulty_instruction & 0x3) << 3) | (faulty_instruction >> 13); + unsigned rd = faulty_instruction & 0xF80; // bits 7-11, inclusive + switch (op_bits) { + case 0b10'010: // c.lwsp (rd != x0) +#if __riscv_xlen == 64 + case 0b10'011: // c.ldsp (rd != x0) +#endif + return rd ? 
SignalContext::READ : SignalContext::UNKNOWN; + case 0b00'010: // c.lw +#if __riscv_flen >= 32 && __riscv_xlen == 32 + case 0b10'011: // c.flwsp +#endif +#if __riscv_flen >= 32 || __riscv_xlen == 64 + case 0b00'011: // c.flw / c.ld +#endif +#if __riscv_flen == 64 + case 0b00'001: // c.fld + case 0b10'001: // c.fldsp +#endif + return SignalContext::READ; + case 0b00'110: // c.sw + case 0b10'110: // c.swsp +#if __riscv_flen >= 32 || __riscv_xlen == 64 + case 0b00'111: // c.fsw / c.sd + case 0b10'111: // c.fswsp / c.sdsp +#endif +#if __riscv_flen == 64 + case 0b00'101: // c.fsd + case 0b10'101: // c.fsdsp +#endif + return SignalContext::WRITE; + default: + return SignalContext::UNKNOWN; + } + } +#endif + + unsigned opcode = faulty_instruction & 0x7f; // lower 7 bits + unsigned funct3 = (faulty_instruction >> 12) & 0x7; // bits 12-14, inclusive + switch (opcode) { + case 0b0000011: // loads + switch (funct3) { + case 0b000: // lb + case 0b001: // lh + case 0b010: // lw +#if __riscv_xlen == 64 + case 0b011: // ld +#endif + case 0b100: // lbu + case 0b101: // lhu + return SignalContext::READ; + default: + return SignalContext::UNKNOWN; + } + case 0b0100011: // stores + switch (funct3) { + case 0b000: // sb + case 0b001: // sh + case 0b010: // sw +#if __riscv_xlen == 64 + case 0b011: // sd +#endif + return SignalContext::WRITE; + default: + return SignalContext::UNKNOWN; + } +#if __riscv_flen >= 32 + case 0b0000111: // floating-point loads + switch (funct3) { + case 0b010: // flw +#if __riscv_flen == 64 + case 0b011: // fld +#endif + return SignalContext::READ; + default: + return SignalContext::UNKNOWN; + } + case 0b0100111: // floating-point stores + switch (funct3) { + case 0b010: // fsw +#if __riscv_flen == 64 + case 0b011: // fsd +#endif + return SignalContext::WRITE; + default: + return SignalContext::UNKNOWN; + } +#endif + default: + return SignalContext::UNKNOWN; + } #else (void)ucontext; return UNKNOWN; // FIXME: Implement. @@ -1924,13 +2042,13 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { # ifndef REG_EBP # define REG_EBP 6 // REG_FP # endif -# ifndef REG_ESP -# define REG_ESP 17 // REG_SP +# ifndef REG_UESP +# define REG_UESP 17 // REG_SP # endif # endif *pc = ucontext->uc_mcontext.gregs[REG_EIP]; *bp = ucontext->uc_mcontext.gregs[REG_EBP]; - *sp = ucontext->uc_mcontext.gregs[REG_ESP]; + *sp = ucontext->uc_mcontext.gregs[REG_UESP]; # endif #elif defined(__powerpc__) || defined(__powerpc64__) ucontext_t *ucontext = (ucontext_t*)context; @@ -2011,7 +2129,9 @@ void CheckASLR() { } if (UNLIKELY(paxflags & CTL_PROC_PAXFLAGS_ASLR)) { - Printf("This sanitizer is not compatible with enabled ASLR\n"); + Printf("This sanitizer is not compatible with enabled ASLR.\n" + "To disable ASLR, please run \"paxctl +a %s\" and try again.\n", + GetArgv()[0]); Die(); } #elif SANITIZER_PPC64V2 @@ -2090,7 +2210,7 @@ void CheckNoDeepBind(const char *filename, int flag) { if (flag & RTLD_DEEPBIND) { Report( "You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag" - " which is incompatibe with sanitizer runtime " + " which is incompatible with sanitizer runtime " "(see https://github.com/google/sanitizers/issues/611 for details" "). 
If you want to run %s library under sanitizers please remove " "RTLD_DEEPBIND from dlopen flags.\n", diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux.h index c28347ad963..c162d1ca5d2 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux.h @@ -25,6 +25,7 @@ #include "sanitizer_posix.h" struct link_map; // Opaque type returned by dlopen(). +struct utsname; namespace __sanitizer { // Dirent structure for getdents(). Note that this structure is different from @@ -65,6 +66,7 @@ void internal_sigdelset(__sanitizer_sigset_t *set, int signum); uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr); #endif +int internal_uname(struct utsname *buf); #elif SANITIZER_FREEBSD void internal_sigdelset(__sanitizer_sigset_t *set, int signum); #elif SANITIZER_NETBSD diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp index edbe8402808..4d17c9686e4 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp @@ -35,6 +35,10 @@ #include <sys/resource.h> #include <syslog.h> +#if !defined(ElfW) +#define ElfW(type) Elf_##type +#endif + #if SANITIZER_FREEBSD #include <pthread_np.h> #include <osreldate.h> diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp index 9e3b4f13a43..bb2f5b5f9f7 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp @@ -15,14 +15,15 @@ #if SANITIZER_LINUX && SANITIZER_S390 -#include "sanitizer_libc.h" -#include "sanitizer_linux.h" - +#include <dlfcn.h> #include <errno.h> #include <sys/syscall.h> #include <sys/utsname.h> #include <unistd.h> +#include "sanitizer_libc.h" +#include "sanitizer_linux.h" + namespace __sanitizer { // --------------- sanitizer_libc.h @@ -123,7 +124,7 @@ static bool FixedCVE_2016_2143() { struct utsname buf; unsigned int major, minor, patch = 0; // This should never fail, but just in case... 
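+  // internal_uname issues the uname(2) syscall directly, so this probe
+  // cannot recurse into the sanitizer's own uname interceptor (added
+  // elsewhere in this patch).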
- if (uname(&buf)) + if (internal_uname(&buf)) return false; const char *ptr = buf.release; major = internal_simple_strtoll(ptr, &ptr, 10); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp index ea4bd02aa92..7a3dfbcc276 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp @@ -27,9 +27,9 @@ #include "sanitizer_flags.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" -#include "sanitizer_placement_new.h" #include "sanitizer_platform_limits_posix.h" #include "sanitizer_procmaps.h" +#include "sanitizer_ptrauth.h" #if !SANITIZER_IOS #include <crt_externs.h> // for _NSGetEnviron @@ -208,6 +208,10 @@ uptr internal_getpid() { return getpid(); } +int internal_dlinfo(void *handle, int request, void *p) { + UNIMPLEMENTED(); +} + int internal_sigaction(int signum, const void *act, void *oldact) { return sigaction(signum, (const struct sigaction *)act, (struct sigaction *)oldact); @@ -242,7 +246,8 @@ int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp, (size_t)newlen); } -static fd_t internal_spawn_impl(const char *argv[], pid_t *pid) { +static fd_t internal_spawn_impl(const char *argv[], const char *envp[], + pid_t *pid) { fd_t master_fd = kInvalidFd; fd_t slave_fd = kInvalidFd; @@ -298,8 +303,8 @@ static fd_t internal_spawn_impl(const char *argv[], pid_t *pid) { // posix_spawn char **argv_casted = const_cast<char **>(argv); - char **env = GetEnviron(); - res = posix_spawn(pid, argv[0], &acts, &attrs, argv_casted, env); + char **envp_casted = const_cast<char **>(envp); + res = posix_spawn(pid, argv[0], &acts, &attrs, argv_casted, envp_casted); if (res != 0) return kInvalidFd; // Disable echo in the new terminal, disable CR. @@ -316,7 +321,7 @@ static fd_t internal_spawn_impl(const char *argv[], pid_t *pid) { return fd; } -fd_t internal_spawn(const char *argv[], pid_t *pid) { +fd_t internal_spawn(const char *argv[], const char *envp[], pid_t *pid) { // The client program may close its stdin and/or stdout and/or stderr thus // allowing open/posix_openpt to reuse file descriptors 0, 1 or 2. In this // case the communication is broken if either the parent or the child tries to @@ -331,7 +336,7 @@ fd_t internal_spawn(const char *argv[], pid_t *pid) { break; } - fd_t fd = internal_spawn_impl(argv, pid); + fd_t fd = internal_spawn_impl(argv, envp, pid); for (; count > 0; count--) { internal_close(low_fds[count]); @@ -382,7 +387,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, // pthread_get_stacksize_np() returns an incorrect stack size for the main // thread on Mavericks. See // https://github.com/google/sanitizers/issues/261 - if ((GetMacosVersion() >= MACOS_VERSION_MAVERICKS) && at_initialization && + if ((GetMacosAlignedVersion() >= MacosVersion(10, 9)) && at_initialization && stacksize == (1 << 19)) { struct rlimit rl; CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0); @@ -601,68 +606,59 @@ HandleSignalMode GetHandleSignalMode(int signum) { return result; } -MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED; +// This corresponds to Triple::getMacOSXVersion() in the Clang driver. 
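+// Worked examples of the mapping below: Darwin 18 -> macOS 10.14,
+// Darwin 19 -> macOS 10.15, Darwin 20 -> macOS 11.0, Darwin 21 -> macOS 12.0.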
+static MacosVersion GetMacosAlignedVersionInternal() { + u16 kernel_major = GetDarwinKernelVersion().major; + // Darwin 0-3 -> unsupported + // Darwin 4-19 -> macOS 10.x + // Darwin 20+ -> macOS 11+ + CHECK_GE(kernel_major, 4); + u16 major, minor; + if (kernel_major < 20) { + major = 10; + minor = kernel_major - 4; + } else { + major = 11 + kernel_major - 20; + minor = 0; + } + return MacosVersion(major, minor); +} -MacosVersion GetMacosVersionInternal() { - int mib[2] = { CTL_KERN, KERN_OSRELEASE }; - char version[100]; - uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]); - for (uptr i = 0; i < maxlen; i++) version[i] = '\0'; - // Get the version length. - CHECK_NE(internal_sysctl(mib, 2, 0, &len, 0, 0), -1); - CHECK_LT(len, maxlen); - CHECK_NE(internal_sysctl(mib, 2, version, &len, 0, 0), -1); +static_assert(sizeof(MacosVersion) == sizeof(atomic_uint32_t::Type), + "MacosVersion cache size"); +static atomic_uint32_t cached_macos_version; - // Expect <major>.<minor>(.<patch>) - CHECK_GE(len, 3); - const char *p = version; - int major = internal_simple_strtoll(p, &p, /*base=*/10); - if (*p != '.') return MACOS_VERSION_UNKNOWN; - p += 1; - int minor = internal_simple_strtoll(p, &p, /*base=*/10); - if (*p != '.') return MACOS_VERSION_UNKNOWN; - - switch (major) { - case 9: return MACOS_VERSION_LEOPARD; - case 10: return MACOS_VERSION_SNOW_LEOPARD; - case 11: return MACOS_VERSION_LION; - case 12: return MACOS_VERSION_MOUNTAIN_LION; - case 13: return MACOS_VERSION_MAVERICKS; - case 14: return MACOS_VERSION_YOSEMITE; - case 15: return MACOS_VERSION_EL_CAPITAN; - case 16: return MACOS_VERSION_SIERRA; - case 17: - // Not a typo, 17.5 Darwin Kernel Version maps to High Sierra 10.13.4. - if (minor >= 5) - return MACOS_VERSION_HIGH_SIERRA_DOT_RELEASE_4; - return MACOS_VERSION_HIGH_SIERRA; - case 18: return MACOS_VERSION_MOJAVE; - case 19: return MACOS_VERSION_CATALINA; - default: - if (major < 9) return MACOS_VERSION_UNKNOWN; - return MACOS_VERSION_UNKNOWN_NEWER; +MacosVersion GetMacosAlignedVersion() { + atomic_uint32_t::Type result = + atomic_load(&cached_macos_version, memory_order_acquire); + if (!result) { + MacosVersion version = GetMacosAlignedVersionInternal(); + result = *reinterpret_cast<atomic_uint32_t::Type *>(&version); + atomic_store(&cached_macos_version, result, memory_order_release); } + return *reinterpret_cast<MacosVersion *>(&result); } -MacosVersion GetMacosVersion() { - atomic_uint32_t *cache = - reinterpret_cast<atomic_uint32_t*>(&cached_macos_version); - MacosVersion result = - static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire)); - if (result == MACOS_VERSION_UNINITIALIZED) { - result = GetMacosVersionInternal(); - atomic_store(cache, result, memory_order_release); - } - return result; +void ParseVersion(const char *vers, u16 *major, u16 *minor) { + // Format: <major>.<minor>.<patch>\0 + CHECK_GE(internal_strlen(vers), 5); + const char *p = vers; + *major = internal_simple_strtoll(p, &p, /*base=*/10); + CHECK_EQ(*p, '.'); + p += 1; + *minor = internal_simple_strtoll(p, &p, /*base=*/10); } -bool PlatformHasDifferentMemcpyAndMemmove() { - // On OS X 10.7 memcpy() and memmove() are both resolved - // into memmove$VARIANT$sse42. - // See also https://github.com/google/sanitizers/issues/34. - // TODO(glider): need to check dynamically that memcpy() and memmove() are - // actually the same function. 
- return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD; +DarwinKernelVersion GetDarwinKernelVersion() { + char buf[100]; + size_t len = sizeof(buf); + int res = internal_sysctlbyname("kern.osrelease", buf, &len, nullptr, 0); + CHECK_EQ(res, 0); + + u16 major, minor; + ParseVersion(buf, &major, &minor); + + return DarwinKernelVersion(major, minor); } uptr GetRSS() { @@ -677,13 +673,13 @@ uptr GetRSS() { return info.resident_size; } -void *internal_start_thread(void(*func)(void *arg), void *arg) { +void *internal_start_thread(void *(*func)(void *arg), void *arg) { // Start the thread with signals blocked, otherwise it can steal user signals. __sanitizer_sigset_t set, old; internal_sigfillset(&set); internal_sigprocmask(SIG_SETMASK, &set, &old); pthread_t th; - pthread_create(&th, 0, (void*(*)(void *arg))func, arg); + pthread_create(&th, 0, func, arg); internal_sigprocmask(SIG_SETMASK, &old, 0); return th; } @@ -711,7 +707,7 @@ void LogFullErrorReport(const char *buffer) { #if !SANITIZER_GO // Log with os_trace. This will make it into the crash log. #if SANITIZER_OS_TRACE - if (GetMacosVersion() >= MACOS_VERSION_YOSEMITE) { + if (GetMacosAlignedVersion() >= MacosVersion(10, 10)) { // os_trace requires the message (format parameter) to be a string literal. if (internal_strncmp(SanitizerToolName, "AddressSanitizer", sizeof("AddressSanitizer") - 1) == 0) @@ -760,16 +756,24 @@ bool SignalContext::IsTrueFaultingAddress() const { return si->si_signo == SIGSEGV && si->si_code != 0; } +#if defined(__aarch64__) && defined(arm_thread_state64_get_sp) + #define AARCH64_GET_REG(r) \ + (uptr)ptrauth_strip( \ + (void *)arm_thread_state64_get_##r(ucontext->uc_mcontext->__ss), 0) +#else + #define AARCH64_GET_REG(r) ucontext->uc_mcontext->__ss.__##r +#endif + static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { ucontext_t *ucontext = (ucontext_t*)context; # if defined(__aarch64__) - *pc = ucontext->uc_mcontext->__ss.__pc; + *pc = AARCH64_GET_REG(pc); # if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0 - *bp = ucontext->uc_mcontext->__ss.__fp; + *bp = AARCH64_GET_REG(fp); # else - *bp = ucontext->uc_mcontext->__ss.__lr; + *bp = AARCH64_GET_REG(lr); # endif - *sp = ucontext->uc_mcontext->__ss.__sp; + *sp = AARCH64_GET_REG(sp); # elif defined(__x86_64__) *pc = ucontext->uc_mcontext->__ss.__rip; *bp = ucontext->uc_mcontext->__ss.__rbp; @@ -787,13 +791,16 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { # endif } -void SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); } +void SignalContext::InitPcSpBp() { + addr = (uptr)ptrauth_strip((void *)addr, 0); + GetPcSpBp(context, &pc, &sp, &bp); +} void InitializePlatformEarly() { - // Only use xnu_fast_mmap when on x86_64 and the OS supports it. + // Only use xnu_fast_mmap when on x86_64 and the kernel supports it. use_xnu_fast_mmap = #if defined(__x86_64__) - GetMacosVersion() >= MACOS_VERSION_HIGH_SIERRA_DOT_RELEASE_4; + GetDarwinKernelVersion() >= DarwinKernelVersion(17, 5); #else false; #endif @@ -847,9 +854,9 @@ bool DyldNeedsEnvVariable() { if (!&dyldVersionNumber) return true; // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via - // GetMacosVersion() doesn't work for the simulator. Let's instead check - // `dyldVersionNumber`, which is exported by dyld, against a known version - // number from the first OS release where this appeared. + // GetMacosAlignedVersion() doesn't work for the simulator. 
Let's instead + // check `dyldVersionNumber`, which is exported by dyld, against a known + // version number from the first OS release where this appeared. return dyldVersionNumber < kMinDyldVersionWithAutoInterposition; } @@ -1123,6 +1130,8 @@ void SignalContext::DumpAllRegisters(void *context) { ucontext_t *ucontext = (ucontext_t*)context; # define DUMPREG64(r) \ Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r); +# define DUMPREGA64(r) \ + Printf(" %s = 0x%016llx ", #r, AARCH64_GET_REG(r)); # define DUMPREG32(r) \ Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r); # define DUMPREG_(r) Printf(" "); DUMPREG(r); @@ -1148,7 +1157,7 @@ void SignalContext::DumpAllRegisters(void *context) { DUMPREG(x[16]); DUMPREG(x[17]); DUMPREG(x[18]); DUMPREG(x[19]); Printf("\n"); DUMPREG(x[20]); DUMPREG(x[21]); DUMPREG(x[22]); DUMPREG(x[23]); Printf("\n"); DUMPREG(x[24]); DUMPREG(x[25]); DUMPREG(x[26]); DUMPREG(x[27]); Printf("\n"); - DUMPREG(x[28]); DUMPREG___(fp); DUMPREG___(lr); DUMPREG___(sp); Printf("\n"); + DUMPREG(x[28]); DUMPREGA64(fp); DUMPREGA64(lr); DUMPREGA64(sp); Printf("\n"); # elif defined(__arm__) # define DUMPREG(r) DUMPREG32(r) DUMPREG_(r[0]); DUMPREG_(r[1]); DUMPREG_(r[2]); DUMPREG_(r[3]); Printf("\n"); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mac.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mac.h index 2257883084e..90ecff4815c 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mac.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mac.h @@ -30,25 +30,32 @@ struct MemoryMappingLayoutData { bool current_instrumented; }; -enum MacosVersion { - MACOS_VERSION_UNINITIALIZED = 0, - MACOS_VERSION_UNKNOWN, - MACOS_VERSION_LEOPARD, - MACOS_VERSION_SNOW_LEOPARD, - MACOS_VERSION_LION, - MACOS_VERSION_MOUNTAIN_LION, - MACOS_VERSION_MAVERICKS, - MACOS_VERSION_YOSEMITE, - MACOS_VERSION_EL_CAPITAN, - MACOS_VERSION_SIERRA, - MACOS_VERSION_HIGH_SIERRA, - MACOS_VERSION_HIGH_SIERRA_DOT_RELEASE_4, - MACOS_VERSION_MOJAVE, - MACOS_VERSION_CATALINA, - MACOS_VERSION_UNKNOWN_NEWER +template <typename VersionType> +struct VersionBase { + u16 major; + u16 minor; + + VersionBase(u16 major, u16 minor) : major(major), minor(minor) {} + + bool operator==(const VersionType &other) const { + return major == other.major && minor == other.minor; + } + bool operator>=(const VersionType &other) const { + return major > other.major || + (major == other.major && minor >= other.minor); + } +}; + +struct MacosVersion : VersionBase<MacosVersion> { + MacosVersion(u16 major, u16 minor) : VersionBase(major, minor) {} +}; + +struct DarwinKernelVersion : VersionBase<DarwinKernelVersion> { + DarwinKernelVersion(u16 major, u16 minor) : VersionBase(major, minor) {} }; -MacosVersion GetMacosVersion(); +MacosVersion GetMacosAlignedVersion(); +DarwinKernelVersion GetDarwinKernelVersion(); char **GetEnviron(); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc index 11adbe5c25b..647bcdfe105 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc @@ -61,12 +61,10 @@ INTERCEPTOR(malloc_zone_t *, malloc_create_zone, malloc_zone_t *new_zone = (malloc_zone_t *)p; internal_memcpy(new_zone, &sanitizer_zone, sizeof(sanitizer_zone)); new_zone->zone_name = NULL; // The name will be changed anyway. 
- if (GetMacosVersion() >= MACOS_VERSION_LION) { - // Prevent the client app from overwriting the zone contents. - // Library functions that need to modify the zone will set PROT_WRITE on it. - // This matches the behavior of malloc_create_zone() on OSX 10.7 and higher. - mprotect(new_zone, allocated_size, PROT_READ); - } + // Prevent the client app from overwriting the zone contents. + // Library functions that need to modify the zone will set PROT_WRITE on it. + // This matches the behavior of malloc_create_zone() on OSX 10.7 and higher. + mprotect(new_zone, allocated_size, PROT_READ); // We're explicitly *NOT* registering the zone. return new_zone; } @@ -75,11 +73,9 @@ INTERCEPTOR(void, malloc_destroy_zone, malloc_zone_t *zone) { COMMON_MALLOC_ENTER(); // We don't need to do anything here. We're not registering new zones, so we // don't to unregister. Just un-mprotect and free() the zone. - if (GetMacosVersion() >= MACOS_VERSION_LION) { - uptr page_size = GetPageSizeCached(); - uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size); - mprotect(zone, allocated_size, PROT_READ | PROT_WRITE); - } + uptr page_size = GetPageSizeCached(); + uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size); + mprotect(zone, allocated_size, PROT_READ | PROT_WRITE); if (zone->zone_name) { COMMON_MALLOC_FREE((void *)zone->zone_name); } diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp index 49a951e04b3..d9aff51d8ae 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp @@ -265,6 +265,11 @@ uptr internal_getppid() { return _REAL(getppid); } +int internal_dlinfo(void *handle, int request, void *p) { + DEFINE__REAL(int, dlinfo, void *a, int b, void *c); + return _REAL(dlinfo, handle, request, p); +} + uptr internal_getdents(fd_t fd, void *dirp, unsigned int count) { DEFINE__REAL(int, __getdents30, int a, void *b, size_t c); return _REAL(__getdents30, fd, dirp, count); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform.h index c68bfa25875..f0b1e04d1dd 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform.h @@ -132,6 +132,12 @@ # define SANITIZER_X32 0 #endif +#if defined(__i386__) || defined(_M_IX86) +# define SANITIZER_I386 1 +#else +# define SANITIZER_I386 0 +#endif + #if defined(__mips__) # define SANITIZER_MIPS 1 # if defined(__mips64) diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h index 4cc69af1241..e28bb937ae8 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h @@ -240,6 +240,7 @@ (SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_CLOCK_GETTIME \ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX || SI_SOLARIS) +#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX #define SANITIZER_INTERCEPT_GETITIMER SI_POSIX #define SANITIZER_INTERCEPT_TIME SI_POSIX #define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID || SI_SOLARIS @@ -270,6 +271,7 @@ #define SANITIZER_INTERCEPT_SENDMSG SI_POSIX #define SANITIZER_INTERCEPT_RECVMMSG SI_LINUX #define SANITIZER_INTERCEPT_SENDMMSG 
SI_LINUX +#define SANITIZER_INTERCEPT_SYSMSG SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_GETPEERNAME SI_POSIX #define SANITIZER_INTERCEPT_IOCTL SI_POSIX #define SANITIZER_INTERCEPT_INET_ATON SI_POSIX @@ -341,7 +343,7 @@ #define SANITIZER_INTERCEPT_STATFS \ (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_STATFS64 \ - ((SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID) + (((SI_MAC && !TARGET_CPU_ARM64) && !SI_IOS) || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_STATVFS \ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID @@ -544,7 +546,8 @@ #define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD || SI_FREEBSD) #define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD) #define SANITIZER_INTERCEPT_TTYENT SI_NETBSD -#define SANITIZER_INTERCEPT_PROTOENT SI_NETBSD +#define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX) +#define SANITIZER_INTERCEPT_PROTOENT_R (SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_NETENT SI_NETBSD #define SANITIZER_INTERCEPT_SETVBUF (SI_NETBSD || SI_FREEBSD || \ SI_LINUX || SI_MAC) @@ -594,7 +597,13 @@ #define SANITIZER_INTERCEPT_PTHREAD_ATFORK SI_NETBSD #define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD #define SANITIZER_INTERCEPT_QSORT \ - (SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS) + (SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID) #define SANITIZER_INTERCEPT_QSORT_R (SI_LINUX && !SI_ANDROID) +// sigaltstack on i386 macOS cannot be intercepted due to setjmp() +// calling it and assuming that it does not clobber registers. +#define SANITIZER_INTERCEPT_SIGALTSTACK \ + (SI_POSIX && !(SANITIZER_MAC && SANITIZER_I386)) +#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD) +#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD #endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp index 2d1bb1a12da..dcc6c71c07d 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp @@ -15,342 +15,348 @@ #if SANITIZER_FREEBSD +#include <sys/capsicum.h> +#include <sys/consio.h> +#include <sys/filio.h> +#include <sys/ipc.h> +#include <sys/kbio.h> +#include <sys/link_elf.h> +#include <sys/mman.h> +#include <sys/mount.h> +#include <sys/mqueue.h> +#include <sys/msg.h> +#include <sys/mtio.h> +#include <sys/ptrace.h> +#include <sys/resource.h> +#include <sys/signal.h> +#include <sys/socket.h> +#include <sys/sockio.h> +#include <sys/soundcard.h> +#include <sys/stat.h> +#include <sys/statvfs.h> +#include <sys/time.h> +#include <sys/timeb.h> +#include <sys/times.h> +#include <sys/timespec.h> +#include <sys/types.h> +#include <sys/ucontext.h> +#include <sys/utsname.h> +// #include <arpa/inet.h> +#include <net/ethernet.h> +#include <net/if.h> +#include <net/ppp_defs.h> +#include <net/route.h> +#include <netdb.h> +#include <netinet/in.h> +#include <netinet/ip_mroute.h> +// #include <dirent.h> -#include <fts.h> +#include <dlfcn.h> #include <fstab.h> +#include <fts.h> +#include <glob.h> #include <grp.h> +#include <ifaddrs.h> #include <limits.h> -#include <net/if.h> -#include <netdb.h> #include <poll.h> #include <pthread.h> #include <pwd.h> #include <regex.h> +#include <semaphore.h> #include <signal.h> #include <stddef.h> -#include <sys/mman.h> -#include <sys/capsicum.h> 
-#include <sys/resource.h> -#include <sys/stat.h> -#include <sys/time.h> -#include <sys/times.h> -#include <sys/types.h> -#include <sys/utsname.h> -#include <termios.h> -#include <time.h> - -#include <net/route.h> -#include <sys/mount.h> -#include <sys/sockio.h> -#include <sys/socket.h> -#include <sys/filio.h> -#include <sys/signal.h> -#include <sys/timespec.h> -#include <sys/timeb.h> -#include <sys/mqueue.h> -#include <sys/msg.h> -#include <sys/ipc.h> -#include <sys/msg.h> -#include <sys/statvfs.h> -#include <sys/soundcard.h> -#include <sys/mtio.h> -#include <sys/consio.h> -#include <sys/kbio.h> -#include <sys/link_elf.h> -#include <netinet/ip_mroute.h> -#include <netinet/in.h> -#include <net/ethernet.h> -#include <net/ppp_defs.h> -#include <glob.h> #include <stdio.h> #include <stringlist.h> #include <term.h> +#include <termios.h> +#include <time.h> +#include <utime.h> #include <utmpx.h> -#include <wchar.h> #include <vis.h> +#include <wchar.h> +#include <wordexp.h> #define _KERNEL // to declare 'shminfo' structure -# include <sys/shm.h> +#include <sys/shm.h> #undef _KERNEL #undef INLINE // to avoid clashes with sanitizers' definitions #undef IOC_DIRMASK -# include <utime.h> -# include <sys/ptrace.h> -# include <semaphore.h> - -#include <ifaddrs.h> -#include <sys/ucontext.h> -#include <wordexp.h> - // Include these after system headers to avoid name clashes and ambiguities. #include "sanitizer_internal_defs.h" +#include "sanitizer_libc.h" #include "sanitizer_platform_limits_freebsd.h" namespace __sanitizer { - unsigned struct_cap_rights_sz = sizeof(cap_rights_t); - unsigned struct_utsname_sz = sizeof(struct utsname); - unsigned struct_stat_sz = sizeof(struct stat); - unsigned struct_rusage_sz = sizeof(struct rusage); - unsigned struct_tm_sz = sizeof(struct tm); - unsigned struct_passwd_sz = sizeof(struct passwd); - unsigned struct_group_sz = sizeof(struct group); - unsigned siginfo_t_sz = sizeof(siginfo_t); - unsigned struct_sigaction_sz = sizeof(struct sigaction); - unsigned struct_itimerval_sz = sizeof(struct itimerval); - unsigned pthread_t_sz = sizeof(pthread_t); - unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t); - unsigned pthread_cond_t_sz = sizeof(pthread_cond_t); - unsigned pid_t_sz = sizeof(pid_t); - unsigned timeval_sz = sizeof(timeval); - unsigned uid_t_sz = sizeof(uid_t); - unsigned gid_t_sz = sizeof(gid_t); - unsigned fpos_t_sz = sizeof(fpos_t); - unsigned mbstate_t_sz = sizeof(mbstate_t); - unsigned sigset_t_sz = sizeof(sigset_t); - unsigned struct_timezone_sz = sizeof(struct timezone); - unsigned struct_tms_sz = sizeof(struct tms); - unsigned struct_sigevent_sz = sizeof(struct sigevent); - unsigned struct_sched_param_sz = sizeof(struct sched_param); - unsigned struct_statfs_sz = sizeof(struct statfs); - unsigned struct_sockaddr_sz = sizeof(struct sockaddr); - unsigned ucontext_t_sz = sizeof(ucontext_t); - unsigned struct_rlimit_sz = sizeof(struct rlimit); - unsigned struct_timespec_sz = sizeof(struct timespec); - unsigned struct_utimbuf_sz = sizeof(struct utimbuf); - unsigned struct_itimerspec_sz = sizeof(struct itimerspec); - unsigned struct_timeb_sz = sizeof(struct timeb); - unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds); - unsigned struct_mq_attr_sz = sizeof(struct mq_attr); - unsigned struct_statvfs_sz = sizeof(struct statvfs); - unsigned struct_shminfo_sz = sizeof(struct shminfo); - unsigned struct_shm_info_sz = sizeof(struct shm_info); - unsigned struct_regmatch_sz = sizeof(regmatch_t); - unsigned struct_regex_sz = sizeof(regex_t); - unsigned 
struct_fstab_sz = sizeof(struct fstab); - unsigned struct_FTS_sz = sizeof(FTS); - unsigned struct_FTSENT_sz = sizeof(FTSENT); - unsigned struct_StringList_sz = sizeof(StringList); - - const uptr sig_ign = (uptr)SIG_IGN; - const uptr sig_dfl = (uptr)SIG_DFL; - const uptr sig_err = (uptr)SIG_ERR; - const uptr sa_siginfo = (uptr)SA_SIGINFO; - - int shmctl_ipc_stat = (int)IPC_STAT; - int shmctl_ipc_info = (int)IPC_INFO; - int shmctl_shm_info = (int)SHM_INFO; - int shmctl_shm_stat = (int)SHM_STAT; - unsigned struct_utmpx_sz = sizeof(struct utmpx); - - int map_fixed = MAP_FIXED; - - int af_inet = (int)AF_INET; - int af_inet6 = (int)AF_INET6; - - uptr __sanitizer_in_addr_sz(int af) { - if (af == AF_INET) - return sizeof(struct in_addr); - else if (af == AF_INET6) - return sizeof(struct in6_addr); - else - return 0; - } - - unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); - int glob_nomatch = GLOB_NOMATCH; - int glob_altdirfunc = GLOB_ALTDIRFUNC; - - unsigned path_max = PATH_MAX; - - // ioctl arguments - unsigned struct_ifreq_sz = sizeof(struct ifreq); - unsigned struct_termios_sz = sizeof(struct termios); - unsigned struct_winsize_sz = sizeof(struct winsize); +void *__sanitizer_get_link_map_by_dlopen_handle(void *handle) { + void *p = nullptr; + return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr; +} + +unsigned struct_cap_rights_sz = sizeof(cap_rights_t); +unsigned struct_utsname_sz = sizeof(struct utsname); +unsigned struct_stat_sz = sizeof(struct stat); +unsigned struct_rusage_sz = sizeof(struct rusage); +unsigned struct_tm_sz = sizeof(struct tm); +unsigned struct_passwd_sz = sizeof(struct passwd); +unsigned struct_group_sz = sizeof(struct group); +unsigned siginfo_t_sz = sizeof(siginfo_t); +unsigned struct_sigaction_sz = sizeof(struct sigaction); +unsigned struct_stack_t_sz = sizeof(stack_t); +unsigned struct_itimerval_sz = sizeof(struct itimerval); +unsigned pthread_t_sz = sizeof(pthread_t); +unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t); +unsigned pthread_cond_t_sz = sizeof(pthread_cond_t); +unsigned pid_t_sz = sizeof(pid_t); +unsigned timeval_sz = sizeof(timeval); +unsigned uid_t_sz = sizeof(uid_t); +unsigned gid_t_sz = sizeof(gid_t); +unsigned fpos_t_sz = sizeof(fpos_t); +unsigned mbstate_t_sz = sizeof(mbstate_t); +unsigned sigset_t_sz = sizeof(sigset_t); +unsigned struct_timezone_sz = sizeof(struct timezone); +unsigned struct_tms_sz = sizeof(struct tms); +unsigned struct_sigevent_sz = sizeof(struct sigevent); +unsigned struct_sched_param_sz = sizeof(struct sched_param); +unsigned struct_statfs_sz = sizeof(struct statfs); +unsigned struct_sockaddr_sz = sizeof(struct sockaddr); +unsigned ucontext_t_sz = sizeof(ucontext_t); +unsigned struct_rlimit_sz = sizeof(struct rlimit); +unsigned struct_timespec_sz = sizeof(struct timespec); +unsigned struct_utimbuf_sz = sizeof(struct utimbuf); +unsigned struct_itimerspec_sz = sizeof(struct itimerspec); +unsigned struct_timeb_sz = sizeof(struct timeb); +unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds); +unsigned struct_mq_attr_sz = sizeof(struct mq_attr); +unsigned struct_statvfs_sz = sizeof(struct statvfs); +unsigned struct_shminfo_sz = sizeof(struct shminfo); +unsigned struct_shm_info_sz = sizeof(struct shm_info); +unsigned struct_regmatch_sz = sizeof(regmatch_t); +unsigned struct_regex_sz = sizeof(regex_t); +unsigned struct_fstab_sz = sizeof(struct fstab); +unsigned struct_FTS_sz = sizeof(FTS); +unsigned struct_FTSENT_sz = sizeof(FTSENT); +unsigned struct_StringList_sz = sizeof(StringList); + +const uptr sig_ign = 
(uptr)SIG_IGN; +const uptr sig_dfl = (uptr)SIG_DFL; +const uptr sig_err = (uptr)SIG_ERR; +const uptr sa_siginfo = (uptr)SA_SIGINFO; + +int shmctl_ipc_stat = (int)IPC_STAT; +int shmctl_ipc_info = (int)IPC_INFO; +int shmctl_shm_info = (int)SHM_INFO; +int shmctl_shm_stat = (int)SHM_STAT; +unsigned struct_utmpx_sz = sizeof(struct utmpx); + +int map_fixed = MAP_FIXED; + +int af_inet = (int)AF_INET; +int af_inet6 = (int)AF_INET6; + +uptr __sanitizer_in_addr_sz(int af) { + if (af == AF_INET) + return sizeof(struct in_addr); + else if (af == AF_INET6) + return sizeof(struct in6_addr); + else + return 0; +} + +unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); +int glob_nomatch = GLOB_NOMATCH; +int glob_altdirfunc = GLOB_ALTDIRFUNC; + +unsigned path_max = PATH_MAX; + +// ioctl arguments +unsigned struct_ifreq_sz = sizeof(struct ifreq); +unsigned struct_termios_sz = sizeof(struct termios); +unsigned struct_winsize_sz = sizeof(struct winsize); #if SOUND_VERSION >= 0x040000 - unsigned struct_copr_buffer_sz = 0; - unsigned struct_copr_debug_buf_sz = 0; - unsigned struct_copr_msg_sz = 0; +unsigned struct_copr_buffer_sz = 0; +unsigned struct_copr_debug_buf_sz = 0; +unsigned struct_copr_msg_sz = 0; #else - unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer); - unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf); - unsigned struct_copr_msg_sz = sizeof(struct copr_msg); +unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer); +unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf); +unsigned struct_copr_msg_sz = sizeof(struct copr_msg); #endif - unsigned struct_midi_info_sz = sizeof(struct midi_info); - unsigned struct_mtget_sz = sizeof(struct mtget); - unsigned struct_mtop_sz = sizeof(struct mtop); - unsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument); - unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec); - unsigned struct_synth_info_sz = sizeof(struct synth_info); - unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info); - unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats); - unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req); - unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req); - const unsigned long __sanitizer_bufsiz = BUFSIZ; - - const unsigned IOCTL_NOT_PRESENT = 0; - - unsigned IOCTL_FIOASYNC = FIOASYNC; - unsigned IOCTL_FIOCLEX = FIOCLEX; - unsigned IOCTL_FIOGETOWN = FIOGETOWN; - unsigned IOCTL_FIONBIO = FIONBIO; - unsigned IOCTL_FIONCLEX = FIONCLEX; - unsigned IOCTL_FIOSETOWN = FIOSETOWN; - unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI; - unsigned IOCTL_SIOCATMARK = SIOCATMARK; - unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI; - unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR; - unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR; - unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF; - unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR; - unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS; - unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC; - unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU; - unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK; - unsigned IOCTL_SIOCGPGRP = SIOCGPGRP; - unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR; - unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR; - unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR; - unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS; - unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC; - unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU; - unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK; - unsigned IOCTL_SIOCSPGRP = SIOCSPGRP; - unsigned IOCTL_TIOCCONS = TIOCCONS; - unsigned IOCTL_TIOCEXCL = TIOCEXCL; - unsigned IOCTL_TIOCGETD = TIOCGETD; 
- unsigned IOCTL_TIOCGPGRP = TIOCGPGRP; - unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ; - unsigned IOCTL_TIOCMBIC = TIOCMBIC; - unsigned IOCTL_TIOCMBIS = TIOCMBIS; - unsigned IOCTL_TIOCMGET = TIOCMGET; - unsigned IOCTL_TIOCMSET = TIOCMSET; - unsigned IOCTL_TIOCNOTTY = TIOCNOTTY; - unsigned IOCTL_TIOCNXCL = TIOCNXCL; - unsigned IOCTL_TIOCOUTQ = TIOCOUTQ; - unsigned IOCTL_TIOCPKT = TIOCPKT; - unsigned IOCTL_TIOCSCTTY = TIOCSCTTY; - unsigned IOCTL_TIOCSETD = TIOCSETD; - unsigned IOCTL_TIOCSPGRP = TIOCSPGRP; - unsigned IOCTL_TIOCSTI = TIOCSTI; - unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ; - unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT; - unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT; - unsigned IOCTL_MTIOCGET = MTIOCGET; - unsigned IOCTL_MTIOCTOP = MTIOCTOP; - unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE; - unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS; - unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK; - unsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST; - unsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET; - unsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT; - unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT; - unsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED; - unsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO; - unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE; - unsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC; - unsigned IOCTL_SNDCTL_FM_4OP_ENABLE = SNDCTL_FM_4OP_ENABLE; - unsigned IOCTL_SNDCTL_FM_LOAD_INSTR = SNDCTL_FM_LOAD_INSTR; - unsigned IOCTL_SNDCTL_MIDI_INFO = SNDCTL_MIDI_INFO; - unsigned IOCTL_SNDCTL_MIDI_PRETIME = SNDCTL_MIDI_PRETIME; - unsigned IOCTL_SNDCTL_SEQ_CTRLRATE = SNDCTL_SEQ_CTRLRATE; - unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT = SNDCTL_SEQ_GETINCOUNT; - unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT = SNDCTL_SEQ_GETOUTCOUNT; - unsigned IOCTL_SNDCTL_SEQ_NRMIDIS = SNDCTL_SEQ_NRMIDIS; - unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS = SNDCTL_SEQ_NRSYNTHS; - unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND = SNDCTL_SEQ_OUTOFBAND; - unsigned IOCTL_SNDCTL_SEQ_PANIC = SNDCTL_SEQ_PANIC; - unsigned IOCTL_SNDCTL_SEQ_PERCMODE = SNDCTL_SEQ_PERCMODE; - unsigned IOCTL_SNDCTL_SEQ_RESET = SNDCTL_SEQ_RESET; - unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES = SNDCTL_SEQ_RESETSAMPLES; - unsigned IOCTL_SNDCTL_SEQ_SYNC = SNDCTL_SEQ_SYNC; - unsigned IOCTL_SNDCTL_SEQ_TESTMIDI = SNDCTL_SEQ_TESTMIDI; - unsigned IOCTL_SNDCTL_SEQ_THRESHOLD = SNDCTL_SEQ_THRESHOLD; - unsigned IOCTL_SNDCTL_SYNTH_INFO = SNDCTL_SYNTH_INFO; - unsigned IOCTL_SNDCTL_SYNTH_MEMAVL = SNDCTL_SYNTH_MEMAVL; - unsigned IOCTL_SNDCTL_TMR_CONTINUE = SNDCTL_TMR_CONTINUE; - unsigned IOCTL_SNDCTL_TMR_METRONOME = SNDCTL_TMR_METRONOME; - unsigned IOCTL_SNDCTL_TMR_SELECT = SNDCTL_TMR_SELECT; - unsigned IOCTL_SNDCTL_TMR_SOURCE = SNDCTL_TMR_SOURCE; - unsigned IOCTL_SNDCTL_TMR_START = SNDCTL_TMR_START; - unsigned IOCTL_SNDCTL_TMR_STOP = SNDCTL_TMR_STOP; - unsigned IOCTL_SNDCTL_TMR_TEMPO = SNDCTL_TMR_TEMPO; - unsigned IOCTL_SNDCTL_TMR_TIMEBASE = SNDCTL_TMR_TIMEBASE; - unsigned IOCTL_SOUND_MIXER_READ_ALTPCM = SOUND_MIXER_READ_ALTPCM; - unsigned IOCTL_SOUND_MIXER_READ_BASS = SOUND_MIXER_READ_BASS; - unsigned IOCTL_SOUND_MIXER_READ_CAPS = SOUND_MIXER_READ_CAPS; - unsigned IOCTL_SOUND_MIXER_READ_CD = SOUND_MIXER_READ_CD; - unsigned IOCTL_SOUND_MIXER_READ_DEVMASK = SOUND_MIXER_READ_DEVMASK; - unsigned IOCTL_SOUND_MIXER_READ_ENHANCE = SOUND_MIXER_READ_ENHANCE; - unsigned IOCTL_SOUND_MIXER_READ_IGAIN = SOUND_MIXER_READ_IGAIN; - unsigned IOCTL_SOUND_MIXER_READ_IMIX = SOUND_MIXER_READ_IMIX; - unsigned IOCTL_SOUND_MIXER_READ_LINE = 
SOUND_MIXER_READ_LINE; - unsigned IOCTL_SOUND_MIXER_READ_LINE1 = SOUND_MIXER_READ_LINE1; - unsigned IOCTL_SOUND_MIXER_READ_LINE2 = SOUND_MIXER_READ_LINE2; - unsigned IOCTL_SOUND_MIXER_READ_LINE3 = SOUND_MIXER_READ_LINE3; - unsigned IOCTL_SOUND_MIXER_READ_LOUD = SOUND_MIXER_READ_LOUD; - unsigned IOCTL_SOUND_MIXER_READ_MIC = SOUND_MIXER_READ_MIC; - unsigned IOCTL_SOUND_MIXER_READ_MUTE = SOUND_MIXER_READ_MUTE; - unsigned IOCTL_SOUND_MIXER_READ_OGAIN = SOUND_MIXER_READ_OGAIN; - unsigned IOCTL_SOUND_MIXER_READ_PCM = SOUND_MIXER_READ_PCM; - unsigned IOCTL_SOUND_MIXER_READ_RECLEV = SOUND_MIXER_READ_RECLEV; - unsigned IOCTL_SOUND_MIXER_READ_RECMASK = SOUND_MIXER_READ_RECMASK; - unsigned IOCTL_SOUND_MIXER_READ_RECSRC = SOUND_MIXER_READ_RECSRC; - unsigned IOCTL_SOUND_MIXER_READ_SPEAKER = SOUND_MIXER_READ_SPEAKER; - unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS = SOUND_MIXER_READ_STEREODEVS; - unsigned IOCTL_SOUND_MIXER_READ_SYNTH = SOUND_MIXER_READ_SYNTH; - unsigned IOCTL_SOUND_MIXER_READ_TREBLE = SOUND_MIXER_READ_TREBLE; - unsigned IOCTL_SOUND_MIXER_READ_VOLUME = SOUND_MIXER_READ_VOLUME; - unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM = SOUND_MIXER_WRITE_ALTPCM; - unsigned IOCTL_SOUND_MIXER_WRITE_BASS = SOUND_MIXER_WRITE_BASS; - unsigned IOCTL_SOUND_MIXER_WRITE_CD = SOUND_MIXER_WRITE_CD; - unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE = SOUND_MIXER_WRITE_ENHANCE; - unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN = SOUND_MIXER_WRITE_IGAIN; - unsigned IOCTL_SOUND_MIXER_WRITE_IMIX = SOUND_MIXER_WRITE_IMIX; - unsigned IOCTL_SOUND_MIXER_WRITE_LINE = SOUND_MIXER_WRITE_LINE; - unsigned IOCTL_SOUND_MIXER_WRITE_LINE1 = SOUND_MIXER_WRITE_LINE1; - unsigned IOCTL_SOUND_MIXER_WRITE_LINE2 = SOUND_MIXER_WRITE_LINE2; - unsigned IOCTL_SOUND_MIXER_WRITE_LINE3 = SOUND_MIXER_WRITE_LINE3; - unsigned IOCTL_SOUND_MIXER_WRITE_LOUD = SOUND_MIXER_WRITE_LOUD; - unsigned IOCTL_SOUND_MIXER_WRITE_MIC = SOUND_MIXER_WRITE_MIC; - unsigned IOCTL_SOUND_MIXER_WRITE_MUTE = SOUND_MIXER_WRITE_MUTE; - unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN = SOUND_MIXER_WRITE_OGAIN; - unsigned IOCTL_SOUND_MIXER_WRITE_PCM = SOUND_MIXER_WRITE_PCM; - unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV = SOUND_MIXER_WRITE_RECLEV; - unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC = SOUND_MIXER_WRITE_RECSRC; - unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER = SOUND_MIXER_WRITE_SPEAKER; - unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH; - unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE; - unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME; - unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE; - unsigned IOCTL_VT_GETMODE = VT_GETMODE; - unsigned IOCTL_VT_OPENQRY = VT_OPENQRY; - unsigned IOCTL_VT_RELDISP = VT_RELDISP; - unsigned IOCTL_VT_SETMODE = VT_SETMODE; - unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE; - unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP; - unsigned IOCTL_KDDISABIO = KDDISABIO; - unsigned IOCTL_KDENABIO = KDENABIO; - unsigned IOCTL_KDGETLED = KDGETLED; - unsigned IOCTL_KDGETMODE = KDGETMODE; - unsigned IOCTL_KDGKBMODE = KDGKBMODE; - unsigned IOCTL_KDGKBTYPE = KDGKBTYPE; - unsigned IOCTL_KDMKTONE = KDMKTONE; - unsigned IOCTL_KDSETLED = KDSETLED; - unsigned IOCTL_KDSETMODE = KDSETMODE; - unsigned IOCTL_KDSKBMODE = KDSKBMODE; - unsigned IOCTL_KIOCSOUND = KIOCSOUND; - unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP; - unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE; - - const int si_SEGV_MAPERR = SEGV_MAPERR; - const int si_SEGV_ACCERR = SEGV_ACCERR; - const int unvis_valid = UNVIS_VALID; - const int unvis_validpush = UNVIS_VALIDPUSH; -} // namespace 
__sanitizer +unsigned struct_midi_info_sz = sizeof(struct midi_info); +unsigned struct_mtget_sz = sizeof(struct mtget); +unsigned struct_mtop_sz = sizeof(struct mtop); +unsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument); +unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec); +unsigned struct_synth_info_sz = sizeof(struct synth_info); +unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info); +unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats); +unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req); +unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req); +const unsigned long __sanitizer_bufsiz = BUFSIZ; + +const unsigned IOCTL_NOT_PRESENT = 0; + +unsigned IOCTL_FIOASYNC = FIOASYNC; +unsigned IOCTL_FIOCLEX = FIOCLEX; +unsigned IOCTL_FIOGETOWN = FIOGETOWN; +unsigned IOCTL_FIONBIO = FIONBIO; +unsigned IOCTL_FIONCLEX = FIONCLEX; +unsigned IOCTL_FIOSETOWN = FIOSETOWN; +unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI; +unsigned IOCTL_SIOCATMARK = SIOCATMARK; +unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI; +unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR; +unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR; +unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF; +unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR; +unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS; +unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC; +unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU; +unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK; +unsigned IOCTL_SIOCGPGRP = SIOCGPGRP; +unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR; +unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR; +unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR; +unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS; +unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC; +unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU; +unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK; +unsigned IOCTL_SIOCSPGRP = SIOCSPGRP; +unsigned IOCTL_TIOCCONS = TIOCCONS; +unsigned IOCTL_TIOCEXCL = TIOCEXCL; +unsigned IOCTL_TIOCGETD = TIOCGETD; +unsigned IOCTL_TIOCGPGRP = TIOCGPGRP; +unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ; +unsigned IOCTL_TIOCMBIC = TIOCMBIC; +unsigned IOCTL_TIOCMBIS = TIOCMBIS; +unsigned IOCTL_TIOCMGET = TIOCMGET; +unsigned IOCTL_TIOCMSET = TIOCMSET; +unsigned IOCTL_TIOCNOTTY = TIOCNOTTY; +unsigned IOCTL_TIOCNXCL = TIOCNXCL; +unsigned IOCTL_TIOCOUTQ = TIOCOUTQ; +unsigned IOCTL_TIOCPKT = TIOCPKT; +unsigned IOCTL_TIOCSCTTY = TIOCSCTTY; +unsigned IOCTL_TIOCSETD = TIOCSETD; +unsigned IOCTL_TIOCSPGRP = TIOCSPGRP; +unsigned IOCTL_TIOCSTI = TIOCSTI; +unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ; +unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT; +unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT; +unsigned IOCTL_MTIOCGET = MTIOCGET; +unsigned IOCTL_MTIOCTOP = MTIOCTOP; +unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE; +unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS; +unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK; +unsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST; +unsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET; +unsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT; +unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT; +unsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED; +unsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO; +unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE; +unsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC; +unsigned IOCTL_SNDCTL_FM_4OP_ENABLE = SNDCTL_FM_4OP_ENABLE; +unsigned IOCTL_SNDCTL_FM_LOAD_INSTR = SNDCTL_FM_LOAD_INSTR; +unsigned IOCTL_SNDCTL_MIDI_INFO = SNDCTL_MIDI_INFO; +unsigned IOCTL_SNDCTL_MIDI_PRETIME = SNDCTL_MIDI_PRETIME; +unsigned 
IOCTL_SNDCTL_SEQ_CTRLRATE = SNDCTL_SEQ_CTRLRATE; +unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT = SNDCTL_SEQ_GETINCOUNT; +unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT = SNDCTL_SEQ_GETOUTCOUNT; +unsigned IOCTL_SNDCTL_SEQ_NRMIDIS = SNDCTL_SEQ_NRMIDIS; +unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS = SNDCTL_SEQ_NRSYNTHS; +unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND = SNDCTL_SEQ_OUTOFBAND; +unsigned IOCTL_SNDCTL_SEQ_PANIC = SNDCTL_SEQ_PANIC; +unsigned IOCTL_SNDCTL_SEQ_PERCMODE = SNDCTL_SEQ_PERCMODE; +unsigned IOCTL_SNDCTL_SEQ_RESET = SNDCTL_SEQ_RESET; +unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES = SNDCTL_SEQ_RESETSAMPLES; +unsigned IOCTL_SNDCTL_SEQ_SYNC = SNDCTL_SEQ_SYNC; +unsigned IOCTL_SNDCTL_SEQ_TESTMIDI = SNDCTL_SEQ_TESTMIDI; +unsigned IOCTL_SNDCTL_SEQ_THRESHOLD = SNDCTL_SEQ_THRESHOLD; +unsigned IOCTL_SNDCTL_SYNTH_INFO = SNDCTL_SYNTH_INFO; +unsigned IOCTL_SNDCTL_SYNTH_MEMAVL = SNDCTL_SYNTH_MEMAVL; +unsigned IOCTL_SNDCTL_TMR_CONTINUE = SNDCTL_TMR_CONTINUE; +unsigned IOCTL_SNDCTL_TMR_METRONOME = SNDCTL_TMR_METRONOME; +unsigned IOCTL_SNDCTL_TMR_SELECT = SNDCTL_TMR_SELECT; +unsigned IOCTL_SNDCTL_TMR_SOURCE = SNDCTL_TMR_SOURCE; +unsigned IOCTL_SNDCTL_TMR_START = SNDCTL_TMR_START; +unsigned IOCTL_SNDCTL_TMR_STOP = SNDCTL_TMR_STOP; +unsigned IOCTL_SNDCTL_TMR_TEMPO = SNDCTL_TMR_TEMPO; +unsigned IOCTL_SNDCTL_TMR_TIMEBASE = SNDCTL_TMR_TIMEBASE; +unsigned IOCTL_SOUND_MIXER_READ_ALTPCM = SOUND_MIXER_READ_ALTPCM; +unsigned IOCTL_SOUND_MIXER_READ_BASS = SOUND_MIXER_READ_BASS; +unsigned IOCTL_SOUND_MIXER_READ_CAPS = SOUND_MIXER_READ_CAPS; +unsigned IOCTL_SOUND_MIXER_READ_CD = SOUND_MIXER_READ_CD; +unsigned IOCTL_SOUND_MIXER_READ_DEVMASK = SOUND_MIXER_READ_DEVMASK; +unsigned IOCTL_SOUND_MIXER_READ_ENHANCE = SOUND_MIXER_READ_ENHANCE; +unsigned IOCTL_SOUND_MIXER_READ_IGAIN = SOUND_MIXER_READ_IGAIN; +unsigned IOCTL_SOUND_MIXER_READ_IMIX = SOUND_MIXER_READ_IMIX; +unsigned IOCTL_SOUND_MIXER_READ_LINE = SOUND_MIXER_READ_LINE; +unsigned IOCTL_SOUND_MIXER_READ_LINE1 = SOUND_MIXER_READ_LINE1; +unsigned IOCTL_SOUND_MIXER_READ_LINE2 = SOUND_MIXER_READ_LINE2; +unsigned IOCTL_SOUND_MIXER_READ_LINE3 = SOUND_MIXER_READ_LINE3; +unsigned IOCTL_SOUND_MIXER_READ_LOUD = SOUND_MIXER_READ_LOUD; +unsigned IOCTL_SOUND_MIXER_READ_MIC = SOUND_MIXER_READ_MIC; +unsigned IOCTL_SOUND_MIXER_READ_MUTE = SOUND_MIXER_READ_MUTE; +unsigned IOCTL_SOUND_MIXER_READ_OGAIN = SOUND_MIXER_READ_OGAIN; +unsigned IOCTL_SOUND_MIXER_READ_PCM = SOUND_MIXER_READ_PCM; +unsigned IOCTL_SOUND_MIXER_READ_RECLEV = SOUND_MIXER_READ_RECLEV; +unsigned IOCTL_SOUND_MIXER_READ_RECMASK = SOUND_MIXER_READ_RECMASK; +unsigned IOCTL_SOUND_MIXER_READ_RECSRC = SOUND_MIXER_READ_RECSRC; +unsigned IOCTL_SOUND_MIXER_READ_SPEAKER = SOUND_MIXER_READ_SPEAKER; +unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS = SOUND_MIXER_READ_STEREODEVS; +unsigned IOCTL_SOUND_MIXER_READ_SYNTH = SOUND_MIXER_READ_SYNTH; +unsigned IOCTL_SOUND_MIXER_READ_TREBLE = SOUND_MIXER_READ_TREBLE; +unsigned IOCTL_SOUND_MIXER_READ_VOLUME = SOUND_MIXER_READ_VOLUME; +unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM = SOUND_MIXER_WRITE_ALTPCM; +unsigned IOCTL_SOUND_MIXER_WRITE_BASS = SOUND_MIXER_WRITE_BASS; +unsigned IOCTL_SOUND_MIXER_WRITE_CD = SOUND_MIXER_WRITE_CD; +unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE = SOUND_MIXER_WRITE_ENHANCE; +unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN = SOUND_MIXER_WRITE_IGAIN; +unsigned IOCTL_SOUND_MIXER_WRITE_IMIX = SOUND_MIXER_WRITE_IMIX; +unsigned IOCTL_SOUND_MIXER_WRITE_LINE = SOUND_MIXER_WRITE_LINE; +unsigned IOCTL_SOUND_MIXER_WRITE_LINE1 = SOUND_MIXER_WRITE_LINE1; +unsigned IOCTL_SOUND_MIXER_WRITE_LINE2 = SOUND_MIXER_WRITE_LINE2; 
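These IOCTL_* globals mirror kernel request codes into plain unsigned values that the common ioctl interceptor can compare against at run time; IOCTL_NOT_PRESENT (0) marks requests a platform lacks. A simplified sketch of how such a table is typically consulted — the table and lookup below are illustrative, not the actual compiler-rt machinery:

#include <sys/ioctl.h>
#include <termios.h>
#include <cstdio>

// Each entry pairs a mirrored request code with the byte count the
// request reads from or writes to its argument.
struct IoctlDesc {
  unsigned req;
  unsigned size;
};

static const IoctlDesc kTable[] = {
    {TIOCGWINSZ, sizeof(struct winsize)},  // kernel writes a winsize
    {TIOCSWINSZ, sizeof(struct winsize)},  // kernel reads a winsize
};

static const IoctlDesc *lookup(unsigned req) {
  for (const IoctlDesc &d : kTable)
    if (d.req == req) return &d;
  return nullptr;  // unknown request: the interceptor checks nothing
}

int main() {
  if (const IoctlDesc *d = lookup(TIOCGWINSZ))
    std::printf("TIOCGWINSZ touches %u bytes\n", d->size);
}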
+unsigned IOCTL_SOUND_MIXER_WRITE_LINE3 = SOUND_MIXER_WRITE_LINE3; +unsigned IOCTL_SOUND_MIXER_WRITE_LOUD = SOUND_MIXER_WRITE_LOUD; +unsigned IOCTL_SOUND_MIXER_WRITE_MIC = SOUND_MIXER_WRITE_MIC; +unsigned IOCTL_SOUND_MIXER_WRITE_MUTE = SOUND_MIXER_WRITE_MUTE; +unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN = SOUND_MIXER_WRITE_OGAIN; +unsigned IOCTL_SOUND_MIXER_WRITE_PCM = SOUND_MIXER_WRITE_PCM; +unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV = SOUND_MIXER_WRITE_RECLEV; +unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC = SOUND_MIXER_WRITE_RECSRC; +unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER = SOUND_MIXER_WRITE_SPEAKER; +unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH; +unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE; +unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME; +unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE; +unsigned IOCTL_VT_GETMODE = VT_GETMODE; +unsigned IOCTL_VT_OPENQRY = VT_OPENQRY; +unsigned IOCTL_VT_RELDISP = VT_RELDISP; +unsigned IOCTL_VT_SETMODE = VT_SETMODE; +unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE; +unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP; +unsigned IOCTL_KDDISABIO = KDDISABIO; +unsigned IOCTL_KDENABIO = KDENABIO; +unsigned IOCTL_KDGETLED = KDGETLED; +unsigned IOCTL_KDGETMODE = KDGETMODE; +unsigned IOCTL_KDGKBMODE = KDGKBMODE; +unsigned IOCTL_KDGKBTYPE = KDGKBTYPE; +unsigned IOCTL_KDMKTONE = KDMKTONE; +unsigned IOCTL_KDSETLED = KDSETLED; +unsigned IOCTL_KDSETMODE = KDSETMODE; +unsigned IOCTL_KDSKBMODE = KDSKBMODE; +unsigned IOCTL_KIOCSOUND = KIOCSOUND; +unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP; +unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE; + +const int si_SEGV_MAPERR = SEGV_MAPERR; +const int si_SEGV_ACCERR = SEGV_ACCERR; +const int unvis_valid = UNVIS_VALID; +const int unvis_validpush = UNVIS_VALIDPUSH; +} // namespace __sanitizer using namespace __sanitizer; diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h index 71cf5b9c357..5e0ca9c7d78 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h @@ -18,18 +18,17 @@ #include "sanitizer_internal_defs.h" #include "sanitizer_platform.h" - #include "sanitizer_platform_limits_posix.h" -// FreeBSD's dlopen() returns a pointer to an Obj_Entry structure that -// incorporates the map structure. -# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \ - ((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 560))) // Get sys/_types.h, because that tells us whether 64-bit inodes are // used in struct dirent below. 
#include <sys/_types.h> namespace __sanitizer { +void *__sanitizer_get_link_map_by_dlopen_handle(void *handle); +#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \ + (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle) + extern unsigned struct_utsname_sz; extern unsigned struct_stat_sz; #if defined(__powerpc64__) @@ -53,6 +52,7 @@ extern unsigned struct_timezone_sz; extern unsigned struct_tms_sz; extern unsigned struct_itimerspec_sz; extern unsigned struct_sigevent_sz; +extern unsigned struct_stack_t_sz; extern unsigned struct_sched_param_sz; extern unsigned struct_statfs64_sz; extern unsigned struct_statfs_sz; @@ -147,7 +147,7 @@ struct __sanitizer_ifaddrs { unsigned int ifa_flags; void *ifa_addr; // (struct sockaddr *) void *ifa_netmask; // (struct sockaddr *) -# undef ifa_dstaddr +#undef ifa_dstaddr void *ifa_dstaddr; // (struct sockaddr *) void *ifa_data; }; @@ -630,27 +630,27 @@ extern unsigned struct_cap_rights_sz; extern unsigned struct_fstab_sz; extern unsigned struct_StringList_sz; -} // namespace __sanitizer +} // namespace __sanitizer #define CHECK_TYPE_SIZE(TYPE) \ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE)) -#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \ - COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *) NULL)->MEMBER) == \ - sizeof(((CLASS *) NULL)->MEMBER)); \ - COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \ +#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \ + COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \ + sizeof(((CLASS *)NULL)->MEMBER)); \ + COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \ offsetof(CLASS, MEMBER)) // For sigaction, which is a function and struct at the same time, // and thus requires explicit "struct" in sizeof() expression. -#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \ - COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *) NULL)->MEMBER) == \ - sizeof(((struct CLASS *) NULL)->MEMBER)); \ - COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \ +#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \ + COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \ + sizeof(((struct CLASS *)NULL)->MEMBER)); \ + COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \ offsetof(struct CLASS, MEMBER)) #define SIGACTION_SYMNAME sigaction #endif -#endif // SANITIZER_FREEBSD +#endif // SANITIZER_FREEBSD diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp index 48a78c8998a..25da334b63f 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp @@ -161,12 +161,121 @@ #include <net/slip.h> #include <netbt/hci.h> #include <netinet/ip_compat.h> +#if __has_include(<netinet/ip_fil.h>) #include <netinet/ip_fil.h> #include <netinet/ip_nat.h> #include <netinet/ip_proxy.h> +#else +/* Fallback for MKIPFILTER=no */ + +typedef struct ap_control { + char apc_label[16]; + char apc_config[16]; + unsigned char apc_p; + unsigned long apc_cmd; + unsigned long apc_arg; + void *apc_data; + size_t apc_dsize; +} ap_ctl_t; + +typedef struct ipftq { + ipfmutex_t ifq_lock; + unsigned int ifq_ttl; + void *ifq_head; + void **ifq_tail; + void *ifq_next; + void **ifq_pnext; + int ifq_ref; + unsigned int ifq_flags; +} ipftq_t; + +typedef struct ipfobj { + uint32_t ipfo_rev; + uint32_t ipfo_size; + void *ipfo_ptr; + int ipfo_type; + int ipfo_offset; + int ipfo_retval; + 
unsigned char ipfo_xxxpad[28]; +} ipfobj_t; + +#define SIOCADNAT _IOW('r', 60, struct ipfobj) +#define SIOCRMNAT _IOW('r', 61, struct ipfobj) +#define SIOCGNATS _IOWR('r', 62, struct ipfobj) +#define SIOCGNATL _IOWR('r', 63, struct ipfobj) +#define SIOCPURGENAT _IOWR('r', 100, struct ipfobj) +#endif #include <netinet6/in6_var.h> #include <netinet6/nd6.h> +#if !__NetBSD_Prereq__(9, 99, 51) #include <netsmb/smb_dev.h> +#else +struct smbioc_flags { + int ioc_level; + int ioc_mask; + int ioc_flags; +}; +struct smbioc_oshare { + int ioc_opt; + int ioc_stype; + char ioc_share[129]; + char ioc_password[129]; + uid_t ioc_owner; + gid_t ioc_group; + mode_t ioc_mode; + mode_t ioc_rights; +}; +struct smbioc_ossn { + int ioc_opt; + uint32_t ioc_svlen; + struct sockaddr *ioc_server; + uint32_t ioc_lolen; + struct sockaddr *ioc_local; + char ioc_srvname[16]; + int ioc_timeout; + int ioc_retrycount; + char ioc_localcs[16]; + char ioc_servercs[16]; + char ioc_user[129]; + char ioc_workgroup[129]; + char ioc_password[129]; + uid_t ioc_owner; + gid_t ioc_group; + mode_t ioc_mode; + mode_t ioc_rights; +}; +struct smbioc_lookup { + int ioc_level; + int ioc_flags; + struct smbioc_ossn ioc_ssn; + struct smbioc_oshare ioc_sh; +}; +struct smbioc_rq { + u_char ioc_cmd; + u_char ioc_twc; + void *ioc_twords; + u_short ioc_tbc; + void *ioc_tbytes; + int ioc_rpbufsz; + char *ioc_rpbuf; + u_char ioc_rwc; + u_short ioc_rbc; +}; +struct smbioc_rw { + u_int16_t ioc_fh; + char *ioc_base; + off_t ioc_offset; + int ioc_cnt; +}; +#define SMBIOC_OPENSESSION _IOW('n', 100, struct smbioc_ossn) +#define SMBIOC_OPENSHARE _IOW('n', 101, struct smbioc_oshare) +#define SMBIOC_REQUEST _IOWR('n', 102, struct smbioc_rq) +#define SMBIOC_T2RQ _IOWR('n', 103, struct smbioc_t2rq) +#define SMBIOC_SETFLAGS _IOW('n', 104, struct smbioc_flags) +#define SMBIOC_LOOKUP _IOW('n', 106, struct smbioc_lookup) +#define SMBIOC_READ _IOWR('n', 107, struct smbioc_rw) +#define SMBIOC_WRITE _IOWR('n', 108, struct smbioc_rw) +#endif #include <dev/biovar.h> #include <dev/bluetooth/btdev.h> #include <dev/bluetooth/btsco.h> @@ -190,7 +299,21 @@ #include <dev/sun/vuid_event.h> #include <dev/tc/sticio.h> #include <dev/usb/ukyopon.h> +#if !__NetBSD_Prereq__(9, 99, 44) #include <dev/usb/urio.h> +#else +struct urio_command { + unsigned short length; + int request; + int requesttype; + int value; + int index; + void *buffer; + int timeout; +}; +#define URIO_SEND_COMMAND _IOWR('U', 200, struct urio_command) +#define URIO_RECV_COMMAND _IOWR('U', 201, struct urio_command) +#endif #include <dev/usb/usb.h> #include <dev/usb/utoppy.h> #include <dev/vme/xio.h> @@ -199,6 +322,7 @@ #include <dev/wscons/wsdisplay_usl_io.h> #include <fs/autofs/autofs_ioctl.h> #include <dirent.h> +#include <dlfcn.h> #include <glob.h> #include <grp.h> #include <ifaddrs.h> @@ -244,9 +368,15 @@ // Include these after system headers to avoid name clashes and ambiguities. #include "sanitizer_internal_defs.h" +#include "sanitizer_libc.h" #include "sanitizer_platform_limits_netbsd.h" namespace __sanitizer { +void *__sanitizer_get_link_map_by_dlopen_handle(void* handle) { + void *p = nullptr; + return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? 
p : nullptr; +} + unsigned struct_utsname_sz = sizeof(struct utsname); unsigned struct_stat_sz = sizeof(struct stat); unsigned struct_rusage_sz = sizeof(struct rusage); @@ -255,6 +385,7 @@ unsigned struct_passwd_sz = sizeof(struct passwd); unsigned struct_group_sz = sizeof(struct group); unsigned siginfo_t_sz = sizeof(siginfo_t); unsigned struct_sigaction_sz = sizeof(struct sigaction); +unsigned struct_stack_t_sz = sizeof(stack_t); unsigned struct_itimerval_sz = sizeof(struct itimerval); unsigned pthread_t_sz = sizeof(pthread_t); unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h index 794efdb6eff..d80280d9bf8 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h @@ -19,18 +19,11 @@ #include "sanitizer_internal_defs.h" #include "sanitizer_platform.h" -#define _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, shift) \ - ((link_map *)((handle) == nullptr ? nullptr : ((char *)(handle) + (shift)))) - -#if defined(__x86_64__) -#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \ - _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 264) -#elif defined(__i386__) -#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \ - _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 136) -#endif - namespace __sanitizer { +void *__sanitizer_get_link_map_by_dlopen_handle(void *handle); +# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \ + (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle) + extern unsigned struct_utsname_sz; extern unsigned struct_stat_sz; extern unsigned struct_rusage_sz; @@ -48,6 +41,7 @@ extern unsigned struct_timezone_sz; extern unsigned struct_tms_sz; extern unsigned struct_itimerspec_sz; extern unsigned struct_sigevent_sz; +extern unsigned struct_stack_t_sz; extern unsigned struct_sched_param_sz; extern unsigned struct_statfs_sz; extern unsigned struct_sockaddr_sz; diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp index 12515626ce5..1420ecbfa56 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp @@ -72,6 +72,7 @@ unsigned struct_passwd_sz = sizeof(struct passwd); unsigned struct_group_sz = sizeof(struct group); unsigned siginfo_t_sz = sizeof(siginfo_t); unsigned struct_sigaction_sz = sizeof(struct sigaction); +unsigned struct_stack_t_sz = sizeof(stack_t); unsigned struct_itimerval_sz = sizeof(struct itimerval); unsigned pthread_t_sz = sizeof(pthread_t); unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h index 6d8b062716b..8a194872360 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h @@ -50,6 +50,7 @@ extern unsigned struct_timezone_sz; extern unsigned struct_tms_sz; extern unsigned struct_itimerspec_sz; extern unsigned struct_sigevent_sz; +extern unsigned struct_stack_t_sz; extern unsigned struct_statfs_sz; extern unsigned struct_sockaddr_sz; diff --git 
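The __sanitizer_get_link_map_by_dlopen_handle() implementation above replaces the old hard-coded handle offsets (560 on FreeBSD, 264/136 on NetBSD x86) with a query to the dynamic linker itself, which keeps working when libc's internal Obj_Entry layout changes. A minimal sketch of the same dlinfo(RTLD_DI_LINKMAP) lookup, assuming a glibc- or BSD-style <link.h>; the library name in main() is illustrative:

#include <dlfcn.h>
#include <link.h>
#include <cstdio>

// Ask the dynamic linker for the link_map behind an opaque dlopen()
// handle instead of assuming the handle's internal layout.
static link_map *get_link_map(void *handle) {
  link_map *lm = nullptr;
  return dlinfo(handle, RTLD_DI_LINKMAP, &lm) == 0 ? lm : nullptr;
}

int main() {
  if (void *h = dlopen("libm.so.6", RTLD_NOW)) {
    if (link_map *lm = get_link_map(h))
      std::printf("loaded at base %p: %s\n", (void *)lm->l_addr, lm->l_name);
    dlclose(h);
  }
}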
a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp index aa845df4dde..c052aa2bc95 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp @@ -170,15 +170,16 @@ typedef struct user_fpregs elf_fpregset_t; namespace __sanitizer { unsigned struct_utsname_sz = sizeof(struct utsname); unsigned struct_stat_sz = sizeof(struct stat); -#if !SANITIZER_IOS +#if !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64) unsigned struct_stat64_sz = sizeof(struct stat64); -#endif // !SANITIZER_IOS +#endif // !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64) unsigned struct_rusage_sz = sizeof(struct rusage); unsigned struct_tm_sz = sizeof(struct tm); unsigned struct_passwd_sz = sizeof(struct passwd); unsigned struct_group_sz = sizeof(struct group); unsigned siginfo_t_sz = sizeof(siginfo_t); unsigned struct_sigaction_sz = sizeof(struct sigaction); + unsigned struct_stack_t_sz = sizeof(stack_t); unsigned struct_itimerval_sz = sizeof(struct itimerval); unsigned pthread_t_sz = sizeof(pthread_t); unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t); @@ -196,9 +197,9 @@ namespace __sanitizer { unsigned struct_regex_sz = sizeof(regex_t); unsigned struct_regmatch_sz = sizeof(regmatch_t); -#if SANITIZER_MAC && !SANITIZER_IOS +#if (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS unsigned struct_statfs64_sz = sizeof(struct statfs64); -#endif // SANITIZER_MAC && !SANITIZER_IOS +#endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS #if !SANITIZER_ANDROID unsigned struct_fstab_sz = sizeof(struct fstab); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h index 5337b26b29b..658b0abaece 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h @@ -47,6 +47,7 @@ extern unsigned struct_timezone_sz; extern unsigned struct_tms_sz; extern unsigned struct_itimerspec_sz; extern unsigned struct_sigevent_sz; +extern unsigned struct_stack_t_sz; extern unsigned struct_sched_param_sz; extern unsigned struct_statfs64_sz; extern unsigned struct_regex_sz; @@ -703,6 +704,12 @@ struct __sanitizer_dl_phdr_info { extern unsigned struct_ElfW_Phdr_sz; #endif +struct __sanitizer_protoent { + char *p_name; + char **p_aliases; + int p_proto; +}; + struct __sanitizer_addrinfo { int ai_flags; int ai_family; diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp index 9717d98ebf1..6ec1a1bdd11 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp @@ -72,6 +72,7 @@ namespace __sanitizer { unsigned struct_group_sz = sizeof(struct group); unsigned siginfo_t_sz = sizeof(siginfo_t); unsigned struct_sigaction_sz = sizeof(struct sigaction); + unsigned struct_stack_t_sz = sizeof(stack_t); unsigned struct_itimerval_sz = sizeof(struct itimerval); unsigned pthread_t_sz = sizeof(pthread_t); unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h 
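The new __sanitizer_protoent above follows the runtime's usual mirror-struct technique: re-declare the libc layout locally so interceptors can size their accesses without pulling system headers into every translation unit, then pin the mirror to the real type with compile-time checks (the CHECK_TYPE_SIZE / CHECK_SIZE_AND_OFFSET macros seen earlier). A standalone sketch of that idea, using static_assert in place of COMPILER_CHECK:

#include <netdb.h>
#include <cstddef>

// Local mirror of the libc struct protoent layout.
struct my_protoent {
  char *p_name;
  char **p_aliases;
  int p_proto;
};

// If libc ever changes the layout, the build fails here instead of
// the sanitizer silently misreading memory at run time.
static_assert(sizeof(my_protoent) == sizeof(protoent), "size must match");
static_assert(offsetof(my_protoent, p_proto) == offsetof(protoent, p_proto),
              "member layout must match");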
b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h index 77ae6e6a44d..85995e79792 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h @@ -38,6 +38,7 @@ extern unsigned struct_timezone_sz; extern unsigned struct_tms_sz; extern unsigned struct_itimerspec_sz; extern unsigned struct_sigevent_sz; +extern unsigned struct_stack_t_sz; extern unsigned struct_sched_param_sz; extern unsigned struct_statfs64_sz; extern unsigned struct_statfs_sz; diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp index d890a3a3177..e21661b42f8 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp @@ -347,9 +347,17 @@ int GetNamedMappingFd(const char *name, uptr size, int *flags) { CHECK(internal_strlen(name) < sizeof(shmname) - 10); internal_snprintf(shmname, sizeof(shmname), "/dev/shm/%zu [%s]", internal_getpid(), name); + int o_cloexec = 0; +#if defined(O_CLOEXEC) + o_cloexec = O_CLOEXEC; +#endif int fd = ReserveStandardFds( - internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, S_IRWXU)); + internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | o_cloexec, S_IRWXU)); CHECK_GE(fd, 0); + if (!o_cloexec) { + int res = fcntl(fd, F_SETFD, FD_CLOEXEC); + CHECK_EQ(0, res); + } int res = internal_ftruncate(fd, size); CHECK_EQ(0, res); res = internal_unlink(shmname); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix.h index 70c71f04d2d..a1b49702da2 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix.h @@ -63,7 +63,7 @@ uptr internal_ptrace(int request, int pid, void *addr, void *data); uptr internal_waitpid(int pid, int *status, int options); int internal_fork(); -fd_t internal_spawn(const char *argv[], pid_t *pid); +fd_t internal_spawn(const char *argv[], const char *envp[], pid_t *pid); int internal_sysctl(const int *name, unsigned int namelen, void *oldp, uptr *oldlenp, const void *newp, uptr newlen); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp index 304b3a01a08..f920172c06d 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp @@ -426,7 +426,8 @@ void AdjustStackSize(void *attr_) { #endif // !SANITIZER_GO pid_t StartSubprocess(const char *program, const char *const argv[], - fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) { + const char *const envp[], fd_t stdin_fd, fd_t stdout_fd, + fd_t stderr_fd) { auto file_closer = at_scope_exit([&] { if (stdin_fd != kInvalidFd) { internal_close(stdin_fd); @@ -469,7 +470,8 @@ pid_t StartSubprocess(const char *program, const char *const argv[], for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--) internal_close(fd); - execv(program, const_cast<char **>(&argv[0])); + internal_execve(program, const_cast<char **>(&argv[0]), + const_cast<char *const *>(envp)); internal__exit(1); } diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h index d0e5245f84d..665ed45fa93 100644 --- 
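The GetNamedMappingFd() change above is the standard close-on-exec portability dance: use O_CLOEXEC atomically when the headers provide it, otherwise fall back to a separate fcntl(F_SETFD, FD_CLOEXEC) after open(). A self-contained sketch of the same pattern (the helper name is illustrative):

#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

// Prefer the atomic flag; the fcntl() fallback leaves a window where a
// concurrent fork()+exec() can leak the descriptor, which is exactly
// why O_CLOEXEC is used whenever it is available.
int open_cloexec(const char *path, int flags, mode_t mode) {
#ifdef O_CLOEXEC
  return open(path, flags | O_CLOEXEC, mode);
#else
  int fd = open(path, flags, mode);
  if (fd >= 0 && fcntl(fd, F_SETFD, FD_CLOEXEC) != 0) {
    close(fd);
    return -1;
  }
  return fd;
#endif
}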
a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h @@ -15,18 +15,19 @@ #include "sanitizer_platform.h" -#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ - SANITIZER_OPENBSD || SANITIZER_MAC || SANITIZER_SOLARIS +#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ + SANITIZER_OPENBSD || SANITIZER_MAC || SANITIZER_SOLARIS || \ + SANITIZER_FUCHSIA #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" +#include "sanitizer_fuchsia.h" #include "sanitizer_linux.h" #include "sanitizer_mac.h" #include "sanitizer_mutex.h" namespace __sanitizer { - // Memory protection masks. static const uptr kProtectionRead = 1; static const uptr kProtectionWrite = 2; diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_fuchsia.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_fuchsia.cpp new file mode 100644 index 00000000000..cc3e9be0645 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_fuchsia.cpp @@ -0,0 +1,80 @@ +//===-- sanitizer_procmaps_fuchsia.cpp +//----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Information about the process mappings (Fuchsia-specific parts). +//===----------------------------------------------------------------------===// + +#include "sanitizer_platform.h" +#if SANITIZER_FUCHSIA +#include <zircon/process.h> +#include <zircon/syscalls.h> + +#include "sanitizer_common.h" +#include "sanitizer_procmaps.h" + +namespace __sanitizer { + +// The cache flag is ignored on Fuchsia because a process can always get this +// information via its process-self handle. +MemoryMappingLayout::MemoryMappingLayout(bool) { Reset(); } + +void MemoryMappingLayout::Reset() { + data_.data.clear(); + data_.current = 0; + + size_t count; + zx_status_t status = _zx_object_get_info( + _zx_process_self(), ZX_INFO_PROCESS_MAPS, nullptr, 0, nullptr, &count); + if (status != ZX_OK) { + return; + } + + size_t filled; + do { + data_.data.resize(count); + status = _zx_object_get_info( + _zx_process_self(), ZX_INFO_PROCESS_MAPS, data_.data.data(), + count * sizeof(zx_info_maps_t), &filled, &count); + if (status != ZX_OK) { + data_.data.clear(); + return; + } + } while (filled < count); +} + +MemoryMappingLayout::~MemoryMappingLayout() {} + +bool MemoryMappingLayout::Error() const { return data_.data.empty(); } + +bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) { + while (data_.current < data_.data.size()) { + const auto &entry = data_.data[data_.current++]; + if (entry.type == ZX_INFO_MAPS_TYPE_MAPPING) { + segment->start = entry.base; + segment->end = entry.base + entry.size; + segment->offset = entry.u.mapping.vmo_offset; + const auto flags = entry.u.mapping.mmu_flags; + segment->protection = + ((flags & ZX_VM_PERM_READ) ? kProtectionRead : 0) | + ((flags & ZX_VM_PERM_WRITE) ? kProtectionWrite : 0) | + ((flags & ZX_VM_PERM_EXECUTE) ? 
kProtectionExecute : 0); + if (segment->filename && segment->filename_size > 0) { + uptr len = Min(sizeof(entry.name), segment->filename_size) - 1; + internal_strncpy(segment->filename, entry.name, len); + segment->filename[len] = 0; + } + return true; + } + } + return false; +} + +} // namespace __sanitizer + +#endif // SANITIZER_FUCHSIA diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h new file mode 100644 index 00000000000..4d0d96a64f6 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h @@ -0,0 +1,21 @@ +//===-- sanitizer_ptrauth.h -------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_PTRAUTH_H +#define SANITIZER_PTRAUTH_H + +#if __has_feature(ptrauth_calls) +#include <ptrauth.h> +#else +// Copied from <ptrauth.h> +#define ptrauth_strip(__value, __key) __value +#define ptrauth_auth_data(__value, __old_key, __old_data) __value +#define ptrauth_string_discriminator(__string) ((int)0) +#endif + +#endif // SANITIZER_PTRAUTH_H diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp index 0d2576c00ab..29bcfcfa6f1 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp @@ -49,6 +49,10 @@ uptr internal_getpid() { return getpid(); } +int internal_dlinfo(void *handle, int request, void *p) { + UNIMPLEMENTED(); +} + bool FileExists(const char *filename) { struct stat st; if (stat(filename, &st)) diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp new file mode 100644 index 00000000000..3a246443ed9 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp @@ -0,0 +1,42 @@ +//===-- sanitizer_stoptheworld_fuchsia.cpp -------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// +// See sanitizer_stoptheworld.h for details. +// +//===---------------------------------------------------------------------===// + +#include "sanitizer_platform.h" + +#if SANITIZER_FUCHSIA + +#include <zircon/sanitizer.h> + +#include "sanitizer_stoptheworld.h" + +namespace __sanitizer { + +// The Fuchsia implementation stops the world but doesn't offer a real +// SuspendedThreadsList argument. This is enough for ASan's use case, +// and LSan does not use this API on Fuchsia. 
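MemoryMappingLayout::Reset() above uses the usual two-step kernel-snapshot protocol: a first _zx_object_get_info() call reports how many records exist, the buffer is sized to match, and the query repeats until the filled count catches up with the available count, since new mappings can appear between calls. A generic sketch of that retry loop, where the query callable is a hypothetical stand-in for the Zircon syscall:

#include <cstdio>
#include <vector>

// query(buf, cap, &filled, &avail) fills up to cap records, reports
// how many it wrote (*filled) and how many exist (*avail), and
// returns false on error.
template <typename T, typename QueryFn>
bool snapshot(std::vector<T> &out, QueryFn query) {
  size_t filled = 0, avail = 0;
  if (!query(nullptr, size_t{0}, &filled, &avail))  // first call: count only
    return false;
  do {
    out.resize(avail);
    if (!query(out.data(), out.size(), &filled, &avail)) {
      out.clear();
      return false;
    }
  } while (filled < avail);  // records appeared mid-snapshot; retry
  out.resize(filled);
  return true;
}

int main() {
  // Fake "kernel" exposing a fixed set of five records.
  auto fake_query = [](int *buf, size_t cap, size_t *filled, size_t *avail) {
    *avail = 5;
    *filled = cap < 5 ? cap : 5;
    for (size_t i = 0; i < *filled; ++i) buf[i] = (int)i;
    return true;
  };
  std::vector<int> maps;
  if (snapshot(maps, fake_query))
    std::printf("captured %zu records\n", maps.size());
}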
+void StopTheWorld(StopTheWorldCallback callback, void *argument) { + struct Params { + StopTheWorldCallback callback; + void *argument; + } params = {callback, argument}; + __sanitizer_memory_snapshot( + nullptr, nullptr, nullptr, nullptr, + [](zx_status_t, void *data) { + auto params = reinterpret_cast<Params *>(data); + params->callback({}, params->argument); + }, + ¶ms); +} + +} // namespace __sanitizer + +#endif // SANITIZER_FUCHSIA diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp index 9dffd21ecb7..6c577426ad5 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp @@ -50,7 +50,7 @@ struct RunThreadArgs { void *argument; }; -void RunThread(void *arg) { +void *RunThread(void *arg) { struct RunThreadArgs *run_args = (struct RunThreadArgs *)arg; SuspendedThreadsListMac suspended_threads_list; @@ -59,7 +59,7 @@ void RunThread(void *arg) { kern_return_t err = task_threads(mach_task_self(), &threads, &num_threads); if (err != KERN_SUCCESS) { VReport(1, "Failed to get threads for task (errno %d).\n", err); - return; + return nullptr; } thread_t thread_self = mach_thread_self(); @@ -76,6 +76,7 @@ void RunThread(void *arg) { for (unsigned int i = 0; i < num_suspended; ++i) { thread_resume(suspended_threads_list.GetThread(i)); } + return nullptr; } void StopTheWorld(StopTheWorldCallback callback, void *argument) { @@ -159,7 +160,11 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP( } internal_memcpy(buffer, ®s, sizeof(regs)); +#if defined(__aarch64__) && defined(arm_thread_state64_get_sp) + *sp = arm_thread_state64_get_sp(regs); +#else *sp = regs.SP_REG; +#endif // On x86_64 and aarch64, we must account for the stack redzone, which is 128 // bytes. diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp index ce2ece5f4d5..0c4b84c767a 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp @@ -126,4 +126,10 @@ Symbolizer::SymbolizerScope::~SymbolizerScope() { sym_->end_hook_(); } +void Symbolizer::LateInitializeTools() { + for (auto &tool : tools_) { + tool.LateInitialize(); + } +} + } // namespace __sanitizer diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h index 51648e2d0e8..2476b0ea7bf 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h @@ -209,6 +209,9 @@ class Symbolizer final { private: const Symbolizer *sym_; }; + + // Calls `LateInitialize()` on all items in `tools_`. + void LateInitializeTools(); }; #ifdef SANITIZER_WINDOWS diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h index c04797dd61b..e4c351e667b 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h @@ -69,6 +69,11 @@ class SymbolizerTool { virtual const char *Demangle(const char *name) { return nullptr; } + + // Called during the LateInitialize phase of Sanitizer initialization. 
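The Fuchsia StopTheWorld() above shows the standard trampoline idiom for C-style callback APIs: bundle the real callback and its argument into a local struct, pass its address through the void* slot, and unpack it inside a capture-less lambda, which implicitly converts to a plain function pointer. A minimal sketch:

#include <cstdio>

// A C-style API: plain function pointer plus opaque context.
using c_callback = void (*)(int status, void *data);
static void c_api_invoke(c_callback cb, void *data) { cb(0, data); }

static void run(void (*user_cb)(void *), void *user_arg) {
  struct Params {
    void (*cb)(void *);
    void *arg;
  } params = {user_cb, user_arg};
  c_api_invoke(
      [](int /*status*/, void *data) {
        auto *p = static_cast<Params *>(data);
        p->cb(p->arg);  // forward to the real callback
      },
      &params);
}

int main() {
  int hits = 0;
  run([](void *a) { ++*static_cast<int *>(a); }, &hits);
  std::printf("callback ran %d time(s)\n", hits);
}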
+ // Usually this is a safe place to call code that might need to use user + // memory allocators. + virtual void LateInitialize() {} }; // SymbolizerProcess encapsulates communication between the tool and @@ -86,6 +91,8 @@ class SymbolizerProcess { // Customizable by subclasses. virtual bool StartSymbolizerSubprocess(); virtual bool ReadFromSymbolizer(char *buffer, uptr max_length); + // Return the environment to run the symbolizer in. + virtual char **GetEnvP() { return GetEnviron(); } private: virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const { diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp index 3b19a6836ec..490c6fe89be 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp @@ -39,9 +39,9 @@ const char *ExtractToken(const char *str, const char *delims, char **result) { } const char *ExtractInt(const char *str, const char *delims, int *result) { - char *buff; + char *buff = nullptr; const char *ret = ExtractToken(str, delims, &buff); - if (buff != 0) { + if (buff) { *result = (int)internal_atoll(buff); } InternalFree(buff); @@ -49,9 +49,9 @@ const char *ExtractInt(const char *str, const char *delims, int *result) { } const char *ExtractUptr(const char *str, const char *delims, uptr *result) { - char *buff; + char *buff = nullptr; const char *ret = ExtractToken(str, delims, &buff); - if (buff != 0) { + if (buff) { *result = (uptr)internal_atoll(buff); } InternalFree(buff); @@ -59,9 +59,9 @@ const char *ExtractUptr(const char *str, const char *delims, uptr *result) { } const char *ExtractSptr(const char *str, const char *delims, sptr *result) { - char *buff; + char *buff = nullptr; const char *ret = ExtractToken(str, delims, &buff); - if (buff != 0) { + if (buff) { *result = (sptr)internal_atoll(buff); } InternalFree(buff); @@ -83,7 +83,7 @@ const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter, SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) { BlockingMutexLock l(&mu_); - const char *module_name; + const char *module_name = nullptr; uptr module_offset; ModuleArch arch; SymbolizedStack *res = SymbolizedStack::New(addr); @@ -103,7 +103,7 @@ SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) { bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) { BlockingMutexLock l(&mu_); - const char *module_name; + const char *module_name = nullptr; uptr module_offset; ModuleArch arch; if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset, @@ -124,7 +124,7 @@ bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) { bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) { BlockingMutexLock l(&mu_); - const char *module_name; + const char *module_name = nullptr; if (!FindModuleNameAndOffsetForAddress( addr, &module_name, &info->module_offset, &info->module_arch)) return false; @@ -175,7 +175,7 @@ bool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address, uptr *module_offset, ModuleArch *module_arch) { const LoadedModule *module = FindModuleForAddress(address); - if (module == nullptr) + if (!module) return false; *module_name = module->full_name(); *module_offset = address - module->base_address(); @@ -292,7 +292,7 @@ LLVMSymbolizer::LLVMSymbolizer(const char *path, LowLevelAllocator *allocator) // Windows, so extract tokens from the right hand side first. 
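SymbolizerTool::LateInitialize() above introduces a two-phase initialization hook: tools are constructed early, when calling into user-visible allocators may be unsafe, and allocation-heavy setup is deferred until the runtime invokes the hook. A condensed sketch of the shape of that pattern (class names here are illustrative):

#include <vector>

class Tool {
 public:
  virtual ~Tool() = default;
  // Default is a no-op, so only tools that need late setup override it.
  virtual void LateInitialize() {}
};

class Registry {
 public:
  void Register(Tool *t) { tools_.push_back(t); }
  // Called once the runtime is ready for allocation and syscalls.
  void LateInitializeTools() {
    for (Tool *t : tools_) t->LateInitialize();
  }

 private:
  std::vector<Tool *> tools_;
};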
The column info is // also optional. static const char *ParseFileLineInfo(AddressInfo *info, const char *str) { - char *file_line_info = 0; + char *file_line_info = nullptr; str = ExtractToken(str, "\n", &file_line_info); CHECK(file_line_info); @@ -323,7 +323,7 @@ void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res) { bool top_frame = true; SymbolizedStack *last = res; while (true) { - char *function_name = 0; + char *function_name = nullptr; str = ExtractToken(str, "\n", &function_name); CHECK(function_name); if (function_name[0] == '\0') { @@ -402,32 +402,29 @@ bool LLVMSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) { AddressInfo *info = &stack->info; const char *buf = FormatAndSendCommand( "CODE", info->module, info->module_offset, info->module_arch); - if (buf) { - ParseSymbolizePCOutput(buf, stack); - return true; - } - return false; + if (!buf) + return false; + ParseSymbolizePCOutput(buf, stack); + return true; } bool LLVMSymbolizer::SymbolizeData(uptr addr, DataInfo *info) { const char *buf = FormatAndSendCommand( "DATA", info->module, info->module_offset, info->module_arch); - if (buf) { - ParseSymbolizeDataOutput(buf, info); - info->start += (addr - info->module_offset); // Add the base address. - return true; - } - return false; + if (!buf) + return false; + ParseSymbolizeDataOutput(buf, info); + info->start += (addr - info->module_offset); // Add the base address. + return true; } bool LLVMSymbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) { const char *buf = FormatAndSendCommand( "FRAME", info->module, info->module_offset, info->module_arch); - if (buf) { - ParseSymbolizeFrameOutput(buf, &info->locals); - return true; - } - return false; + if (!buf) + return false; + ParseSymbolizeFrameOutput(buf, &info->locals); + return true; } const char *LLVMSymbolizer::FormatAndSendCommand(const char *command_prefix, @@ -435,21 +432,21 @@ const char *LLVMSymbolizer::FormatAndSendCommand(const char *command_prefix, uptr module_offset, ModuleArch arch) { CHECK(module_name); - if (arch == kModuleArchUnknown) { - if (internal_snprintf(buffer_, kBufferSize, "%s \"%s\" 0x%zx\n", - command_prefix, module_name, - module_offset) >= static_cast<int>(kBufferSize)) { - Report("WARNING: Command buffer too small"); - return nullptr; - } - } else { - if (internal_snprintf(buffer_, kBufferSize, "%s \"%s:%s\" 0x%zx\n", - command_prefix, module_name, ModuleArchToString(arch), - module_offset) >= static_cast<int>(kBufferSize)) { - Report("WARNING: Command buffer too small"); - return nullptr; - } + int size_needed = 0; + if (arch == kModuleArchUnknown) + size_needed = internal_snprintf(buffer_, kBufferSize, "%s \"%s\" 0x%zx\n", + command_prefix, module_name, module_offset); + else + size_needed = internal_snprintf(buffer_, kBufferSize, + "%s \"%s:%s\" 0x%zx\n", command_prefix, + module_name, ModuleArchToString(arch), + module_offset); + + if (size_needed >= static_cast<int>(kBufferSize)) { + Report("WARNING: Command buffer too small"); + return nullptr; } + return symbolizer_process_->SendCommand(buffer_); } @@ -492,16 +489,16 @@ const char *SymbolizerProcess::SendCommand(const char *command) { Report("WARNING: Failed to use and restart external symbolizer!\n"); failed_to_start_ = true; } - return 0; + return nullptr; } const char *SymbolizerProcess::SendCommandImpl(const char *command) { if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd) - return 0; + return nullptr; if (!WriteToSymbolizer(command, internal_strlen(command))) - return 0; + return nullptr; if 
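The FormatAndSendCommand() rewrite above hinges on snprintf() semantics: the return value is the length the full string would have had, so a result at or past the buffer size signals truncation and the partial command must not be sent. A small sketch of that check (function name and format are illustrative):

#include <cstddef>
#include <cstdio>

// Returns true only when the whole command fit in the buffer.
static bool format_command(char *buf, size_t bufsize, const char *module,
                           unsigned long long offset) {
  int needed =
      std::snprintf(buf, bufsize, "CODE \"%s\" 0x%llx\n", module, offset);
  return needed >= 0 && static_cast<size_t>(needed) < bufsize;
}

int main() {
  char buf[64];
  if (format_command(buf, sizeof(buf), "libfoo.so", 0x1234))
    std::printf("%s", buf);
}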
(!ReadFromSymbolizer(buffer_, kBufferSize)) - return 0; + return nullptr; return buffer_; } diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp index f26efe5c50b..29cbf62acd5 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp @@ -20,6 +20,7 @@ #include <dlfcn.h> #include <errno.h> +#include <mach/mach.h> #include <stdlib.h> #include <sys/wait.h> #include <unistd.h> @@ -50,18 +51,65 @@ bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) { return true; } +#define K_ATOS_ENV_VAR "__check_mach_ports_lookup" + +// This cannot live in `AtosSymbolizerProcess` because instances of that object +// are allocated by the internal allocator which under ASan is poisoned with +// kAsanInternalHeapMagic. +static char kAtosMachPortEnvEntry[] = K_ATOS_ENV_VAR "=000000000000000"; + class AtosSymbolizerProcess : public SymbolizerProcess { public: - explicit AtosSymbolizerProcess(const char *path, pid_t parent_pid) + explicit AtosSymbolizerProcess(const char *path) : SymbolizerProcess(path, /*use_posix_spawn*/ true) { - // Put the string command line argument in the object so that it outlives - // the call to GetArgV. - internal_snprintf(pid_str_, sizeof(pid_str_), "%d", parent_pid); + pid_str_[0] = '\0'; + } + + void LateInitialize() { + if (SANITIZER_IOSSIM) { + // `putenv()` may call malloc/realloc so it is only safe to do this + // during LateInitialize() or later (i.e. we can't do this in the + // constructor). We also can't do this in `StartSymbolizerSubprocess()` + // because in TSan we switch allocators when we're symbolizing. + // We use `putenv()` rather than `setenv()` so that we can later directly + // write into the storage without LibC getting involved to change what the + // variable is set to + int result = putenv(kAtosMachPortEnvEntry); + CHECK_EQ(result, 0); + } } private: bool StartSymbolizerSubprocess() override { // Configure sandbox before starting atos process. + + // Put the string command line argument in the object so that it outlives + // the call to GetArgV. + internal_snprintf(pid_str_, sizeof(pid_str_), "%d", internal_getpid()); + + if (SANITIZER_IOSSIM) { + // `atos` in the simulator is restricted in its ability to retrieve the + // task port for the target process (us) so we need to do extra work + // to pass our task port to it. + mach_port_t ports[]{mach_task_self()}; + kern_return_t ret = + mach_ports_register(mach_task_self(), ports, /*count=*/1); + CHECK_EQ(ret, KERN_SUCCESS); + + // Set environment variable that signals to `atos` that it should look + // for our task port. We can't call `setenv()` here because it might call + // malloc/realloc. To avoid that we instead update the + // `mach_port_env_var_entry_` variable with our current PID. + uptr count = internal_snprintf(kAtosMachPortEnvEntry, + sizeof(kAtosMachPortEnvEntry), + K_ATOS_ENV_VAR "=%s", pid_str_); + CHECK_GE(count, sizeof(K_ATOS_ENV_VAR) + internal_strlen(pid_str_)); + // Document our assumption but without calling `getenv()` in normal + // builds. 
+ DCHECK(getenv(K_ATOS_ENV_VAR)); + DCHECK_EQ(internal_strcmp(getenv(K_ATOS_ENV_VAR), pid_str_), 0); + } + return SymbolizerProcess::StartSymbolizerSubprocess(); } @@ -75,7 +123,7 @@ class AtosSymbolizerProcess : public SymbolizerProcess { argv[i++] = path_to_binary; argv[i++] = "-p"; argv[i++] = &pid_str_[0]; - if (GetMacosVersion() == MACOS_VERSION_MAVERICKS) { + if (GetMacosAlignedVersion() == MacosVersion(10, 9)) { // On Mavericks atos prints a deprecation warning which we suppress by // passing -d. The warning isn't present on other OSX versions, even the // newer ones. @@ -85,8 +133,14 @@ class AtosSymbolizerProcess : public SymbolizerProcess { } char pid_str_[16]; + // Space for `\0` in `K_ATOS_ENV_VAR` is reused for `=`. + static_assert(sizeof(kAtosMachPortEnvEntry) == + (sizeof(K_ATOS_ENV_VAR) + sizeof(pid_str_)), + "sizes should match"); }; +#undef K_ATOS_ENV_VAR + static bool ParseCommandOutput(const char *str, uptr addr, char **out_name, char **out_module, char **out_file, uptr *line, uptr *start_address) { @@ -138,7 +192,7 @@ static bool ParseCommandOutput(const char *str, uptr addr, char **out_name, } AtosSymbolizer::AtosSymbolizer(const char *path, LowLevelAllocator *allocator) - : process_(new(*allocator) AtosSymbolizerProcess(path, getpid())) {} + : process_(new (*allocator) AtosSymbolizerProcess(path)) {} bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) { if (!process_) return false; @@ -188,6 +242,8 @@ bool AtosSymbolizer::SymbolizeData(uptr addr, DataInfo *info) { return true; } +void AtosSymbolizer::LateInitialize() { process_->LateInitialize(); } + } // namespace __sanitizer #endif // SANITIZER_MAC diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h index 68521375e64..8996131fc13 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h @@ -35,6 +35,7 @@ class AtosSymbolizer : public SymbolizerTool { bool SymbolizePC(uptr addr, SymbolizedStack *stack) override; bool SymbolizeData(uptr addr, DataInfo *info) override; + void LateInitialize() override; private: AtosSymbolizerProcess *process_; diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp index 57b4d0c9d96..2963af95360 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp @@ -94,7 +94,9 @@ Symbolizer *Symbolizer::PlatformInit() { return new (symbolizer_allocator_) Symbolizer({}); } -void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); } +void Symbolizer::LateInitialize() { + Symbolizer::GetOrInit()->LateInitializeTools(); +} void StartReportDeadlySignal() {} void ReportDeadlySignal(const SignalContext &sig, u32 tid, diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp index c123ecb1120..3c379a84802 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp @@ -78,13 +78,6 @@ static void InitializeSwiftDemangler() { // Attempts to demangle a Swift name. The demangler will return nullptr if a // non-Swift name is passed in. 
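The kAtosMachPortEnvEntry machinery above exploits a documented putenv() property: unlike setenv(), it stores the caller's pointer rather than copying the string, so registering a static fixed-size NAME=value buffer once allows later updates by overwriting that buffer in place, with no malloc/realloc at update time. A sketch with an illustrative variable name:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// putenv() keeps a pointer to this storage, so the entry must stay
// alive for the life of the process; later writes into the buffer
// change the variable's value without any allocation.
static char g_env_entry[] = "MY_PORT_VAR=000000000000000";

int main() {
  if (putenv(g_env_entry) != 0) return 1;  // register once, early
  // Update in place: rewrite only the value part of "NAME=value".
  const size_t name_len = sizeof("MY_PORT_VAR=") - 1;
  std::snprintf(g_env_entry + name_len, sizeof(g_env_entry) - name_len,
                "%d", 4242);
  std::printf("%s\n", getenv("MY_PORT_VAR"));  // prints 4242
}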
const char *DemangleSwift(const char *name) { - if (!name) return nullptr; - - // Check if we are dealing with a Swift mangled name first. - if (name[0] != '_' || name[1] != 'T') { - return nullptr; - } - if (swift_demangle_f) return swift_demangle_f(name, internal_strlen(name), 0, 0, 0); @@ -151,9 +144,19 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() { GetArgV(path_, argv); pid_t pid; + // Report how symbolizer is being launched for debugging purposes. + if (Verbosity() >= 3) { + // Only use `Report` for first line so subsequent prints don't get prefixed + // with current PID. + Report("Launching Symbolizer process: "); + for (unsigned index = 0; index < kArgVMax && argv[index]; ++index) + Printf("%s ", argv[index]); + Printf("\n"); + } + if (use_posix_spawn_) { #if SANITIZER_MAC - fd_t fd = internal_spawn(argv, &pid); + fd_t fd = internal_spawn(argv, const_cast<const char **>(GetEnvP()), &pid); if (fd == kInvalidFd) { Report("WARNING: failed to spawn external symbolizer (errno: %d)\n", errno); @@ -173,7 +176,7 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() { return false; } - pid = StartSubprocess(path_, argv, /* stdin */ outfd[0], + pid = StartSubprocess(path_, argv, GetEnvP(), /* stdin */ outfd[0], /* stdout */ infd[1]); if (pid < 0) { internal_close(infd[0]); @@ -311,9 +314,10 @@ class Addr2LinePool : public SymbolizerTool { #if SANITIZER_SUPPORTS_WEAK_HOOKS extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -bool __sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset, - char *Buffer, int MaxLength); +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool +__sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset, + char *Buffer, int MaxLength, + bool SymbolizeInlineFrames); SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset, char *Buffer, int MaxLength); @@ -336,7 +340,8 @@ class InternalSymbolizer : public SymbolizerTool { bool SymbolizePC(uptr addr, SymbolizedStack *stack) override { bool result = __sanitizer_symbolize_code( - stack->info.module, stack->info.module_offset, buffer_, kBufferSize); + stack->info.module, stack->info.module_offset, buffer_, kBufferSize, + common_flags()->symbolize_inline_frames); if (result) ParseSymbolizePCOutput(buffer_, stack); return result; } @@ -478,7 +483,7 @@ Symbolizer *Symbolizer::PlatformInit() { } void Symbolizer::LateInitialize() { - Symbolizer::GetOrInit(); + Symbolizer::GetOrInit()->LateInitializeTools(); InitializeSwiftDemangler(); } diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp index 2808779156e..373437e7ee2 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp @@ -310,7 +310,7 @@ Symbolizer *Symbolizer::PlatformInit() { } void Symbolizer::LateInitialize() { - Symbolizer::GetOrInit(); + Symbolizer::GetOrInit()->LateInitializeTools(); } } // namespace __sanitizer diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp index 36dde49d870..fca15beb616 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp @@ -94,6 +94,10 @@ uptr internal_getpid() { return GetProcessId(GetCurrentProcess()); } +int internal_dlinfo(void *handle, int 
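The SANITIZER_WEAK_ATTRIBUTE declarations above rely on the weak-symbol hook pattern: the runtime declares the function weak and calls it only if a strong definition was linked into the binary; otherwise the symbol resolves to a null address. A sketch of the idiom for GCC/Clang toolchains (the hook name is illustrative):

// Declared weak: if no definition is linked in, the address is null.
extern "C" __attribute__((weak)) bool my_symbolize_hook(
    const char *module, unsigned long long offset, char *buf, int len);

bool try_symbolize(const char *module, unsigned long long offset, char *buf,
                   int len) {
  if (!my_symbolize_hook)  // no implementation present in this binary
    return false;
  return my_symbolize_hook(module, offset, buf, len);
}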
request, void *p) { + UNIMPLEMENTED(); +} + // In contrast to POSIX, on Windows GetCurrentThreadId() // returns a system-unique identifier. tid_t GetTid() { @@ -787,7 +791,7 @@ uptr GetRSS() { return counters.WorkingSetSize; } -void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; } +void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; } void internal_join_thread(void *th) { } // ---------------------- BlockingMutex ---------------- {{{1 @@ -1060,7 +1064,8 @@ char **GetEnviron() { } pid_t StartSubprocess(const char *program, const char *const argv[], - fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) { + const char *const envp[], fd_t stdin_fd, fd_t stdout_fd, + fd_t stderr_fd) { // FIXME: implement on this platform // Should be implemented based on // SymbolizerProcess::StartSymbolizerSubprocess diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/scripts/check_lint.sh b/gnu/llvm/compiler-rt/lib/sanitizer_common/scripts/check_lint.sh index 4a2febab461..630658113f6 100755 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/scripts/check_lint.sh +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/scripts/check_lint.sh @@ -19,7 +19,7 @@ fi COMMON_LINT_FILTER=-build/include,-build/header_guard,-legal/copyright,-whitespace/comments,-readability/casting,\ -build/namespaces,-build/c++11,-runtime/int -COMMON_LIT_TEST_LINT_FILTER=-whitespace/indent,-whitespace/line_length,-runtime/arrays +COMMON_LIT_TEST_LINT_FILTER=-whitespace/indent,-whitespace/line_length,-runtime/arrays,-readability/braces ASAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER} ASAN_TEST_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/printf,-runtime/threadsafe_fn diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/scripts/cpplint.py b/gnu/llvm/compiler-rt/lib/sanitizer_common/scripts/cpplint.py index 65baa6cc160..1262e5b12e7 100755 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/scripts/cpplint.py +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/scripts/cpplint.py @@ -3866,8 +3866,8 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error): # Block bodies should not be followed by a semicolon. Due to C++11 # brace initialization, there are more places where semicolons are - # required than not, so we use a whitelist approach to check these - # rather than a blacklist. These are the places where "};" should + # required than not, so we use an allowed list approach to check these + # rather than an exclusion list. These are the places where "};" should # be replaced by just "}": # 1. Some flavor of block following closing parenthesis: # for (;;) {}; @@ -3924,11 +3924,11 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error): # - INTERFACE_DEF # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: # - # We implement a whitelist of safe macros instead of a blacklist of + # We implement a list of allowed safe macros instead of a list of # unsafe macros, even though the latter appears less frequently in # google code and would have been easier to implement. This is because - # the downside for getting the whitelist wrong means some extra - # semicolons, while the downside for getting the blacklist wrong + # the downside for getting the allowed list wrong means some extra + # semicolons, while the downside for getting the exclusion list wrong # would result in compile errors.
# # In addition to macros, we also don't want to warn on @@ -5124,19 +5124,19 @@ def CheckForNonConstReference(filename, clean_lines, linenum, # # We also accept & in static_assert, which looks like a function but # it's actually a declaration expression. - whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' + allowed_functions = (r'(?:[sS]wap(?:<\w:+>)?|' r'operator\s*[<>][<>]|' r'static_assert|COMPILE_ASSERT' r')\s*\(') - if Search(whitelisted_functions, line): + if Search(allowed_functions, line): return elif not Search(r'\S+\([^)]*$', line): - # Don't see a whitelisted function on this line. Actually we + # Don't see an allowed function entry on this line. Actually we # didn't see any function name on this line, so this is likely a # multi-line parameter list. Try a bit harder to catch this case. for i in xrange(2): if (linenum > i and - Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): + Search(allowed_functions, clean_lines.elided[linenum - i - 1])): return decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp index ba285bc1e88..4902be0bf51 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include <stdio.h> + #include <string> #include "llvm/DebugInfo/Symbolize/DIPrinter.h" @@ -32,17 +33,25 @@ extern "C" { typedef uint64_t u64; bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset, - char *Buffer, int MaxLength) { + char *Buffer, int MaxLength, + bool SymbolizeInlineFrames) { std::string Result; { llvm::raw_string_ostream OS(Result); llvm::symbolize::DIPrinter Printer(OS); // TODO: it is necessary to set proper SectionIndex here. // object::SectionedAddress::UndefSection works only for absolute addresses. - auto ResOrErr = getDefaultSymbolizer()->symbolizeInlinedCode( - ModuleName, - {ModuleOffset, llvm::object::SectionedAddress::UndefSection}); - Printer << (ResOrErr ? ResOrErr.get() : llvm::DIInliningInfo()); + if (SymbolizeInlineFrames) { + auto ResOrErr = getDefaultSymbolizer()->symbolizeInlinedCode( + ModuleName, + {ModuleOffset, llvm::object::SectionedAddress::UndefSection}); + Printer << (ResOrErr ? ResOrErr.get() : llvm::DIInliningInfo()); + } else { + auto ResOrErr = getDefaultSymbolizer()->symbolizeCode( + ModuleName, + {ModuleOffset, llvm::object::SectionedAddress::UndefSection}); + Printer << (ResOrErr ?
ResOrErr.get() : llvm::DILineInfo()); + } } return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s", Result.c_str()) < MaxLength; diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh index be79f1df64b..a0aa79ee54b 100755 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh @@ -165,6 +165,8 @@ $SCRIPT_DIR/ar_to_bc.sh $LIBCXX_BUILD/lib/libc++.a \ $LLVM_BUILD/lib/libLLVMDebugInfoDWARF.a \ $LLVM_BUILD/lib/libLLVMSupport.a \ $LLVM_BUILD/lib/libLLVMDebugInfoPDB.a \ + $LLVM_BUILD/lib/libLLVMDebugInfoMSF.a \ + $LLVM_BUILD/lib/libLLVMDebugInfoCodeView.a \ $LLVM_BUILD/lib/libLLVMDemangle.a \ $LLVM_BUILD/lib/libLLVMMC.a \ $LLVM_BUILD/lib/libLLVMTextAPI.a \ diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt b/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt index fa42e2a0196..c3f41f19c36 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt @@ -30,6 +30,7 @@ __interceptor_pthread_once w __interceptor_pthread_setspecific w __interceptor_read w __interceptor_realpath w +__isinf U __moddi3 U __sanitizer_symbolize_code T __sanitizer_symbolize_data T @@ -47,6 +48,7 @@ catclose U catgets U catopen U ceil U +ceilf U clock_gettime U cfgetospeed U dl_iterate_phdr U @@ -151,6 +153,7 @@ vasprintf U vfprintf U vsnprintf U vsscanf U +wait4 U waitpid U wcrtomb U wcslen U diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt index 0726280e418..3c504022ebe 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt @@ -21,6 +21,7 @@ set(SANITIZER_UNITTESTS sanitizer_libc_test.cpp sanitizer_linux_test.cpp sanitizer_list_test.cpp + sanitizer_mac_test.cpp sanitizer_mutex_test.cpp sanitizer_nolibc_test.cpp sanitizer_posix_test.cpp diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp index ff1f7f9f5d2..baf9b37fb95 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp @@ -1009,7 +1009,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) { // Don't test OOM conditions on Win64 because it causes other tests on the same // machine to OOM. 
#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID -typedef __sanitizer::SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap; +typedef __sanitizer::SizeClassMap<3, 4, 8, 38, 128, 16> SpecialSizeClassMap; template <typename AddressSpaceViewTy = LocalAddressSpaceView> struct AP64_SpecialSizeClassMap { static const uptr kSpaceBeg = kAllocatorSpace; diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_linux_test.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_linux_test.cpp index 1d8e7e8af26..cb6c0724ac8 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_linux_test.cpp +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_linux_test.cpp @@ -264,7 +264,7 @@ TEST(SanitizerCommon, StartSubprocessTest) { const char *shell = "/bin/sh"; #endif const char *argv[] = {shell, "-c", "echo -n 'hello'", (char *)NULL}; - int pid = StartSubprocess(shell, argv, + int pid = StartSubprocess(shell, argv, GetEnviron(), /* stdin */ kInvalidFd, /* stdout */ pipe_fds[1]); ASSERT_GT(pid, 0); diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_mac_test.cpp b/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_mac_test.cpp new file mode 100644 index 00000000000..c8658ea55d0 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_mac_test.cpp @@ -0,0 +1,62 @@ +//===-- sanitizer_mac_test.cpp --------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Tests for sanitizer_mac.{h,cpp} +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include "sanitizer_common/sanitizer_mac.h" + +#include "gtest/gtest.h" + +#include <sys/sysctl.h> // sysctlbyname +#include <mach/kern_return.h> // KERN_SUCCESS + +namespace __sanitizer { + +TEST(SanitizerMac, GetMacosAlignedVersion) { + MacosVersion vers = GetMacosAlignedVersion(); + u16 kernel_major = GetDarwinKernelVersion().major; + bool macos_11 = (kernel_major >= 20); + u16 expected_major = macos_11 ? (kernel_major - 9) : 10; + u16 expected_minor = macos_11 ? 0 : (kernel_major - 4); + EXPECT_EQ(vers.major, expected_major); + EXPECT_EQ(vers.minor, expected_minor); +} + +void ParseVersion(const char *vers, u16 *major, u16 *minor); + +TEST(SanitizerMac, ParseVersion) { + u16 major, minor; + ParseVersion("11.22.33", &major, &minor); + EXPECT_EQ(major, 11); + EXPECT_EQ(minor, 22); +} + +TEST(SanitizerMac, GetDarwinKernelVersion) { + DarwinKernelVersion vers = GetDarwinKernelVersion(); + std::ostringstream oss; + oss << vers.major << '.' 
<< vers.minor; + std::string actual = oss.str(); + + char buf[100]; + size_t len = sizeof(buf); + int res = sysctlbyname("kern.osrelease", buf, &len, nullptr, 0); + ASSERT_EQ(res, KERN_SUCCESS); + std::string expected(buf); + + // Prefix match + ASSERT_EQ(expected.compare(0, actual.size(), actual), 0); +} + +} // namespace __sanitizer + +#endif // SANITIZER_MAC diff --git a/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_pthread_wrappers.h b/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_pthread_wrappers.h index f806ee1ea4f..5c8d3c27dc9 100644 --- a/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_pthread_wrappers.h +++ b/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/sanitizer_pthread_wrappers.h @@ -35,9 +35,9 @@ struct PthreadHelperCreateThreadInfo { inline DWORD WINAPI PthreadHelperThreadProc(void *arg) { PthreadHelperCreateThreadInfo *start_data = reinterpret_cast<PthreadHelperCreateThreadInfo*>(arg); - void *ret = (start_data->start_routine)(start_data->arg); + (start_data->start_routine)(start_data->arg); delete start_data; - return (DWORD)ret; + return 0; } inline void PTHREAD_CREATE(pthread_t *thread, void *attr, @@ -60,7 +60,7 @@ inline void PTHREAD_JOIN(pthread_t thread, void **value_ptr) { inline void pthread_exit(void *retval) { ASSERT_EQ(0, retval) << "Nonzero retval is not supported yet."; - ExitThread((DWORD)retval); + ExitThread(0); } #endif // _WIN32 diff --git a/gnu/llvm/compiler-rt/lib/scudo/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/scudo/CMakeLists.txt index 2a560b8fcb7..c50ea0233fa 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/scudo/CMakeLists.txt @@ -38,7 +38,8 @@ if (COMPILER_RT_HAS_GWP_ASAN) # parsing mechanism of sanitizer_common. Once Scudo has its own flag parsing, # and parses GwpAsan options, you can remove this dependency. list(APPEND SCUDO_MINIMAL_OBJECT_LIBS RTGwpAsan RTGwpAsanOptionsParser - RTGwpAsanBacktraceLibc) + RTGwpAsanBacktraceLibc + RTGwpAsanSegvHandler) list(APPEND SCUDO_CFLAGS -DGWP_ASAN_HOOKS) endif() diff --git a/gnu/llvm/compiler-rt/lib/scudo/scudo_allocator.cpp b/gnu/llvm/compiler-rt/lib/scudo/scudo_allocator.cpp index b2ebc970593..d9023c2f7ab 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/scudo_allocator.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/scudo_allocator.cpp @@ -674,8 +674,12 @@ void initScudo() { gwp_asan::options::initOptions(); gwp_asan::options::Options &Opts = gwp_asan::options::getOptions(); Opts.Backtrace = gwp_asan::options::getBacktraceFunction(); - Opts.PrintBacktrace = gwp_asan::options::getPrintBacktraceFunction(); GuardedAlloc.init(Opts); + + if (Opts.InstallSignalHandlers) + gwp_asan::crash_handler::installSignalHandlers( + &GuardedAlloc, __sanitizer::Printf, + gwp_asan::options::getPrintBacktraceFunction(), Opts.Backtrace); #endif // GWP_ASAN_HOOKS } diff --git a/gnu/llvm/compiler-rt/lib/scudo/scudo_utils.cpp b/gnu/llvm/compiler-rt/lib/scudo/scudo_utils.cpp index 5e76a4a30f1..f31d68058ac 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/scudo_utils.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/scudo_utils.cpp @@ -62,6 +62,14 @@ FORMAT(1, 2) void NORETURN dieWithMessage(const char *Format, ...) { # ifndef bit_SSE4_2 # define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines. # endif + +#ifndef signature_HYGON_ebx // They are not defined in gcc. +// HYGON: "HygonGenuine". 
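The three magic numbers are just the CPUID vendor string "HygonGenuine" read back as little-endian 32-bit words: CPUID leaf 0 returns the 12-byte string split across EBX, EDX and ECX, four bytes per register, in that order. A minimal sketch of the derivation (assumes a little-endian host, which is a given on x86):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const char Vendor[] = "HygonGenuine"; // EBX:"Hygo" EDX:"nGen" ECX:"uine"
  uint32_t Ebx, Edx, Ecx;
  std::memcpy(&Ebx, Vendor + 0, 4); // 'H','y','g','o' -> 0x6f677948
  std::memcpy(&Edx, Vendor + 4, 4); // 'n','G','e','n' -> 0x6e65476e
  std::memcpy(&Ecx, Vendor + 8, 4); // 'u','i','n','e' -> 0x656e6975
  assert(Ebx == 0x6f677948 && Edx == 0x6e65476e && Ecx == 0x656e6975);
}

The same constants are added to checksum.cpp in the scudo standalone variant further below.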
+#define signature_HYGON_ebx 0x6f677948 +#define signature_HYGON_edx 0x6e65476e +#define signature_HYGON_ecx 0x656e6975 +#endif + bool hasHardwareCRC32() { u32 Eax, Ebx, Ecx, Edx; __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx); @@ -71,7 +79,10 @@ bool hasHardwareCRC32() { const bool IsAMD = (Ebx == signature_AMD_ebx) && (Edx == signature_AMD_edx) && (Ecx == signature_AMD_ecx); - if (!IsIntel && !IsAMD) + const bool IsHygon = (Ebx == signature_HYGON_ebx) && + (Edx == signature_HYGON_edx) && + (Ecx == signature_HYGON_ecx); + if (!IsIntel && !IsAMD && !IsHygon) return false; __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx); return !!(Ecx & bit_SSE4_2); diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/scudo/standalone/CMakeLists.txt index a7249d1854a..bdaeb569efd 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/CMakeLists.txt @@ -1,10 +1,9 @@ add_compiler_rt_component(scudo_standalone) -# FIXME: GWP-ASan is temporarily disabled, re-enable once issues are fixed. -if (FALSE AND COMPILER_RT_HAS_GWP_ASAN) +if (COMPILER_RT_HAS_GWP_ASAN) add_dependencies(scudo_standalone gwp_asan) endif() -include_directories(../..) +include_directories(../.. include) set(SCUDO_CFLAGS) @@ -20,6 +19,13 @@ append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding SCUDO_CFLAGS) append_list_if(COMPILER_RT_HAS_FVISIBILITY_HIDDEN_FLAG -fvisibility=hidden SCUDO_CFLAGS) +if (COMPILER_RT_HAS_GWP_ASAN) + append_list_if(COMPILER_RT_HAS_OMIT_FRAME_POINTER_FLAG -fno-omit-frame-pointer + SCUDO_CFLAGS) + append_list_if(COMPILER_RT_HAS_OMIT_FRAME_POINTER_FLAG + -mno-omit-leaf-frame-pointer SCUDO_CFLAGS) +endif() # COMPILER_RT_HAS_GWP_ASAN + if(COMPILER_RT_DEBUG) list(APPEND SCUDO_CFLAGS -O0) else() @@ -50,7 +56,6 @@ set(SCUDO_HEADERS flags.h flags_parser.h fuchsia.h - interface.h internal_defs.h linux.h list.h @@ -72,6 +77,8 @@ set(SCUDO_HEADERS vector.h wrappers_c_checks.h wrappers_c.h + + include/scudo/interface.h ) set(SCUDO_SOURCES @@ -82,6 +89,7 @@ set(SCUDO_SOURCES flags_parser.cpp fuchsia.cpp linux.cpp + release.cpp report.cpp string_utils.cpp ) @@ -107,8 +115,9 @@ set(SCUDO_SOURCES_CXX_WRAPPERS set(SCUDO_OBJECT_LIBS) -if (FALSE AND COMPILER_RT_HAS_GWP_ASAN) - list(APPEND SCUDO_OBJECT_LIBS RTGwpAsan) +if (COMPILER_RT_HAS_GWP_ASAN) + list(APPEND SCUDO_OBJECT_LIBS + RTGwpAsan RTGwpAsanBacktraceLibc RTGwpAsanSegvHandler) list(APPEND SCUDO_CFLAGS -DGWP_ASAN_HOOKS) endif() diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/allocator_config.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/allocator_config.h index 3a5aaae7367..ad2a17ef701 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/allocator_config.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/allocator_config.h @@ -32,20 +32,23 @@ struct DefaultConfig { // 512KB regions typedef SizeClassAllocator32<SizeClassMap, 19U> Primary; #endif - typedef MapAllocator<> Secondary; + typedef MapAllocator<MapAllocatorCache<>> Secondary; template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive }; struct AndroidConfig { using SizeClassMap = AndroidSizeClassMap; #if SCUDO_CAN_USE_PRIMARY64 - // 1GB regions - typedef SizeClassAllocator64<SizeClassMap, 30U> Primary; + // 256MB regions + typedef SizeClassAllocator64<SizeClassMap, 28U, 1000, 1000, + /*MaySupportMemoryTagging=*/true> + Primary; #else - // 512KB regions - typedef SizeClassAllocator32<SizeClassMap, 19U> Primary; + // 256KB regions + typedef SizeClassAllocator32<SizeClassMap, 18U, 1000, 1000> Primary; 
#endif - typedef MapAllocator<> Secondary; + // Cache blocks up to 2MB + typedef MapAllocator<MapAllocatorCache<32U, 2UL << 20, 0, 1000>> Secondary; template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs. }; @@ -53,13 +56,13 @@ struct AndroidConfig { struct AndroidSvelteConfig { using SizeClassMap = SvelteSizeClassMap; #if SCUDO_CAN_USE_PRIMARY64 - // 512MB regions - typedef SizeClassAllocator64<SizeClassMap, 29U> Primary; + // 128MB regions + typedef SizeClassAllocator64<SizeClassMap, 27U, 1000, 1000> Primary; #else // 64KB regions - typedef SizeClassAllocator32<SizeClassMap, 16U> Primary; + typedef SizeClassAllocator32<SizeClassMap, 16U, 1000, 1000> Primary; #endif - typedef MapAllocator<0U> Secondary; + typedef MapAllocator<MapAllocatorCache<4U, 1UL << 18, 0, 0>> Secondary; template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD. }; @@ -68,7 +71,7 @@ struct AndroidSvelteConfig { struct FuchsiaConfig { // 1GB Regions typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary; - typedef MapAllocator<0U> Secondary; + typedef MapAllocator<MapAllocatorNoCache> Secondary; template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 8U>; // Shared, max 8 TSDs. }; diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/atomic_helpers.h index 6c84ba86ed3..1ea1a86ae50 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/atomic_helpers.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/atomic_helpers.h @@ -51,7 +51,7 @@ struct atomic_u32 { struct atomic_u64 { typedef u64 Type; // On 32-bit platforms u64 is not necessarily aligned on 8 bytes. - ALIGNED(8) volatile Type ValDoNotUse; + alignas(8) volatile Type ValDoNotUse; }; struct atomic_uptr { diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/benchmarks/malloc_benchmark.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/benchmarks/malloc_benchmark.cpp index 713820437b0..ce48dc02f7a 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/benchmarks/malloc_benchmark.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/benchmarks/malloc_benchmark.cpp @@ -69,12 +69,14 @@ static void BM_malloc_free_loop(benchmark::State &State) { void *Ptrs[NumIters]; for (auto _ : State) { + size_t SizeLog2 = 0; for (void *&Ptr : Ptrs) { - Ptr = Allocator->allocate(8192, scudo::Chunk::Origin::Malloc); + Ptr = Allocator->allocate(1 << SizeLog2, scudo::Chunk::Origin::Malloc); auto *Data = reinterpret_cast<uint8_t *>(Ptr); - for (size_t I = 0; I < 8192; I += PageSize) + for (size_t I = 0; I < 1 << SizeLog2; I += PageSize) Data[I] = 1; benchmark::DoNotOptimize(Ptr); + SizeLog2 = (SizeLog2 + 1) % 16; } for (void *&Ptr : Ptrs) Allocator->deallocate(Ptr, scudo::Chunk::Origin::Malloc); diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/bytemap.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/bytemap.h index a03a0c47106..e0d54f4e597 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/bytemap.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/bytemap.h @@ -17,12 +17,10 @@ namespace scudo { template <uptr Size> class FlatByteMap { public: - void initLinkerInitialized() { - Map = reinterpret_cast<u8 *>(map(nullptr, Size, "scudo:bytemap")); - } - void init() { initLinkerInitialized(); } + void initLinkerInitialized() {} + void init() { memset(Map, 0, sizeof(Map)); } - void unmapTestOnly() { unmap(reinterpret_cast<void *>(Map), Size); } + void unmapTestOnly() {} void set(uptr Index, u8 Value) { DCHECK_LT(Index, Size); @@ -38,78 +36,7 @@ public: 
void enable() {} private: - u8 *Map; -}; - -template <uptr Level1Size, uptr Level2Size> class TwoLevelByteMap { -public: - void initLinkerInitialized() { - Level1Map = reinterpret_cast<atomic_uptr *>( - map(nullptr, sizeof(atomic_uptr) * Level1Size, "scudo:bytemap")); - } - void init() { - Mutex.init(); - initLinkerInitialized(); - } - - void reset() { - for (uptr I = 0; I < Level1Size; I++) { - u8 *P = get(I); - if (!P) - continue; - unmap(P, Level2Size); - } - memset(Level1Map, 0, sizeof(atomic_uptr) * Level1Size); - } - - void unmapTestOnly() { - reset(); - unmap(reinterpret_cast<void *>(Level1Map), - sizeof(atomic_uptr) * Level1Size); - } - - uptr size() const { return Level1Size * Level2Size; } - - void set(uptr Index, u8 Value) { - DCHECK_LT(Index, Level1Size * Level2Size); - u8 *Level2Map = getOrCreate(Index / Level2Size); - DCHECK_EQ(0U, Level2Map[Index % Level2Size]); - Level2Map[Index % Level2Size] = Value; - } - - u8 operator[](uptr Index) const { - DCHECK_LT(Index, Level1Size * Level2Size); - u8 *Level2Map = get(Index / Level2Size); - if (!Level2Map) - return 0; - return Level2Map[Index % Level2Size]; - } - - void disable() { Mutex.lock(); } - void enable() { Mutex.unlock(); } - -private: - u8 *get(uptr Index) const { - DCHECK_LT(Index, Level1Size); - return reinterpret_cast<u8 *>( - atomic_load(&Level1Map[Index], memory_order_acquire)); - } - - u8 *getOrCreate(uptr Index) { - u8 *Res = get(Index); - if (!Res) { - ScopedLock L(Mutex); - if (!(Res = get(Index))) { - Res = reinterpret_cast<u8 *>(map(nullptr, Level2Size, "scudo:bytemap")); - atomic_store(&Level1Map[Index], reinterpret_cast<uptr>(Res), - memory_order_release); - } - } - return Res; - } - - atomic_uptr *Level1Map; - HybridMutex Mutex; + u8 Map[Size]; }; } // namespace scudo diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/checksum.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/checksum.cpp index 5de049a0931..05d4ba54bfc 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/checksum.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/checksum.cpp @@ -31,6 +31,13 @@ Checksum HashAlgorithm = {Checksum::BSD}; #define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines. #endif +#ifndef signature_HYGON_ebx // They are not defined in gcc. +// HYGON: "HygonGenuine". 
+#define signature_HYGON_ebx 0x6f677948 +#define signature_HYGON_edx 0x6e65476e +#define signature_HYGON_ecx 0x656e6975 +#endif + bool hasHardwareCRC32() { u32 Eax, Ebx = 0, Ecx = 0, Edx = 0; __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx); @@ -39,7 +46,10 @@ bool hasHardwareCRC32() { (Ecx == signature_INTEL_ecx); const bool IsAMD = (Ebx == signature_AMD_ebx) && (Edx == signature_AMD_edx) && (Ecx == signature_AMD_ecx); - if (!IsIntel && !IsAMD) + const bool IsHygon = (Ebx == signature_HYGON_ebx) && + (Edx == signature_HYGON_edx) && + (Ecx == signature_HYGON_ecx); + if (!IsIntel && !IsAMD && !IsHygon) return false; __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx); return !!(Ecx & bit_SSE4_2); diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/combined.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/combined.h index a0b4b2973e9..3bb41eca88f 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/combined.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/combined.h @@ -13,28 +13,36 @@ #include "common.h" #include "flags.h" #include "flags_parser.h" -#include "interface.h" #include "local_cache.h" +#include "memtag.h" #include "quarantine.h" #include "report.h" #include "secondary.h" +#include "stack_depot.h" #include "string_utils.h" #include "tsd.h" +#include "scudo/interface.h" + #ifdef GWP_ASAN_HOOKS #include "gwp_asan/guarded_pool_allocator.h" -// GWP-ASan is declared here in order to avoid indirect call overhead. It's also -// instantiated outside of the Allocator class, as the allocator is only -// zero-initialised. GWP-ASan requires constant initialisation, and the Scudo -// allocator doesn't have a constexpr constructor (see discussion here: -// https://reviews.llvm.org/D69265#inline-624315). -static gwp_asan::GuardedPoolAllocator GuardedAlloc; +#include "gwp_asan/optional/backtrace.h" +#include "gwp_asan/optional/segv_handler.h" #endif // GWP_ASAN_HOOKS extern "C" inline void EmptyCallback() {} +#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE +// This function is not part of the NDK so it does not appear in any public +// header files. We only declare/use it when targeting the platform. +extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf, + size_t num_entries); +#endif + namespace scudo { +enum class Option { ReleaseInterval }; + template <class Params, void (*PostInitCallback)(void) = EmptyCallback> class Allocator { public: @@ -139,20 +147,29 @@ public: // Store some flags locally. Options.MayReturnNull = getFlags()->may_return_null; - Options.ZeroContents = getFlags()->zero_contents; + Options.FillContents = + getFlags()->zero_contents + ? ZeroFill + : (getFlags()->pattern_fill_contents ? PatternOrZeroFill : NoFill); Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch; Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch; + Options.TrackAllocationStacks = false; Options.QuarantineMaxChunkSize = static_cast<u32>(getFlags()->quarantine_max_chunk_size); Stats.initLinkerInitialized(); - Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms); - Secondary.initLinkerInitialized(&Stats); + const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms; + Primary.initLinkerInitialized(ReleaseToOsIntervalMs); + Secondary.initLinkerInitialized(&Stats, ReleaseToOsIntervalMs); Quarantine.init( static_cast<uptr>(getFlags()->quarantine_size_kb << 10), static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10)); + } + // Initialize the embedded GWP-ASan instance. 
Requires the main allocator to + // be functional, best called from PostInitCallback. + void initGwpAsan() { #ifdef GWP_ASAN_HOOKS gwp_asan::options::Options Opt; Opt.Enabled = getFlags()->GWP_ASAN_Enabled; @@ -165,8 +182,17 @@ public: getFlags()->GWP_ASAN_MaxSimultaneousAllocations; Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate; Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers; - Opt.Printf = Printf; + // Embedded GWP-ASan is locked through the Scudo atfork handler (via + // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork + // handler. + Opt.InstallForkHandlers = false; + Opt.Backtrace = gwp_asan::options::getBacktraceFunction(); GuardedAlloc.init(Opt); + + if (Opt.InstallSignalHandlers) + gwp_asan::crash_handler::installSignalHandlers( + &GuardedAlloc, Printf, gwp_asan::options::getPrintBacktraceFunction(), + Opt.Backtrace); #endif // GWP_ASAN_HOOKS } @@ -175,6 +201,11 @@ public: void unmapTestOnly() { TSDRegistry.unmapTestOnly(); Primary.unmapTestOnly(); +#ifdef GWP_ASAN_HOOKS + if (getFlags()->GWP_ASAN_InstallSignalHandlers) + gwp_asan::crash_handler::uninstallSignalHandlers(); + GuardedAlloc.uninitTestOnly(); +#endif // GWP_ASAN_HOOKS } TSDRegistryT *getTSDRegistry() { return &TSDRegistry; } @@ -195,6 +226,27 @@ public: TSD->Cache.destroy(&Stats); } + ALWAYS_INLINE void *untagPointerMaybe(void *Ptr) { + if (Primary.SupportsMemoryTagging) + return reinterpret_cast<void *>( + untagPointer(reinterpret_cast<uptr>(Ptr))); + return Ptr; + } + + NOINLINE u32 collectStackTrace() { +#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE + // Discard collectStackTrace() frame and allocator function frame. + constexpr uptr DiscardFrames = 2; + uptr Stack[MaxTraceSize + DiscardFrames]; + uptr Size = + android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames); + Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames); + return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size); +#else + return 0; +#endif + } + NOINLINE void *allocate(uptr Size, Chunk::Origin Origin, uptr Alignment = MinAlignment, bool ZeroContents = false) { @@ -207,7 +259,8 @@ public: } #endif // GWP_ASAN_HOOKS - ZeroContents |= static_cast<bool>(Options.ZeroContents); + FillContentsMode FillContents = + ZeroContents ? ZeroFill : Options.FillContents; if (UNLIKELY(Alignment > MaxAlignment)) { if (Options.MayReturnNull) @@ -235,22 +288,36 @@ public: } DCHECK_LE(Size, NeededSize); - void *Block; - uptr ClassId; - uptr BlockEnd; + void *Block = nullptr; + uptr ClassId = 0; + uptr SecondaryBlockEnd; if (LIKELY(PrimaryT::canAllocate(NeededSize))) { ClassId = SizeClassMap::getClassIdBySize(NeededSize); DCHECK_NE(ClassId, 0U); bool UnlockRequired; auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired); Block = TSD->Cache.allocate(ClassId); + // If the allocation failed, the most likely reason with a 32-bit primary + // is the region being full. In that event, retry in each successively + // larger class until it fits. If it fails to fit in the largest class, + // fall back to the Secondary.
+ if (UNLIKELY(!Block)) { + while (ClassId < SizeClassMap::LargestClassId) { + Block = TSD->Cache.allocate(++ClassId); + if (LIKELY(Block)) { + break; + } + } + if (UNLIKELY(!Block)) { + ClassId = 0; + } + } if (UnlockRequired) TSD->unlock(); - } else { - ClassId = 0; - Block = - Secondary.allocate(NeededSize, Alignment, &BlockEnd, ZeroContents); } + if (UNLIKELY(ClassId == 0)) + Block = Secondary.allocate(NeededSize, Alignment, &SecondaryBlockEnd, + FillContents); if (UNLIKELY(!Block)) { if (Options.MayReturnNull) @@ -258,16 +325,88 @@ public: reportOutOfMemory(NeededSize); } - // We only need to zero the contents for Primary backed allocations. This - // condition is not necessarily unlikely, but since memset is costly, we - // might as well mark it as such. - if (UNLIKELY(ZeroContents && ClassId)) - memset(Block, 0, PrimaryT::getSizeByClassId(ClassId)); - - const uptr UnalignedUserPtr = - reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize(); + const uptr BlockUptr = reinterpret_cast<uptr>(Block); + const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize(); const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment); + void *Ptr = reinterpret_cast<void *>(UserPtr); + void *TaggedPtr = Ptr; + if (ClassId) { + // We only need to zero or tag the contents for Primary backed + // allocations. We only set tags for primary allocations in order to avoid + // faulting potentially large numbers of pages for large secondary + // allocations. We assume that guard pages are enough to protect these + // allocations. + // + // FIXME: When the kernel provides a way to set the background tag of a + // mapping, we should be able to tag secondary allocations as well. + // + // When memory tagging is enabled, zeroing the contents is done as part of + // setting the tag. + if (UNLIKELY(useMemoryTagging())) { + uptr PrevUserPtr; + Chunk::UnpackedHeader Header; + const uptr BlockEnd = BlockUptr + PrimaryT::getSizeByClassId(ClassId); + // If possible, try to reuse the UAF tag that was set by deallocate(). + // For simplicity, only reuse tags if we have the same start address as + // the previous allocation. This handles the majority of cases since + // most allocations will not be more aligned than the minimum alignment. + // + // We need to handle situations involving reclaimed chunks, and retag + // the reclaimed portions if necessary. In the case where the chunk is + // fully reclaimed, the chunk's header will be zero, which will trigger + // the code path for new mappings and invalid chunks that prepares the + // chunk from scratch. There are three possibilities for partial + // reclaiming: + // + // (1) Header was reclaimed, data was partially reclaimed. + // (2) Header was not reclaimed, all data was reclaimed (e.g. because + // data started on a page boundary). + // (3) Header was not reclaimed, data was partially reclaimed. + // + // Case (1) will be handled in the same way as for full reclaiming, + // since the header will be zero. + // + // We can detect case (2) by loading the tag from the start + // of the chunk. If it is zero, it means that either all data was + // reclaimed (since we never use zero as the chunk tag), or that the + // previous allocation was of size zero. Either way, we need to prepare + // a new chunk from scratch. + // + // We can detect case (3) by moving to the next page (if covered by the + // chunk) and loading the tag of its first granule. If it is zero, it + // means that all following pages may need to be retagged. 
On the other + // hand, if it is nonzero, we can assume that all following pages are + // still tagged, according to the logic that if any of the pages + // following the next page were reclaimed, the next page would have been + // reclaimed as well. + uptr TaggedUserPtr; + if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) && + PrevUserPtr == UserPtr && + (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) { + uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes; + const uptr NextPage = roundUpTo(TaggedUserPtr, getPageSizeCached()); + if (NextPage < PrevEnd && loadTag(NextPage) != NextPage) + PrevEnd = NextPage; + TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr); + resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, BlockEnd); + if (Size) { + // Clear any stack metadata that may have previously been stored in + // the chunk data. + memset(TaggedPtr, 0, archMemoryTagGranuleSize()); + } + } else { + TaggedPtr = prepareTaggedChunk(Ptr, Size, BlockEnd); + } + storeAllocationStackMaybe(Ptr); + } else if (UNLIKELY(FillContents != NoFill)) { + // This condition is not necessarily unlikely, but since memset is + // costly, we might as well mark it as such. + memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte, + PrimaryT::getSizeByClassId(ClassId)); + } + } + Chunk::UnpackedHeader Header = {}; if (UNLIKELY(UnalignedUserPtr != UserPtr)) { const uptr Offset = UserPtr - UnalignedUserPtr; @@ -283,15 +422,15 @@ public: Header.ClassId = ClassId & Chunk::ClassIdMask; Header.State = Chunk::State::Allocated; Header.Origin = Origin & Chunk::OriginMask; - Header.SizeOrUnusedBytes = (ClassId ? Size : BlockEnd - (UserPtr + Size)) & - Chunk::SizeOrUnusedBytesMask; - void *Ptr = reinterpret_cast<void *>(UserPtr); + Header.SizeOrUnusedBytes = + (ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) & + Chunk::SizeOrUnusedBytesMask; Chunk::storeHeader(Cookie, Ptr, &Header); if (&__scudo_allocate_hook) - __scudo_allocate_hook(Ptr, Size); + __scudo_allocate_hook(TaggedPtr, Size); - return Ptr; + return TaggedPtr; } NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0, @@ -319,6 +458,8 @@ public: if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))) reportMisalignedPointer(AllocatorAction::Deallocating, Ptr); + Ptr = untagPointerMaybe(Ptr); + Chunk::UnpackedHeader Header; Chunk::loadHeader(Cookie, Ptr, &Header); @@ -346,6 +487,15 @@ public: void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) { initThreadMaybe(); + if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) { + if (Options.MayReturnNull) + return nullptr; + reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize); + } + + void *OldTaggedPtr = OldPtr; + OldPtr = untagPointerMaybe(OldPtr); + // The following cases are handled by the C wrappers. DCHECK_NE(OldPtr, nullptr); DCHECK_NE(NewSize, 0); @@ -396,16 +546,20 @@ public: // reasonable delta), we just keep the old block, and update the chunk // header to reflect the size change. if (reinterpret_cast<uptr>(OldPtr) + NewSize <= BlockEnd) { - const uptr Delta = - OldSize < NewSize ? NewSize - OldSize : OldSize - NewSize; - if (Delta <= SizeClassMap::MaxSize / 2) { + if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) { Chunk::UnpackedHeader NewHeader = OldHeader; NewHeader.SizeOrUnusedBytes = (ClassId ?
NewSize : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) & Chunk::SizeOrUnusedBytesMask; Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader); - return OldPtr; + if (UNLIKELY(ClassId && useMemoryTagging())) { + resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize, + reinterpret_cast<uptr>(OldTaggedPtr) + NewSize, + BlockEnd); + storeAllocationStackMaybe(OldPtr); + } + return OldTaggedPtr; } } @@ -416,7 +570,7 @@ public: void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment); if (NewPtr) { const uptr OldSize = getSize(OldPtr, &OldHeader); - memcpy(NewPtr, OldPtr, Min(NewSize, OldSize)); + memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize)); quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize); } return NewPtr; @@ -427,6 +581,9 @@ public: // this function finishes. We will revisit that later. void disable() { initThreadMaybe(); +#ifdef GWP_ASAN_HOOKS + GuardedAlloc.disable(); +#endif TSDRegistry.disable(); Stats.disable(); Quarantine.disable(); @@ -441,6 +598,9 @@ public: Quarantine.enable(); Stats.enable(); TSDRegistry.enable(); +#ifdef GWP_ASAN_HOOKS + GuardedAlloc.enable(); +#endif } // The function returns the amount of bytes required to store the statistics, @@ -473,6 +633,7 @@ public: void releaseToOS() { initThreadMaybe(); Primary.releaseToOS(); + Secondary.releaseToOS(); } // Iterate over all chunks and call a callback for all busy chunks located @@ -489,11 +650,19 @@ public: uptr Chunk; Chunk::UnpackedHeader Header; if (getChunkFromBlock(Block, &Chunk, &Header) && - Header.State == Chunk::State::Allocated) - Callback(Chunk, getSize(reinterpret_cast<void *>(Chunk), &Header), Arg); + Header.State == Chunk::State::Allocated) { + uptr TaggedChunk = Chunk; + if (useMemoryTagging()) + TaggedChunk = loadTag(Chunk); + Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header), + Arg); + } }; Primary.iterateOverBlocks(Lambda); Secondary.iterateOverBlocks(Lambda); +#ifdef GWP_ASAN_HOOKS + GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg); +#endif } bool canReturnNull() { @@ -501,8 +670,14 @@ public: return Options.MayReturnNull; } - // TODO(kostyak): implement this as a "backend" to mallopt. - bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; } + bool setOption(Option O, sptr Value) { + if (O == Option::ReleaseInterval) { + Primary.setReleaseToOsIntervalMs(static_cast<s32>(Value)); + Secondary.setReleaseToOsIntervalMs(static_cast<s32>(Value)); + return true; + } + return false; + } // Return the usable size for a given chunk. Technically we lie, as we just // report the actual size of a chunk. This is done to counteract code actively @@ -519,6 +694,7 @@ public: return GuardedAlloc.getSize(Ptr); #endif // GWP_ASAN_HOOKS + Ptr = untagPointerMaybe(const_cast<void *>(Ptr)); Chunk::UnpackedHeader Header; Chunk::loadHeader(Cookie, Ptr, &Header); // Getting the usable size of a chunk only makes sense if it's allocated. 
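The setOption() change above replaces the old setOption(uptr, uptr) stub and gives wrappers a typed backend for mallopt-style knobs: only Option::ReleaseInterval is recognized for now, and the value is forwarded to both the primary and secondary allocators. A hedged sketch of wrapper glue (the Instance object and the parameter constant are hypothetical, not part of this import):

// Hypothetical: Instance is some scudo::Allocator<Config> singleton.
static const int M_RELEASE_INTERVAL_MS = -100; // made-up mallopt() parameter
int scudo_mallopt_sketch(int Param, int Value) {
  if (Param == M_RELEASE_INTERVAL_MS)
    return Instance.setOption(scudo::Option::ReleaseInterval,
                              static_cast<scudo::sptr>(Value))
               ? 1
               : 0;
  return 0; // unrecognized parameters report failure, as mallopt() does
}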
@@ -543,11 +719,151 @@ public: #endif // GWP_ASAN_HOOKS if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)) return false; + Ptr = untagPointerMaybe(const_cast<void *>(Ptr)); Chunk::UnpackedHeader Header; return Chunk::isValid(Cookie, Ptr, &Header) && Header.State == Chunk::State::Allocated; } + bool useMemoryTagging() { return Primary.useMemoryTagging(); } + + void disableMemoryTagging() { Primary.disableMemoryTagging(); } + + void setTrackAllocationStacks(bool Track) { + initThreadMaybe(); + Options.TrackAllocationStacks = Track; + } + + void setFillContents(FillContentsMode FillContents) { + initThreadMaybe(); + Options.FillContents = FillContents; + } + + const char *getStackDepotAddress() const { + return reinterpret_cast<const char *>(&Depot); + } + + const char *getRegionInfoArrayAddress() const { + return Primary.getRegionInfoArrayAddress(); + } + + static uptr getRegionInfoArraySize() { + return PrimaryT::getRegionInfoArraySize(); + } + + static void getErrorInfo(struct scudo_error_info *ErrorInfo, + uintptr_t FaultAddr, const char *DepotPtr, + const char *RegionInfoPtr, const char *Memory, + const char *MemoryTags, uintptr_t MemoryAddr, + size_t MemorySize) { + *ErrorInfo = {}; + if (!PrimaryT::SupportsMemoryTagging || + MemoryAddr + MemorySize < MemoryAddr) + return; + + uptr UntaggedFaultAddr = untagPointer(FaultAddr); + u8 FaultAddrTag = extractTag(FaultAddr); + BlockInfo Info = + PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr); + + auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool { + if (Addr < MemoryAddr || + Addr + archMemoryTagGranuleSize() < Addr || + Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize) + return false; + *Data = &Memory[Addr - MemoryAddr]; + *Tag = static_cast<u8>( + MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]); + return true; + }; + + auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr, + Chunk::UnpackedHeader *Header, const u32 **Data, + u8 *Tag) { + const char *BlockBegin; + u8 BlockBeginTag; + if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag)) + return false; + uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin); + *ChunkAddr = Addr + ChunkOffset; + + const char *ChunkBegin; + if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag)) + return false; + *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>( + ChunkBegin - Chunk::getHeaderSize()); + *Data = reinterpret_cast<const u32 *>(ChunkBegin); + return true; + }; + + auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr); + + auto MaybeCollectTrace = [&](uintptr_t(&Trace)[MaxTraceSize], u32 Hash) { + uptr RingPos, Size; + if (!Depot->find(Hash, &RingPos, &Size)) + return; + for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I) + Trace[I] = (*Depot)[RingPos + I]; + }; + + size_t NextErrorReport = 0; + + // First, check for UAF. 
+ { + uptr ChunkAddr; + Chunk::UnpackedHeader Header; + const u32 *Data; + uint8_t Tag; + if (ReadBlock(Info.BlockBegin, &ChunkAddr, &Header, &Data, &Tag) && + Header.State != Chunk::State::Allocated && + Data[MemTagPrevTagIndex] == FaultAddrTag) { + auto *R = &ErrorInfo->reports[NextErrorReport++]; + R->error_type = USE_AFTER_FREE; + R->allocation_address = ChunkAddr; + R->allocation_size = Header.SizeOrUnusedBytes; + MaybeCollectTrace(R->allocation_trace, + Data[MemTagAllocationTraceIndex]); + R->allocation_tid = Data[MemTagAllocationTidIndex]; + MaybeCollectTrace(R->deallocation_trace, + Data[MemTagDeallocationTraceIndex]); + R->deallocation_tid = Data[MemTagDeallocationTidIndex]; + } + } + + auto CheckOOB = [&](uptr BlockAddr) { + if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd) + return false; + + uptr ChunkAddr; + Chunk::UnpackedHeader Header; + const u32 *Data; + uint8_t Tag; + if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) || + Header.State != Chunk::State::Allocated || Tag != FaultAddrTag) + return false; + + auto *R = &ErrorInfo->reports[NextErrorReport++]; + R->error_type = + UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW; + R->allocation_address = ChunkAddr; + R->allocation_size = Header.SizeOrUnusedBytes; + MaybeCollectTrace(R->allocation_trace, Data[MemTagAllocationTraceIndex]); + R->allocation_tid = Data[MemTagAllocationTidIndex]; + return NextErrorReport == + sizeof(ErrorInfo->reports) / sizeof(ErrorInfo->reports[0]); + }; + + if (CheckOOB(Info.BlockBegin)) + return; + + // Check for OOB in the 30 surrounding blocks. Beyond that we are likely to + // hit false positives. + for (int I = 1; I != 16; ++I) + if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) || + CheckOOB(Info.BlockBegin - I * Info.BlockSize)) + return; + } + private: using SecondaryT = typename Params::Secondary; typedef typename PrimaryT::SizeClassMap SizeClassMap; @@ -561,9 +877,32 @@ private: static_assert(MinAlignment >= sizeof(Chunk::PackedHeader), "Minimal alignment must at least cover a chunk header."); + static_assert(!PrimaryT::SupportsMemoryTagging || + MinAlignment >= archMemoryTagGranuleSize(), + ""); static const u32 BlockMarker = 0x44554353U; + // These are indexes into an "array" of 32-bit values that store information + // inline with a chunk that is relevant to diagnosing memory tag faults, where + // 0 corresponds to the address of the user memory. This means that negative + // indexes may be used to store information about allocations, while positive + // indexes may only be used to store information about deallocations, because + // the user memory is in use until it has been deallocated. The smallest index + // that may be used is -2, which corresponds to 8 bytes before the user + // memory, because the chunk header size is 8 bytes and in allocators that + // support memory tagging the minimum alignment is at least the tag granule + // size (16 on aarch64), and the largest index that may be used is 3 because + // we are only guaranteed to have at least a granule's worth of space in the + // user memory. 
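Concretely, assuming the aarch64 granule size of 16 and the 8-byte packed header sitting at the start of its 16-byte-aligned header slot, the u32 slots named by the index constants that follow lay out like this (a sketch derived from the comment above; Ptr is the user pointer):

// [Ptr-16, Ptr- 8)  packed chunk header (8 bytes)
// [Ptr- 8, Ptr- 4)  slot -2: allocation trace hash   (valid while allocated)
// [Ptr- 4, Ptr   )  slot -1: allocation thread id    (valid while allocated)
// [Ptr   , Ptr+ 4)  slot  0: deallocation trace hash (valid after free)
// [Ptr+ 4, Ptr+ 8)  slot  1: deallocation thread id  (valid after free)
// [Ptr+ 8, Ptr+12)  slot  2: previous memory tag     (valid after free)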
+ static const sptr MemTagAllocationTraceIndex = -2; + static const sptr MemTagAllocationTidIndex = -1; + static const sptr MemTagDeallocationTraceIndex = 0; + static const sptr MemTagDeallocationTidIndex = 1; + static const sptr MemTagPrevTagIndex = 2; + + static const uptr MaxTraceSize = 64; + GlobalStats Stats; TSDRegistryT TSDRegistry; PrimaryT Primary; @@ -574,12 +913,19 @@ private: struct { u8 MayReturnNull : 1; // may_return_null - u8 ZeroContents : 1; // zero_contents + FillContentsMode FillContents : 2; // zero_contents, pattern_fill_contents u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch u8 DeleteSizeMismatch : 1; // delete_size_mismatch + u8 TrackAllocationStacks : 1; u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size } Options; +#ifdef GWP_ASAN_HOOKS + gwp_asan::GuardedPoolAllocator GuardedAlloc; +#endif // GWP_ASAN_HOOKS + + StackDepot Depot; + // The following might get optimized out by the compiler. NOINLINE void performSanityChecks() { // Verify that the header offset field can hold the maximum offset. In the @@ -638,6 +984,14 @@ private: void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header, uptr Size) { Chunk::UnpackedHeader NewHeader = *Header; + if (UNLIKELY(NewHeader.ClassId && useMemoryTagging())) { + u8 PrevTag = extractTag(loadTag(reinterpret_cast<uptr>(Ptr))); + uptr TaggedBegin, TaggedEnd; + // Exclude the previous tag so that immediate use after free is detected + // 100% of the time. + setRandomTag(Ptr, Size, 1UL << PrevTag, &TaggedBegin, &TaggedEnd); + storeDeallocationStackMaybe(Ptr, PrevTag); + } // If the quarantine is disabled, the actual size of a chunk is 0 or larger // than the maximum allowed, we return a chunk directly to the backend. // Logical Or can be short-circuited, which introduces unnecessary @@ -672,13 +1026,39 @@ private: bool getChunkFromBlock(uptr Block, uptr *Chunk, Chunk::UnpackedHeader *Header) { - u32 Offset = 0; - if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker) - Offset = reinterpret_cast<u32 *>(Block)[1]; - *Chunk = Block + Offset + Chunk::getHeaderSize(); + *Chunk = + Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block)); return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header); } + static uptr getChunkOffsetFromBlock(const char *Block) { + u32 Offset = 0; + if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker) + Offset = reinterpret_cast<const u32 *>(Block)[1]; + return Offset + Chunk::getHeaderSize(); + } + + void storeAllocationStackMaybe(void *Ptr) { + if (!UNLIKELY(Options.TrackAllocationStacks)) + return; + auto *Ptr32 = reinterpret_cast<u32 *>(Ptr); + Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(); + Ptr32[MemTagAllocationTidIndex] = getThreadID(); + } + + void storeDeallocationStackMaybe(void *Ptr, uint8_t PrevTag) { + if (!UNLIKELY(Options.TrackAllocationStacks)) + return; + + // Disable tag checks here so that we don't need to worry about zero sized + // allocations. 
+ ScopedDisableMemoryTagChecks x; + auto *Ptr32 = reinterpret_cast<u32 *>(Ptr); + Ptr32[MemTagDeallocationTraceIndex] = collectStackTrace(); + Ptr32[MemTagDeallocationTidIndex] = getThreadID(); + Ptr32[MemTagPrevTagIndex] = PrevTag; + } + uptr getStats(ScopedString *Str) { Primary.getStats(Str); Secondary.getStats(Str); diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/common.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/common.h index a76eb6bbc16..9037f92b497 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/common.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/common.h @@ -126,12 +126,15 @@ inline uptr getPageSizeCached() { return getPageSizeSlow(); } +// Returns 0 if the number of CPUs could not be determined. u32 getNumberOfCPUs(); const char *getEnv(const char *Name); u64 getMonotonicTime(); +u32 getThreadID(); + // Our randomness gathering function is limited to 256 bytes to ensure we get // as many bytes as requested, and avoid interruptions (on Linux). constexpr uptr MaxRandomLength = 256U; @@ -142,6 +145,7 @@ bool getRandom(void *Buffer, uptr Length, bool Blocking = false); #define MAP_ALLOWNOMEM (1U << 0) #define MAP_NOACCESS (1U << 1) #define MAP_RESIZABLE (1U << 2) +#define MAP_MEMTAG (1U << 3) // Our platform memory mapping use is restricted to 3 scenarios: // - reserve memory at a random address (MAP_NOACCESS); @@ -171,6 +175,22 @@ void NORETURN dieOnMapUnmapError(bool OutOfMemory = false); void setAbortMessage(const char *Message); +struct BlockInfo { + uptr BlockBegin; + uptr BlockSize; + uptr RegionBegin; + uptr RegionEnd; +}; + +constexpr unsigned char PatternFillByte = 0xAB; + +enum FillContentsMode { + NoFill = 0, + ZeroFill = 1, + PatternOrZeroFill = 2 // Pattern fill unless the memory is known to be + // zero-initialized already. +}; + } // namespace scudo #endif // SCUDO_COMMON_H_ diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/flags.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/flags.cpp index dd9f050a2d2..de5153b288b 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/flags.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/flags.cpp @@ -9,7 +9,8 @@ #include "flags.h" #include "common.h" #include "flags_parser.h" -#include "interface.h" + +#include "scudo/interface.h" namespace scudo { diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/flags.inc b/gnu/llvm/compiler-rt/lib/scudo/standalone/flags.inc index 25b86e14fa9..b5cab473416 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/flags.inc +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/flags.inc @@ -34,6 +34,9 @@ SCUDO_FLAG(bool, delete_size_mismatch, true, SCUDO_FLAG(bool, zero_contents, false, "Zero chunk contents on allocation.") +SCUDO_FLAG(bool, pattern_fill_contents, false, + "Pattern fill chunk contents on allocation.") + SCUDO_FLAG(int, rss_limit_mb, -1, "Enforce an upper limit (in megabytes) to the process RSS. The " "allocator will terminate or return NULL when allocations are " @@ -45,6 +48,6 @@ SCUDO_FLAG(bool, may_return_null, true, "returning NULL in otherwise non-fatal error scenarios, eg: OOM, " "invalid allocation alignments, etc.") -SCUDO_FLAG(int, release_to_os_interval_ms, 5000, +SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? INT32_MIN : 5000, "Interval (in milliseconds) at which to attempt release of unused " "memory to the OS. 
Negative values disable the feature.") diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/fuchsia.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/fuchsia.cpp index b3d72de158c..d4ea3327794 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/fuchsia.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/fuchsia.cpp @@ -170,6 +170,8 @@ u64 getMonotonicTime() { return _zx_clock_get_monotonic(); } u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); } +u32 getThreadID() { return 0; } + bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) { static_assert(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN, ""); if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength)) diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/fuzz/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/scudo/standalone/fuzz/CMakeLists.txt new file mode 100644 index 00000000000..d29c2f2fe74 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/fuzz/CMakeLists.txt @@ -0,0 +1,12 @@ +if (LLVM_USE_SANITIZE_COVERAGE) + add_executable(get_error_info_fuzzer + get_error_info_fuzzer.cpp) + set_target_properties( + get_error_info_fuzzer PROPERTIES FOLDER "Fuzzers") + target_compile_options( + get_error_info_fuzzer PRIVATE -fsanitize=fuzzer) + set_target_properties( + get_error_info_fuzzer PROPERTIES LINK_FLAGS -fsanitize=fuzzer) + target_include_directories( + get_error_info_fuzzer PRIVATE .. ../include) +endif() diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp new file mode 100644 index 00000000000..d29f515215e --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp @@ -0,0 +1,48 @@ +//===-- get_error_info_fuzzer.cpp -----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#define SCUDO_FUZZ +#include "allocator_config.h" +#include "combined.h" + +#include <fuzzer/FuzzedDataProvider.h> + +#include <string> +#include <vector> + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) { + using AllocatorT = scudo::Allocator<scudo::AndroidConfig>; + FuzzedDataProvider FDP(Data, Size); + + uintptr_t FaultAddr = FDP.ConsumeIntegral<uintptr_t>(); + uintptr_t MemoryAddr = FDP.ConsumeIntegral<uintptr_t>(); + + std::string MemoryAndTags = FDP.ConsumeRandomLengthString(FDP.remaining_bytes()); + const char *Memory = MemoryAndTags.c_str(); + // Assume 16-byte alignment. 
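The division on the next line splits the raw fuzz input into granule data plus one tag byte per 16-byte granule: every 17 input bytes yield 16 bytes of memory and 1 byte of tags. For example, a 170-byte string gives MemorySize = (170 / 17) * 16 = 160, with the 10 bytes at Memory + 160 consumed as MemoryTags.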
+ size_t MemorySize = (MemoryAndTags.length() / 17) * 16; + const char *MemoryTags = Memory + MemorySize; + + std::string StackDepotBytes = FDP.ConsumeRandomLengthString(FDP.remaining_bytes()); + std::vector<char> StackDepot(sizeof(scudo::StackDepot), 0); + for (size_t i = 0; i < StackDepotBytes.length() && i < StackDepot.size(); ++i) { + StackDepot[i] = StackDepotBytes[i]; + } + + std::string RegionInfoBytes = FDP.ConsumeRemainingBytesAsString(); + std::vector<char> RegionInfo(AllocatorT::getRegionInfoArraySize(), 0); + for (size_t i = 0; i < RegionInfoBytes.length() && i < RegionInfo.size(); ++i) { + RegionInfo[i] = RegionInfoBytes[i]; + } + + scudo_error_info ErrorInfo; + AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(), + RegionInfo.data(), Memory, MemoryTags, MemoryAddr, + MemorySize); + return 0; +} diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/include/scudo/interface.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/include/scudo/interface.h new file mode 100644 index 00000000000..d30fb6514a1 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/include/scudo/interface.h @@ -0,0 +1,110 @@ +//===-- scudo/interface.h ---------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_INTERFACE_H_ +#define SCUDO_INTERFACE_H_ + +#include <stddef.h> + +extern "C" { + +__attribute__((weak)) const char *__scudo_default_options(); + +// Post-allocation & pre-deallocation hooks. +// They must be thread-safe and not use heap related functions. +__attribute__((weak)) void __scudo_allocate_hook(void *ptr, size_t size); +__attribute__((weak)) void __scudo_deallocate_hook(void *ptr); + +void __scudo_print_stats(void); + +typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg); + +// Determine the likely cause of a tag check fault or other memory protection +// error on a system with memory tagging support. The results are returned via +// the error_info data structure. Up to three possible causes are returned in +// the reports array, in decreasing order of probability. The remaining elements +// of reports are zero-initialized. +// +// This function may be called from a different process from the one that +// crashed. In this case, various data structures must be copied from the +// crashing process to the process that analyzes the crash. +// +// This interface is not guaranteed to be stable and may change at any time. +// Furthermore, the version of scudo in the crashing process must be the same as +// the version in the process that analyzes the crash. +// +// fault_addr is the fault address. On aarch64 this is available in the system +// register FAR_ELx, or far_context.far in an upcoming release of the Linux +// kernel. This address must include the pointer tag; note that the kernel +// strips the tag from the fields siginfo.si_addr and sigcontext.fault_address, +// so these addresses are not suitable to be passed as fault_addr. +// +// stack_depot is a pointer to the stack depot data structure, which may be +// obtained by calling the function __scudo_get_stack_depot_addr() in the +// crashing process. The size of the stack depot is available by calling the +// function __scudo_get_stack_depot_size(). 
+// +// region_info is a pointer to the region info data structure, which may be +// obtained by calling the function __scudo_get_region_info_addr() in the +// crashing process. The size of the region info is available by calling the +// function __scudo_get_region_info_size(). +// +// memory is a pointer to a region of memory surrounding the fault address. +// The more memory available via this pointer, the more likely it is that the +// function will be able to analyze a crash correctly. It is recommended to +// provide an amount of memory equal to 16 * the primary allocator's largest +// size class either side of the fault address. +// +// memory_tags is a pointer to an array of memory tags for the memory accessed +// via memory. Each byte of this array corresponds to a region of memory of size +// equal to the architecturally defined memory tag granule size (16 on aarch64). +// +// memory_addr is the start address of memory in the crashing process's address +// space. +// +// memory_size is the size of the memory region referred to by the memory +// pointer. +void __scudo_get_error_info(struct scudo_error_info *error_info, + uintptr_t fault_addr, const char *stack_depot, + const char *region_info, const char *memory, + const char *memory_tags, uintptr_t memory_addr, + size_t memory_size); + +enum scudo_error_type { + UNKNOWN, + USE_AFTER_FREE, + BUFFER_OVERFLOW, + BUFFER_UNDERFLOW, +}; + +struct scudo_error_report { + enum scudo_error_type error_type; + + uintptr_t allocation_address; + uintptr_t allocation_size; + + uint32_t allocation_tid; + uintptr_t allocation_trace[64]; + + uint32_t deallocation_tid; + uintptr_t deallocation_trace[64]; +}; + +struct scudo_error_info { + struct scudo_error_report reports[3]; +}; + +const char *__scudo_get_stack_depot_addr(); +size_t __scudo_get_stack_depot_size(); + +const char *__scudo_get_region_info_addr(); +size_t __scudo_get_region_info_size(); + +} // extern "C" + +#endif // SCUDO_INTERFACE_H_ diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/internal_defs.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/internal_defs.h index 8f6a89ecba7..a884f1f3a40 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/internal_defs.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/internal_defs.h @@ -29,12 +29,10 @@ // Attributes & builtins related macros. #define INTERFACE __attribute__((visibility("default"))) +#define HIDDEN __attribute__((visibility("hidden"))) #define WEAK __attribute__((weak)) #define ALWAYS_INLINE inline __attribute__((always_inline)) #define ALIAS(X) __attribute__((alias(X))) -// Please only use the ALIGNED macro before the type. Using ALIGNED after the -// variable declaration is not portable. 
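// The ALIGNED uses in this codebase migrate to the standard C++ alignas
// specifier, which is accepted in the canonical position before the
// declarator. A minimal before/after sketch, mirroring the SizeClassInfo
// change later in this diff:
//
//   struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo { ... };  // removed
//   struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo { ... };  // added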
-#define ALIGNED(X) __attribute__((aligned(X))) #define FORMAT(F, A) __attribute__((format(printf, F, A))) #define NOINLINE __attribute__((noinline)) #define NORETURN __attribute__((noreturn)) diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/linux.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/linux.cpp index 8266a528f42..69ffdd9a165 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/linux.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/linux.cpp @@ -35,6 +35,10 @@ #define ANDROID_PR_SET_VMA_ANON_NAME 0 #endif +#ifdef ANDROID_EXPERIMENTAL_MTE +#include <bionic/mte_kernel.h> +#endif + namespace scudo { uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); } @@ -50,6 +54,10 @@ void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags, MmapProt = PROT_NONE; } else { MmapProt = PROT_READ | PROT_WRITE; +#if defined(__aarch64__) && defined(ANDROID_EXPERIMENTAL_MTE) + if (Flags & MAP_MEMTAG) + MmapProt |= PROT_MTE; +#endif } if (Addr) { // Currently no scenario for a noaccess mapping with a fixed address. @@ -124,10 +132,21 @@ u64 getMonotonicTime() { u32 getNumberOfCPUs() { cpu_set_t CPUs; - CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0); + // sched_getaffinity can fail for a variety of legitimate reasons (lack of + // CAP_SYS_NICE, syscall filtering, etc), in which case we shall return 0. + if (sched_getaffinity(0, sizeof(cpu_set_t), &CPUs) != 0) + return 0; return static_cast<u32>(CPU_COUNT(&CPUs)); } +u32 getThreadID() { +#if SCUDO_ANDROID + return static_cast<u32>(gettid()); +#else + return static_cast<u32>(syscall(SYS_gettid)); +#endif +} + // Blocking is possibly unused if the getrandom block is not compiled in. bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) { if (!Buffer || !Length || Length > MaxRandomLength) @@ -153,10 +172,34 @@ bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) { return (ReadBytes == static_cast<ssize_t>(Length)); } +// Allocation free syslog-like API. +extern "C" WEAK int async_safe_write_log(int pri, const char *tag, + const char *msg); + void outputRaw(const char *Buffer) { - static HybridMutex Mutex; - ScopedLock L(Mutex); - write(2, Buffer, strlen(Buffer)); + if (&async_safe_write_log) { + constexpr s32 AndroidLogInfo = 4; + constexpr uptr MaxLength = 1024U; + char LocalBuffer[MaxLength]; + while (strlen(Buffer) > MaxLength) { + uptr P; + for (P = MaxLength - 1; P > 0; P--) { + if (Buffer[P] == '\n') { + memcpy(LocalBuffer, Buffer, P); + LocalBuffer[P] = '\0'; + async_safe_write_log(AndroidLogInfo, "scudo", LocalBuffer); + Buffer = &Buffer[P + 1]; + break; + } + } + // If no newline was found, just log the buffer. 
+ if (P == 0) + break; + } + async_safe_write_log(AndroidLogInfo, "scudo", Buffer); + } else { + write(2, Buffer, strlen(Buffer)); + } } extern "C" WEAK void android_set_abort_message(const char *); diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/local_cache.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/local_cache.h index b08abd3e5d9..a6425fc6d1e 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/local_cache.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/local_cache.h @@ -165,13 +165,14 @@ private: NOINLINE void drain(PerClass *C, uptr ClassId) { const u32 Count = Min(C->MaxCount / 2, C->Count); - const uptr FirstIndexToDrain = C->Count - Count; - TransferBatch *B = createBatch(ClassId, C->Chunks[FirstIndexToDrain]); + TransferBatch *B = createBatch(ClassId, C->Chunks[0]); if (UNLIKELY(!B)) reportOutOfMemory( SizeClassAllocator::getSizeByClassId(SizeClassMap::BatchClassId)); - B->setFromArray(&C->Chunks[FirstIndexToDrain], Count); + B->setFromArray(&C->Chunks[0], Count); C->Count -= Count; + for (uptr I = 0; I < C->Count; I++) + C->Chunks[I] = C->Chunks[I + Count]; Allocator->pushBatch(ClassId, B); } }; diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/memtag.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/memtag.h new file mode 100644 index 00000000000..6f347f4694e --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/memtag.h @@ -0,0 +1,261 @@ +//===-- memtag.h ------------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_MEMTAG_H_ +#define SCUDO_MEMTAG_H_ + +#include "internal_defs.h" + +#if SCUDO_LINUX +#include <sys/auxv.h> +#include <sys/prctl.h> +#if defined(ANDROID_EXPERIMENTAL_MTE) +#include <bionic/mte_kernel.h> +#endif +#endif + +namespace scudo { + +#if defined(__aarch64__) || defined(SCUDO_FUZZ) + +inline constexpr bool archSupportsMemoryTagging() { return true; } +inline constexpr uptr archMemoryTagGranuleSize() { return 16; } + +inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); } + +inline uint8_t extractTag(uptr Ptr) { + return (Ptr >> 56) & 0xf; +} + +#else + +inline constexpr bool archSupportsMemoryTagging() { return false; } + +inline uptr archMemoryTagGranuleSize() { + UNREACHABLE("memory tagging not supported"); +} + +inline uptr untagPointer(uptr Ptr) { + (void)Ptr; + UNREACHABLE("memory tagging not supported"); +} + +inline uint8_t extractTag(uptr Ptr) { + (void)Ptr; + UNREACHABLE("memory tagging not supported"); +} + +#endif + +#if defined(__aarch64__) + +inline bool systemSupportsMemoryTagging() { +#if defined(ANDROID_EXPERIMENTAL_MTE) + return getauxval(AT_HWCAP2) & HWCAP2_MTE; +#else + return false; +#endif +} + +inline bool systemDetectsMemoryTagFaultsTestOnly() { +#if defined(ANDROID_EXPERIMENTAL_MTE) + return (prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) & PR_MTE_TCF_MASK) != + PR_MTE_TCF_NONE; +#else + return false; +#endif +} + +inline void disableMemoryTagChecksTestOnly() { + __asm__ __volatile__(".arch_extension mte; msr tco, #1"); +} + +inline void enableMemoryTagChecksTestOnly() { + __asm__ __volatile__(".arch_extension mte; msr tco, #0"); +} + +class ScopedDisableMemoryTagChecks { + size_t PrevTCO; + + public: + ScopedDisableMemoryTagChecks() { + __asm__ __volatile__(".arch_extension mte; mrs %0, tco; 
msr tco, #1" + : "=r"(PrevTCO)); + } + + ~ScopedDisableMemoryTagChecks() { + __asm__ __volatile__(".arch_extension mte; msr tco, %0" : : "r"(PrevTCO)); + } +}; + +inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask, + uptr *TaggedBegin, uptr *TaggedEnd) { + void *End; + __asm__ __volatile__( + R"( + .arch_extension mte + + // Set a random tag for Ptr in TaggedPtr. This needs to happen even if + // Size = 0 so that TaggedPtr ends up pointing at a valid address. + irg %[TaggedPtr], %[Ptr], %[ExcludeMask] + mov %[Cur], %[TaggedPtr] + + // Skip the loop if Size = 0. We don't want to do any tagging in this case. + cbz %[Size], 2f + + // Set the memory tag of the region + // [TaggedPtr, TaggedPtr + roundUpTo(Size, 16)) + // to the pointer tag stored in TaggedPtr. + add %[End], %[TaggedPtr], %[Size] + + 1: + stzg %[Cur], [%[Cur]], #16 + cmp %[Cur], %[End] + b.lt 1b + + 2: + )" + : + [TaggedPtr] "=&r"(*TaggedBegin), [Cur] "=&r"(*TaggedEnd), [End] "=&r"(End) + : [Ptr] "r"(Ptr), [Size] "r"(Size), [ExcludeMask] "r"(ExcludeMask) + : "memory"); +} + +inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr BlockEnd) { + // Prepare the granule before the chunk to store the chunk header by setting + // its tag to 0. Normally its tag will already be 0, but in the case where a + // chunk holding a low alignment allocation is reused for a higher alignment + // allocation, the chunk may already have a non-zero tag from the previous + // allocation. + __asm__ __volatile__(".arch_extension mte; stg %0, [%0, #-16]" + : + : "r"(Ptr) + : "memory"); + + uptr TaggedBegin, TaggedEnd; + setRandomTag(Ptr, Size, 0, &TaggedBegin, &TaggedEnd); + + // Finally, set the tag of the granule past the end of the allocation to 0, + // to catch linear overflows even if a previous larger allocation used the + // same block and tag. Only do this if the granule past the end is in our + // block, because this would otherwise lead to a SEGV if the allocation + // covers the entire block and our block is at the end of a mapping. The tag + // of the next block's header granule will be set to 0, so it will serve the + // purpose of catching linear overflows in this case. + uptr UntaggedEnd = untagPointer(TaggedEnd); + if (UntaggedEnd != BlockEnd) + __asm__ __volatile__(".arch_extension mte; stg %0, [%0]" + : + : "r"(UntaggedEnd) + : "memory"); + return reinterpret_cast<void *>(TaggedBegin); +} + +inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) { + uptr RoundOldPtr = roundUpTo(OldPtr, 16); + if (RoundOldPtr >= NewPtr) { + // If the allocation is shrinking we just need to set the tag past the end + // of the allocation to 0. See explanation in prepareTaggedChunk above. + uptr RoundNewPtr = untagPointer(roundUpTo(NewPtr, 16)); + if (RoundNewPtr != BlockEnd) + __asm__ __volatile__(".arch_extension mte; stg %0, [%0]" + : + : "r"(RoundNewPtr) + : "memory"); + return; + } + + __asm__ __volatile__(R"( + .arch_extension mte + + // Set the memory tag of the region + // [roundUpTo(OldPtr, 16), roundUpTo(NewPtr, 16)) + // to the pointer tag stored in OldPtr. + 1: + stzg %[Cur], [%[Cur]], #16 + cmp %[Cur], %[End] + b.lt 1b + + // Finally, set the tag of the granule past the end of the allocation to 0. 
+ and %[Cur], %[Cur], #(1 << 56) - 1 + cmp %[Cur], %[BlockEnd] + b.eq 2f + stg %[Cur], [%[Cur]] + + 2: + )" + : [ Cur ] "+&r"(RoundOldPtr), [ End ] "+&r"(NewPtr) + : [ BlockEnd ] "r"(BlockEnd) + : "memory"); +} + +inline uptr loadTag(uptr Ptr) { + uptr TaggedPtr = Ptr; + __asm__ __volatile__(".arch_extension mte; ldg %0, [%0]" + : "+r"(TaggedPtr) + : + : "memory"); + return TaggedPtr; +} + +#else + +inline bool systemSupportsMemoryTagging() { + UNREACHABLE("memory tagging not supported"); +} + +inline bool systemDetectsMemoryTagFaultsTestOnly() { + UNREACHABLE("memory tagging not supported"); +} + +inline void disableMemoryTagChecksTestOnly() { + UNREACHABLE("memory tagging not supported"); +} + +inline void enableMemoryTagChecksTestOnly() { + UNREACHABLE("memory tagging not supported"); +} + +struct ScopedDisableMemoryTagChecks { + ScopedDisableMemoryTagChecks() {} +}; + +inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask, + uptr *TaggedBegin, uptr *TaggedEnd) { + (void)Ptr; + (void)Size; + (void)ExcludeMask; + (void)TaggedBegin; + (void)TaggedEnd; + UNREACHABLE("memory tagging not supported"); +} + +inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr BlockEnd) { + (void)Ptr; + (void)Size; + (void)BlockEnd; + UNREACHABLE("memory tagging not supported"); +} + +inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) { + (void)OldPtr; + (void)NewPtr; + (void)BlockEnd; + UNREACHABLE("memory tagging not supported"); +} + +inline uptr loadTag(uptr Ptr) { + (void)Ptr; + UNREACHABLE("memory tagging not supported"); +} + +#endif + +} // namespace scudo + +#endif diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/mutex.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/mutex.h index b26b2df0662..d6e6a5b33aa 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/mutex.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/mutex.h @@ -22,7 +22,7 @@ namespace scudo { class HybridMutex { public: - void init() { memset(this, 0, sizeof(*this)); } + void init() { M = {}; } bool tryLock(); NOINLINE void lock() { if (LIKELY(tryLock())) diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/primary32.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/primary32.h index e296a78778e..29a26809818 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/primary32.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/primary32.h @@ -38,14 +38,23 @@ namespace scudo { // Memory used by this allocator is never unmapped but can be partially // reclaimed if the platform allows for it. -template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 { +template <class SizeClassMapT, uptr RegionSizeLog, + s32 MinReleaseToOsIntervalMs = INT32_MIN, + s32 MaxReleaseToOsIntervalMs = INT32_MAX> +class SizeClassAllocator32 { public: typedef SizeClassMapT SizeClassMap; + // The bytemap can only track UINT8_MAX - 1 classes. + static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), ""); // Regions should be large enough to hold the largest Block. 
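  // For instance, with a hypothetical RegionSizeLog of 18 the region spans
  // 1UL << 18 = 256 KiB, so the assert requires SizeClassMap::MaxSize to be
  // at most 256 KiB; otherwise a single block could not fit in one region.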
static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, ""); - typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT; + typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog, + MinReleaseToOsIntervalMs, + MaxReleaseToOsIntervalMs> + ThisT; typedef SizeClassAllocatorLocalCache<ThisT> CacheT; typedef typename CacheT::TransferBatch TransferBatch; + static const bool SupportsMemoryTagging = false; static uptr getSizeByClassId(uptr ClassId) { return (ClassId == SizeClassMap::BatchClassId) @@ -63,20 +72,21 @@ public: MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0. u32 Seed; + const u64 Time = getMonotonicTime(); if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))) - Seed = - static_cast<u32>(getMonotonicTime() ^ - (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6)); + Seed = static_cast<u32>( + Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6)); const uptr PageSize = getPageSizeCached(); for (uptr I = 0; I < NumClasses; I++) { SizeClassInfo *Sci = getSizeClassInfo(I); Sci->RandState = getRandomU32(&Seed); // See comment in the 64-bit primary about releasing smaller size classes. - Sci->CanRelease = (ReleaseToOsInterval >= 0) && - (I != SizeClassMap::BatchClassId) && + Sci->CanRelease = (I != SizeClassMap::BatchClassId) && (getSizeByClassId(I) >= (PageSize / 32)); + if (Sci->CanRelease) + Sci->ReleaseInfo.LastReleaseAtNs = Time; } - ReleaseToOsIntervalMs = ReleaseToOsInterval; + setReleaseToOsIntervalMs(ReleaseToOsInterval); } void init(s32 ReleaseToOsInterval) { memset(this, 0, sizeof(*this)); @@ -87,8 +97,7 @@ public: while (NumberOfStashedRegions > 0) unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]), RegionSize); - // TODO(kostyak): unmap the TransferBatch regions as well. 
- for (uptr I = 0; I < NumRegions; I++) + for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) if (PossibleRegions[I]) unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize); PossibleRegions.unmapTestOnly(); @@ -147,8 +156,9 @@ public: template <typename F> void iterateOverBlocks(F Callback) { for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) - if (PossibleRegions[I]) { - const uptr BlockSize = getSizeByClassId(PossibleRegions[I]); + if (PossibleRegions[I] && + (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) { + const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U); const uptr From = I * RegionSize; const uptr To = From + (RegionSize / BlockSize) * BlockSize; for (uptr Block = From; Block < To; Block += BlockSize) @@ -174,11 +184,18 @@ public: getStats(Str, I, 0); } + void setReleaseToOsIntervalMs(s32 Interval) { + if (Interval >= MaxReleaseToOsIntervalMs) { + Interval = MaxReleaseToOsIntervalMs; + } else if (Interval <= MinReleaseToOsIntervalMs) { + Interval = MinReleaseToOsIntervalMs; + } + atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed); + } + uptr releaseToOS() { uptr TotalReleasedBytes = 0; for (uptr I = 0; I < NumClasses; I++) { - if (I == SizeClassMap::BatchClassId) - continue; SizeClassInfo *Sci = getSizeClassInfo(I); ScopedLock L(Sci->Mutex); TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true); @@ -186,15 +203,24 @@ public: return TotalReleasedBytes; } + bool useMemoryTagging() { return false; } + void disableMemoryTagging() {} + + const char *getRegionInfoArrayAddress() const { return nullptr; } + static uptr getRegionInfoArraySize() { return 0; } + + static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) { + (void)RegionInfoData; + (void)Ptr; + return {}; + } + private: static const uptr NumClasses = SizeClassMap::NumClasses; static const uptr RegionSize = 1UL << RegionSizeLog; static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> RegionSizeLog; -#if SCUDO_WORDSIZE == 32U + static const u32 MaxNumBatches = SCUDO_ANDROID ? 
4U : 8U; typedef FlatByteMap<NumRegions> ByteMap; -#else - typedef TwoLevelByteMap<(NumRegions >> 12), 1UL << 12> ByteMap; -#endif struct SizeClassStats { uptr PoppedBlocks; @@ -208,9 +234,11 @@ private: u64 LastReleaseAtNs; }; - struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo { + struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo { HybridMutex Mutex; SinglyLinkedList<TransferBatch> FreeList; + uptr CurrentRegion; + uptr CurrentRegionAllocated; SizeClassStats Stats; bool CanRelease; u32 RandState; @@ -261,14 +289,12 @@ private: if (!Region) Region = allocateRegionSlow(); if (LIKELY(Region)) { - if (ClassId) { - const uptr RegionIndex = computeRegionId(Region); - if (RegionIndex < MinRegionIndex) - MinRegionIndex = RegionIndex; - if (RegionIndex > MaxRegionIndex) - MaxRegionIndex = RegionIndex; - PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId)); - } + const uptr RegionIndex = computeRegionId(Region); + if (RegionIndex < MinRegionIndex) + MinRegionIndex = RegionIndex; + if (RegionIndex > MaxRegionIndex) + MaxRegionIndex = RegionIndex; + PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U)); } return Region; } @@ -303,21 +329,50 @@ private: NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci) { - const uptr Region = allocateRegion(ClassId); - if (UNLIKELY(!Region)) - return nullptr; - C->getStats().add(StatMapped, RegionSize); + uptr Region; + uptr Offset; + // If the size-class currently has a region associated to it, use it. The + // newly created blocks will be located after the currently allocated memory + // for that region (up to RegionSize). Otherwise, create a new region, where + // the new blocks will be carved from the beginning. + if (Sci->CurrentRegion) { + Region = Sci->CurrentRegion; + DCHECK_GT(Sci->CurrentRegionAllocated, 0U); + Offset = Sci->CurrentRegionAllocated; + } else { + DCHECK_EQ(Sci->CurrentRegionAllocated, 0U); + Region = allocateRegion(ClassId); + if (UNLIKELY(!Region)) + return nullptr; + C->getStats().add(StatMapped, RegionSize); + Sci->CurrentRegion = Region; + Offset = 0; + } + const uptr Size = getSizeByClassId(ClassId); const u32 MaxCount = TransferBatch::getMaxCached(Size); - DCHECK_GT(MaxCount, 0); - const uptr NumberOfBlocks = RegionSize / Size; - DCHECK_GT(NumberOfBlocks, 0); + DCHECK_GT(MaxCount, 0U); + // The maximum number of blocks we should carve in the region is dictated + // by the maximum number of batches we want to fill, and the amount of + // memory left in the current region (we use the lowest of the two). This + // will not be 0 as we ensure that a region can at least hold one block (via + // static_assert and at the end of this function). + const u32 NumberOfBlocks = + Min(MaxNumBatches * MaxCount, + static_cast<u32>((RegionSize - Offset) / Size)); + DCHECK_GT(NumberOfBlocks, 0U); + TransferBatch *B = nullptr; - constexpr u32 ShuffleArraySize = 8U * TransferBatch::MaxNumCached; + constexpr u32 ShuffleArraySize = + MaxNumBatches * TransferBatch::MaxNumCached; + // Fill the transfer batches and put them in the size-class freelist. We + // need to randomize the blocks for security purposes, so we first fill a + // local array that we then shuffle before populating the batches. 
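  // Handing blocks out in strictly increasing address order would make heap
  // addresses predictable; shuffling the local array first, driven by the
  // per-class RandState, randomizes the order in which the batches serve
  // them.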
void *ShuffleArray[ShuffleArraySize]; u32 Count = 0; const uptr AllocatedUser = Size * NumberOfBlocks; - for (uptr I = Region; I < Region + AllocatedUser; I += Size) { + for (uptr I = Region + Offset; I < Region + Offset + AllocatedUser; + I += Size) { ShuffleArray[Count++] = reinterpret_cast<void *>(I); if (Count == ShuffleArraySize) { if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, @@ -340,9 +395,18 @@ private: DCHECK_GT(B->getCount(), 0); C->getStats().add(StatFree, AllocatedUser); + DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize); + // If there is not enough room in the region currently associated to fit + // more blocks, we deassociate the region by resetting CurrentRegion and + // CurrentRegionAllocated. Otherwise, update the allocated amount. + if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) { + Sci->CurrentRegion = 0; + Sci->CurrentRegionAllocated = 0; + } else { + Sci->CurrentRegionAllocated += AllocatedUser; + } Sci->AllocatedUser += AllocatedUser; - if (Sci->CanRelease) - Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime(); + return B; } @@ -353,10 +417,14 @@ private: const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks; const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId); Str->append(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu " - "inuse: %6zu avail: %6zu rss: %6zuK\n", + "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n", ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10, Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse, - AvailableChunks, Rss >> 10); + AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased); + } + + s32 getReleaseToOsIntervalMs() { + return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed); } NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId, @@ -370,18 +438,18 @@ private: (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize; if (BytesInFreeList < PageSize) return 0; // No chance to release anything. - if ((Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) * - BlockSize < - PageSize) { + const uptr BytesPushed = + (Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) * + BlockSize; + if (BytesPushed < PageSize) return 0; // Nothing new to release. - } if (!Force) { - const s32 IntervalMs = ReleaseToOsIntervalMs; + const s32 IntervalMs = getReleaseToOsIntervalMs(); if (IntervalMs < 0) return 0; if (Sci->ReleaseInfo.LastReleaseAtNs + - static_cast<uptr>(IntervalMs) * 1000000ULL > + static_cast<u64>(IntervalMs) * 1000000 > getMonotonicTime()) { return 0; // Memory was returned recently. } @@ -391,11 +459,18 @@ private: // iterate multiple times over the same freelist if a ClassId spans multiple // regions. But it will have to do for now. uptr TotalReleasedBytes = 0; + const uptr MaxSize = (RegionSize / BlockSize) * BlockSize; for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) { - if (PossibleRegions[I] == ClassId) { - ReleaseRecorder Recorder(I * RegionSize); - releaseFreeMemoryToOS(Sci->FreeList, I * RegionSize, - RegionSize / PageSize, BlockSize, &Recorder); + if (PossibleRegions[I] - 1U == ClassId) { + const uptr Region = I * RegionSize; + // If the region is the one currently associated to the size-class, we + // only need to release up to CurrentRegionAllocated, MaxSize otherwise. + const uptr Size = (Region == Sci->CurrentRegion) + ? 
Sci->CurrentRegionAllocated + : MaxSize; + ReleaseRecorder Recorder(Region); + releaseFreeMemoryToOS(Sci->FreeList, Region, Size, BlockSize, + &Recorder); if (Recorder.getReleasedRangesCount() > 0) { Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks; Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount(); @@ -410,12 +485,13 @@ private: SizeClassInfo SizeClassInfoArray[NumClasses]; + // Track the regions in use, 0 is unused, otherwise store ClassId + 1. ByteMap PossibleRegions; // Keep track of the lowest & highest regions allocated to avoid looping // through the whole NumRegions. uptr MinRegionIndex; uptr MaxRegionIndex; - s32 ReleaseToOsIntervalMs; + atomic_s32 ReleaseToOsIntervalMs; // Unless several threads request regions simultaneously from different size // classes, the stash rarely contains more than 1 entry. static constexpr uptr MaxStashedRegions = 4; diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/primary64.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/primary64.h index ef02f0b772d..d4767882ba2 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/primary64.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/primary64.h @@ -13,6 +13,7 @@ #include "common.h" #include "list.h" #include "local_cache.h" +#include "memtag.h" #include "release.h" #include "stats.h" #include "string_utils.h" @@ -38,12 +39,21 @@ namespace scudo { // The memory used by this allocator is never unmapped, but can be partially // released if the platform allows for it. -template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 { +template <class SizeClassMapT, uptr RegionSizeLog, + s32 MinReleaseToOsIntervalMs = INT32_MIN, + s32 MaxReleaseToOsIntervalMs = INT32_MAX, + bool MaySupportMemoryTagging = false> +class SizeClassAllocator64 { public: typedef SizeClassMapT SizeClassMap; - typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog> ThisT; + typedef SizeClassAllocator64< + SizeClassMap, RegionSizeLog, MinReleaseToOsIntervalMs, + MaxReleaseToOsIntervalMs, MaySupportMemoryTagging> + ThisT; typedef SizeClassAllocatorLocalCache<ThisT> CacheT; typedef typename CacheT::TransferBatch TransferBatch; + static const bool SupportsMemoryTagging = + MaySupportMemoryTagging && archSupportsMemoryTagging(); static uptr getSizeByClassId(uptr ClassId) { return (ClassId == SizeClassMap::BatchClassId) @@ -58,20 +68,17 @@ public: PrimaryBase = reinterpret_cast<uptr>( map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data)); - RegionInfoArray = reinterpret_cast<RegionInfo *>( - map(nullptr, sizeof(RegionInfo) * NumClasses, "scudo:regioninfo")); - DCHECK_EQ(reinterpret_cast<uptr>(RegionInfoArray) % SCUDO_CACHE_LINE_SIZE, - 0); - u32 Seed; + const u64 Time = getMonotonicTime(); if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))) - Seed = static_cast<u32>(getMonotonicTime() ^ (PrimaryBase >> 12)); + Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12)); const uptr PageSize = getPageSizeCached(); for (uptr I = 0; I < NumClasses; I++) { RegionInfo *Region = getRegionInfo(I); // The actual start of a region is offseted by a random number of pages. Region->RegionBeg = getRegionBaseByClassId(I) + (getRandomModN(&Seed, 16) + 1) * PageSize; + Region->RandState = getRandomU32(&Seed); // Releasing smaller size classes doesn't necessarily yield to a // meaningful RSS impact: there are more blocks per page, they are // randomized around, and thus pages are less likely to be entirely empty. 
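// Note: both primaries expose the same setReleaseToOsIntervalMs() clamp,
// bounding the runtime value by the Min/MaxReleaseToOsIntervalMs template
// parameters before the atomic store. A standalone sketch of that clamp:
//
//   s32 clampInterval(s32 Interval, s32 Lo, s32 Hi) {
//     return Interval >= Hi ? Hi : (Interval <= Lo ? Lo : Interval);
//   }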
@@ -79,12 +86,15 @@ public: // memory accesses which ends up being fairly costly. The current lower // limit is mostly arbitrary and based on empirical observations. // TODO(kostyak): make the lower limit a runtime option - Region->CanRelease = (ReleaseToOsInterval >= 0) && - (I != SizeClassMap::BatchClassId) && + Region->CanRelease = (I != SizeClassMap::BatchClassId) && (getSizeByClassId(I) >= (PageSize / 32)); - Region->RandState = getRandomU32(&Seed); + if (Region->CanRelease) + Region->ReleaseInfo.LastReleaseAtNs = Time; } - ReleaseToOsIntervalMs = ReleaseToOsInterval; + setReleaseToOsIntervalMs(ReleaseToOsInterval); + + if (SupportsMemoryTagging) + UseMemoryTagging = systemSupportsMemoryTagging(); } void init(s32 ReleaseToOsInterval) { memset(this, 0, sizeof(*this)); @@ -93,8 +103,6 @@ public: void unmapTestOnly() { unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data); - unmap(reinterpret_cast<void *>(RegionInfoArray), - sizeof(RegionInfo) * NumClasses); } TransferBatch *popBatch(CacheT *C, uptr ClassId) { @@ -143,7 +151,7 @@ public: } } - template <typename F> void iterateOverBlocks(F Callback) const { + template <typename F> void iterateOverBlocks(F Callback) { for (uptr I = 0; I < NumClasses; I++) { if (I == SizeClassMap::BatchClassId) continue; @@ -156,7 +164,7 @@ public: } } - void getStats(ScopedString *Str) const { + void getStats(ScopedString *Str) { // TODO(kostyak): get the RSS per region. uptr TotalMapped = 0; uptr PoppedBlocks = 0; @@ -177,11 +185,18 @@ public: getStats(Str, I, 0); } + void setReleaseToOsIntervalMs(s32 Interval) { + if (Interval >= MaxReleaseToOsIntervalMs) { + Interval = MaxReleaseToOsIntervalMs; + } else if (Interval <= MinReleaseToOsIntervalMs) { + Interval = MinReleaseToOsIntervalMs; + } + atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed); + } + uptr releaseToOS() { uptr TotalReleasedBytes = 0; for (uptr I = 0; I < NumClasses; I++) { - if (I == SizeClassMap::BatchClassId) - continue; RegionInfo *Region = getRegionInfo(I); ScopedLock L(Region->Mutex); TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true); @@ -189,15 +204,72 @@ public: return TotalReleasedBytes; } + bool useMemoryTagging() const { + return SupportsMemoryTagging && UseMemoryTagging; + } + void disableMemoryTagging() { UseMemoryTagging = false; } + + const char *getRegionInfoArrayAddress() const { + return reinterpret_cast<const char *>(RegionInfoArray); + } + + static uptr getRegionInfoArraySize() { + return sizeof(RegionInfoArray); + } + + static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) { + const RegionInfo *RegionInfoArray = + reinterpret_cast<const RegionInfo *>(RegionInfoData); + uptr ClassId; + uptr MinDistance = -1UL; + for (uptr I = 0; I != NumClasses; ++I) { + if (I == SizeClassMap::BatchClassId) + continue; + uptr Begin = RegionInfoArray[I].RegionBeg; + uptr End = Begin + RegionInfoArray[I].AllocatedUser; + if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I)) + continue; + uptr RegionDistance; + if (Begin <= Ptr) { + if (Ptr < End) + RegionDistance = 0; + else + RegionDistance = Ptr - End; + } else { + RegionDistance = Begin - Ptr; + } + + if (RegionDistance < MinDistance) { + MinDistance = RegionDistance; + ClassId = I; + } + } + + BlockInfo B = {}; + if (MinDistance <= 8192) { + B.RegionBegin = RegionInfoArray[ClassId].RegionBeg; + B.RegionEnd = B.RegionBegin + RegionInfoArray[ClassId].AllocatedUser; + B.BlockSize = SizeClassMap::getSizeByClassId(ClassId); + B.BlockBegin = + 
B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) * + sptr(B.BlockSize)); + while (B.BlockBegin < B.RegionBegin) + B.BlockBegin += B.BlockSize; + while (B.RegionEnd < B.BlockBegin + B.BlockSize) + B.BlockBegin -= B.BlockSize; + } + return B; + } + private: static const uptr RegionSize = 1UL << RegionSizeLog; static const uptr NumClasses = SizeClassMap::NumClasses; static const uptr PrimarySize = RegionSize * NumClasses; // Call map for user memory with at least this size. - static const uptr MapSizeIncrement = 1UL << 17; + static const uptr MapSizeIncrement = 1UL << 18; // Fill at most this number of batches from the newly map'd memory. - static const u32 MaxNumBatches = 8U; + static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U; struct RegionStats { uptr PoppedBlocks; @@ -211,7 +283,7 @@ private: u64 LastReleaseAtNs; }; - struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo { + struct UnpaddedRegionInfo { HybridMutex Mutex; SinglyLinkedList<TransferBatch> FreeList; RegionStats Stats; @@ -224,14 +296,19 @@ private: MapPlatformData Data; ReleaseToOsInfo ReleaseInfo; }; + struct RegionInfo : UnpaddedRegionInfo { + char Padding[SCUDO_CACHE_LINE_SIZE - + (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)]; + }; static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, ""); uptr PrimaryBase; - RegionInfo *RegionInfoArray; MapPlatformData Data; - s32 ReleaseToOsIntervalMs; + atomic_s32 ReleaseToOsIntervalMs; + bool UseMemoryTagging; + alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses]; - RegionInfo *getRegionInfo(uptr ClassId) const { + RegionInfo *getRegionInfo(uptr ClassId) { DCHECK_LT(ClassId, NumClasses); return &RegionInfoArray[ClassId]; } @@ -294,7 +371,9 @@ private: Region->Data = Data; if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser), UserMapSize, "scudo:primary", - MAP_ALLOWNOMEM | MAP_RESIZABLE, &Region->Data))) + MAP_ALLOWNOMEM | MAP_RESIZABLE | + (useMemoryTagging() ? MAP_MEMTAG : 0), + &Region->Data))) return nullptr; Region->MappedUser += UserMapSize; C->getStats().add(StatMapped, UserMapSize); @@ -337,13 +416,11 @@ private: C->getStats().add(StatFree, AllocatedUser); Region->AllocatedUser += AllocatedUser; Region->Exhausted = false; - if (Region->CanRelease) - Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime(); return B; } - void getStats(ScopedString *Str, uptr ClassId, uptr Rss) const { + void getStats(ScopedString *Str, uptr ClassId, uptr Rss) { RegionInfo *Region = getRegionInfo(ClassId); if (Region->MappedUser == 0) return; @@ -360,6 +437,10 @@ private: getRegionBaseByClassId(ClassId)); } + s32 getReleaseToOsIntervalMs() { + return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed); + } + NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId, bool Force = false) { const uptr BlockSize = getSizeByClassId(ClassId); @@ -371,19 +452,18 @@ private: (Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize; if (BytesInFreeList < PageSize) return 0; // No chance to release anything. - if ((Region->Stats.PushedBlocks - - Region->ReleaseInfo.PushedBlocksAtLastRelease) * - BlockSize < - PageSize) { + const uptr BytesPushed = (Region->Stats.PushedBlocks - + Region->ReleaseInfo.PushedBlocksAtLastRelease) * + BlockSize; + if (BytesPushed < PageSize) return 0; // Nothing new to release. 
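    // Taken together, a release only proceeds when the freelist holds at
    // least a page worth of blocks, at least a page worth was pushed since
    // the previous release, and (unless Force) the configured interval has
    // elapsed since ReleaseInfo.LastReleaseAtNs.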
- } if (!Force) { - const s32 IntervalMs = ReleaseToOsIntervalMs; + const s32 IntervalMs = getReleaseToOsIntervalMs(); if (IntervalMs < 0) return 0; if (Region->ReleaseInfo.LastReleaseAtNs + - static_cast<uptr>(IntervalMs) * 1000000ULL > + static_cast<u64>(IntervalMs) * 1000000 > getMonotonicTime()) { return 0; // Memory was returned recently. } @@ -391,8 +471,7 @@ ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data); releaseFreeMemoryToOS(Region->FreeList, Region->RegionBeg, - roundUpTo(Region->AllocatedUser, PageSize) / PageSize, - BlockSize, &Recorder); + Region->AllocatedUser, BlockSize, &Recorder); if (Recorder.getReleasedRangesCount() > 0) { Region->ReleaseInfo.PushedBlocksAtLastRelease = diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/quarantine.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/quarantine.h index 406a0e23804..27aa4bfec91 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/quarantine.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/quarantine.h @@ -187,7 +187,12 @@ public: Cache.initLinkerInitialized(); } void init(uptr Size, uptr CacheSize) { - memset(this, 0, sizeof(*this)); + CacheMutex.init(); + Cache.init(); + RecycleMutex.init(); + MinSize = {}; + MaxSize = {}; + MaxCacheSize = {}; initLinkerInitialized(Size, CacheSize); } diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/release.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/release.cpp new file mode 100644 index 00000000000..e144b354b25 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/release.cpp @@ -0,0 +1,16 @@ +//===-- release.cpp ---------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "release.h" + +namespace scudo { + +HybridMutex PackedCounterArray::Mutex = {}; +uptr PackedCounterArray::StaticBuffer[1024]; + +} // namespace scudo diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/release.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/release.h index 4b5c56ce7c1..323bf9db6dc 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/release.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/release.h @@ -11,6 +11,7 @@ #include "common.h" #include "list.h" +#include "mutex.h" namespace scudo { @@ -39,11 +40,13 @@ private: }; // A packed array of Counters. Each counter occupies 2^N bits, enough to store -// counter's MaxValue. Ctor will try to allocate the required Buffer via map() -// and the caller is expected to check whether the initialization was successful -// by checking isAllocated() result. For the performance sake, none of the -// accessors check the validity of the arguments, It is assumed that Index is -// always in [0, N) range and the value is not incremented past MaxValue. +// counter's MaxValue. Ctor will try to use a static buffer first, and if that +// fails (the buffer is too small or already locked), will allocate the +// required Buffer via map(). The caller is expected to check whether the +// initialization was successful by checking isAllocated() result. For +// performance's sake, none of the accessors check the validity of the arguments. +// It is assumed that Index is always in [0, N) range and the value is not +// incremented past MaxValue. 
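// The constructor below picks its backing storage with a try-lock on a
// shared static buffer, falling back to map() when the buffer is too small
// or already taken. Condensed from the code that follows:
//
//   if (BufferSize <= StaticBufferSize && Mutex.tryLock())
//     Buffer = &StaticBuffer[0];  // must be zeroed before use
//   else
//     Buffer = map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM);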
class PackedCounterArray { public: PackedCounterArray(uptr NumCounters, uptr MaxValue) : N(NumCounters) { @@ -66,11 +69,20 @@ public: BufferSize = (roundUpTo(N, static_cast<uptr>(1U) << PackingRatioLog) >> PackingRatioLog) * sizeof(*Buffer); - Buffer = reinterpret_cast<uptr *>( - map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM)); + if (BufferSize <= StaticBufferSize && Mutex.tryLock()) { + Buffer = &StaticBuffer[0]; + memset(Buffer, 0, BufferSize); + } else { + Buffer = reinterpret_cast<uptr *>( + map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM)); + } } ~PackedCounterArray() { - if (isAllocated()) + if (!isAllocated()) + return; + if (Buffer == &StaticBuffer[0]) + Mutex.unlock(); + else unmap(reinterpret_cast<void *>(Buffer), BufferSize); } @@ -95,7 +107,8 @@ public: void incRange(uptr From, uptr To) const { DCHECK_LE(From, To); - for (uptr I = From; I <= To; I++) + const uptr Top = Min(To + 1, N); + for (uptr I = From; I < Top; I++) inc(I); } @@ -110,6 +123,10 @@ private: uptr BufferSize; uptr *Buffer; + + static HybridMutex Mutex; + static const uptr StaticBufferSize = 1024U; + static uptr StaticBuffer[StaticBufferSize]; }; template <class ReleaseRecorderT> class FreePagesRangeTracker { @@ -150,8 +167,7 @@ private: template <class TransferBatchT, class ReleaseRecorderT> NOINLINE void releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base, - uptr AllocatedPagesCount, uptr BlockSize, - ReleaseRecorderT *Recorder) { + uptr Size, uptr BlockSize, ReleaseRecorderT *Recorder) { const uptr PageSize = getPageSizeCached(); // Figure out the number of chunks per page and whether we can take a fast @@ -188,34 +204,51 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base, } } - PackedCounterArray Counters(AllocatedPagesCount, FullPagesBlockCountMax); + const uptr PagesCount = roundUpTo(Size, PageSize) / PageSize; + PackedCounterArray Counters(PagesCount, FullPagesBlockCountMax); if (!Counters.isAllocated()) return; const uptr PageSizeLog = getLog2(PageSize); - const uptr End = Base + AllocatedPagesCount * PageSize; + const uptr RoundedSize = PagesCount << PageSizeLog; // Iterate over free chunks and count how many free chunks affect each // allocated page. if (BlockSize <= PageSize && PageSize % BlockSize == 0) { // Each chunk affects one page only. for (const auto &It : FreeList) { - for (u32 I = 0; I < It.getCount(); I++) { - const uptr P = reinterpret_cast<uptr>(It.get(I)); - if (P >= Base && P < End) - Counters.inc((P - Base) >> PageSizeLog); + // If dealing with a TransferBatch, the first pointer of the batch will + // point to the batch itself, we do not want to mark this for release as + // the batch is in use, so skip the first entry. + const bool IsTransferBatch = + (It.getCount() != 0) && + (reinterpret_cast<uptr>(It.get(0)) == reinterpret_cast<uptr>(&It)); + for (u32 I = IsTransferBatch ? 1 : 0; I < It.getCount(); I++) { + const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base; + // This takes care of P < Base and P >= Base + RoundedSize. + if (P < RoundedSize) + Counters.inc(P >> PageSizeLog); } } + for (uptr P = Size; P < RoundedSize; P += BlockSize) + Counters.inc(P >> PageSizeLog); } else { // In all other cases chunks might affect more than one page. 
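    // A block at byte offset P of BlockSize bytes touches the page range
    // [P >> PageSizeLog, (P + BlockSize - 1) >> PageSizeLog]. For example,
    // with a 4 KiB page, P = 6144 and BlockSize = 8192 increment the
    // counters for pages 1 through 3.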
for (const auto &It : FreeList) { - for (u32 I = 0; I < It.getCount(); I++) { - const uptr P = reinterpret_cast<uptr>(It.get(I)); - if (P >= Base && P < End) - Counters.incRange((P - Base) >> PageSizeLog, - (P - Base + BlockSize - 1) >> PageSizeLog); + // See TransferBatch comment above. + const bool IsTransferBatch = + (It.getCount() != 0) && + (reinterpret_cast<uptr>(It.get(0)) == reinterpret_cast<uptr>(&It)); + for (u32 I = IsTransferBatch ? 1 : 0; I < It.getCount(); I++) { + const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base; + // This takes care of P < Base and P >= Base + RoundedSize. + if (P < RoundedSize) + Counters.incRange(P >> PageSizeLog, + (P + BlockSize - 1) >> PageSizeLog); } } + for (uptr P = Size; P < RoundedSize; P += BlockSize) + Counters.incRange(P >> PageSizeLog, (P + BlockSize - 1) >> PageSizeLog); } // Iterate over pages detecting ranges of pages with chunk Counters equal diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/secondary.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/secondary.h index ab68e5a1d38..84eaa5091b4 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/secondary.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/secondary.h @@ -48,24 +48,195 @@ static Header *getHeader(const void *Ptr) { } // namespace LargeBlock -template <uptr MaxFreeListSize = 32U> class MapAllocator { +class MapAllocatorNoCache { public: - // Ensure the freelist is disabled on Fuchsia, since it doesn't support - // releasing Secondary blocks yet. - static_assert(!SCUDO_FUCHSIA || MaxFreeListSize == 0U, ""); + void initLinkerInitialized(UNUSED s32 ReleaseToOsInterval) {} + void init(UNUSED s32 ReleaseToOsInterval) {} + bool retrieve(UNUSED uptr Size, UNUSED LargeBlock::Header **H) { + return false; + } + bool store(UNUSED LargeBlock::Header *H) { return false; } + static bool canCache(UNUSED uptr Size) { return false; } + void disable() {} + void enable() {} + void releaseToOS() {} + void setReleaseToOsIntervalMs(UNUSED s32 Interval) {} +}; + +template <uptr MaxEntriesCount = 32U, uptr MaxEntrySize = 1UL << 19, + s32 MinReleaseToOsIntervalMs = INT32_MIN, + s32 MaxReleaseToOsIntervalMs = INT32_MAX> +class MapAllocatorCache { +public: + // Fuchsia doesn't allow releasing Secondary blocks yet. Note that 0 length + // arrays are an extension for some compilers. + // FIXME(kostyak): support (partially) the cache on Fuchsia. 
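  // In practice that means a Fuchsia build must instantiate the cache with
  // MaxEntriesCount == 0 (or use MapAllocatorNoCache instead), which the
  // static_assert below enforces; a zero MaxEntriesCount also makes Entries a
  // zero-length array, hence the extension caveat above.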
+ static_assert(!SCUDO_FUCHSIA || MaxEntriesCount == 0U, ""); + + void initLinkerInitialized(s32 ReleaseToOsInterval) { + setReleaseToOsIntervalMs(ReleaseToOsInterval); + } + void init(s32 ReleaseToOsInterval) { + memset(this, 0, sizeof(*this)); + initLinkerInitialized(ReleaseToOsInterval); + } + + bool store(LargeBlock::Header *H) { + bool EntryCached = false; + bool EmptyCache = false; + const u64 Time = getMonotonicTime(); + { + ScopedLock L(Mutex); + if (EntriesCount == MaxEntriesCount) { + if (IsFullEvents++ == 4U) + EmptyCache = true; + } else { + for (uptr I = 0; I < MaxEntriesCount; I++) { + if (Entries[I].Block) + continue; + if (I != 0) + Entries[I] = Entries[0]; + Entries[0].Block = reinterpret_cast<uptr>(H); + Entries[0].BlockEnd = H->BlockEnd; + Entries[0].MapBase = H->MapBase; + Entries[0].MapSize = H->MapSize; + Entries[0].Data = H->Data; + Entries[0].Time = Time; + EntriesCount++; + EntryCached = true; + break; + } + } + } + s32 Interval; + if (EmptyCache) + empty(); + else if ((Interval = getReleaseToOsIntervalMs()) >= 0) + releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000); + return EntryCached; + } + + bool retrieve(uptr Size, LargeBlock::Header **H) { + const uptr PageSize = getPageSizeCached(); + ScopedLock L(Mutex); + if (EntriesCount == 0) + return false; + for (uptr I = 0; I < MaxEntriesCount; I++) { + if (!Entries[I].Block) + continue; + const uptr BlockSize = Entries[I].BlockEnd - Entries[I].Block; + if (Size > BlockSize) + continue; + if (Size < BlockSize - PageSize * 4U) + continue; + *H = reinterpret_cast<LargeBlock::Header *>(Entries[I].Block); + Entries[I].Block = 0; + (*H)->BlockEnd = Entries[I].BlockEnd; + (*H)->MapBase = Entries[I].MapBase; + (*H)->MapSize = Entries[I].MapSize; + (*H)->Data = Entries[I].Data; + EntriesCount--; + return true; + } + return false; + } + + static bool canCache(uptr Size) { + return MaxEntriesCount != 0U && Size <= MaxEntrySize; + } + + void setReleaseToOsIntervalMs(s32 Interval) { + if (Interval >= MaxReleaseToOsIntervalMs) { + Interval = MaxReleaseToOsIntervalMs; + } else if (Interval <= MinReleaseToOsIntervalMs) { + Interval = MinReleaseToOsIntervalMs; + } + atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed); + } + + void releaseToOS() { releaseOlderThan(UINT64_MAX); } + + void disable() { Mutex.lock(); } + + void enable() { Mutex.unlock(); } + +private: + void empty() { + struct { + void *MapBase; + uptr MapSize; + MapPlatformData Data; + } MapInfo[MaxEntriesCount]; + uptr N = 0; + { + ScopedLock L(Mutex); + for (uptr I = 0; I < MaxEntriesCount; I++) { + if (!Entries[I].Block) + continue; + MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase); + MapInfo[N].MapSize = Entries[I].MapSize; + MapInfo[N].Data = Entries[I].Data; + Entries[I].Block = 0; + N++; + } + EntriesCount = 0; + IsFullEvents = 0; + } + for (uptr I = 0; I < N; I++) + unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL, + &MapInfo[I].Data); + } + + void releaseOlderThan(u64 Time) { + ScopedLock L(Mutex); + if (!EntriesCount) + return; + for (uptr I = 0; I < MaxEntriesCount; I++) { + if (!Entries[I].Block || !Entries[I].Time || Entries[I].Time > Time) + continue; + releasePagesToOS(Entries[I].Block, 0, + Entries[I].BlockEnd - Entries[I].Block, + &Entries[I].Data); + Entries[I].Time = 0; + } + } + + s32 getReleaseToOsIntervalMs() { + return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed); + } - void initLinkerInitialized(GlobalStats *S) { + struct CachedBlock { + uptr Block; + uptr BlockEnd; + uptr MapBase; 
+ uptr MapSize; + MapPlatformData Data; + u64 Time; + }; + + HybridMutex Mutex; + CachedBlock Entries[MaxEntriesCount]; + u32 EntriesCount; + uptr LargestSize; + u32 IsFullEvents; + atomic_s32 ReleaseToOsIntervalMs; +}; + +template <class CacheT> class MapAllocator { +public: + void initLinkerInitialized(GlobalStats *S, s32 ReleaseToOsInterval = -1) { + Cache.initLinkerInitialized(ReleaseToOsInterval); Stats.initLinkerInitialized(); if (LIKELY(S)) S->link(&Stats); } - void init(GlobalStats *S) { + void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) { memset(this, 0, sizeof(*this)); - initLinkerInitialized(S); + initLinkerInitialized(S, ReleaseToOsInterval); } void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr, - bool ZeroContents = false); + FillContentsMode FillContents = NoFill); void deallocate(void *Ptr); @@ -79,22 +250,34 @@ public: void getStats(ScopedString *Str) const; - void disable() { Mutex.lock(); } + void disable() { + Mutex.lock(); + Cache.disable(); + } - void enable() { Mutex.unlock(); } + void enable() { + Cache.enable(); + Mutex.unlock(); + } template <typename F> void iterateOverBlocks(F Callback) const { for (const auto &H : InUseBlocks) Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize()); } - static uptr getMaxFreeListSize(void) { return MaxFreeListSize; } + static uptr canCache(uptr Size) { return CacheT::canCache(Size); } + + void setReleaseToOsIntervalMs(s32 Interval) { + Cache.setReleaseToOsIntervalMs(Interval); + } + + void releaseToOS() { Cache.releaseToOS(); } private: + CacheT Cache; + HybridMutex Mutex; DoublyLinkedList<LargeBlock::Header> InUseBlocks; - // The free list is sorted based on the committed size of blocks. - DoublyLinkedList<LargeBlock::Header> FreeBlocks; uptr AllocatedBytes; uptr FreedBytes; uptr LargestSize; @@ -114,35 +297,34 @@ private: // For allocations requested with an alignment greater than or equal to a page, // the committed memory will amount to something close to Size - AlignmentHint // (pending rounding and headers). -template <uptr MaxFreeListSize> -void *MapAllocator<MaxFreeListSize>::allocate(uptr Size, uptr AlignmentHint, - uptr *BlockEnd, - bool ZeroContents) { +template <class CacheT> +void *MapAllocator<CacheT>::allocate(uptr Size, uptr AlignmentHint, + uptr *BlockEnd, + FillContentsMode FillContents) { DCHECK_GE(Size, AlignmentHint); const uptr PageSize = getPageSizeCached(); const uptr RoundedSize = roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize); - if (MaxFreeListSize && AlignmentHint < PageSize) { - ScopedLock L(Mutex); - for (auto &H : FreeBlocks) { - const uptr FreeBlockSize = H.BlockEnd - reinterpret_cast<uptr>(&H); - if (FreeBlockSize < RoundedSize) - continue; - // Candidate free block should only be at most 4 pages larger. - if (FreeBlockSize > RoundedSize + 4 * PageSize) - break; - FreeBlocks.remove(&H); - InUseBlocks.push_back(&H); - AllocatedBytes += FreeBlockSize; - NumberOfAllocs++; - Stats.add(StatAllocated, FreeBlockSize); + if (AlignmentHint < PageSize && CacheT::canCache(RoundedSize)) { + LargeBlock::Header *H; + if (Cache.retrieve(RoundedSize, &H)) { if (BlockEnd) - *BlockEnd = H.BlockEnd; - void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(&H) + + *BlockEnd = H->BlockEnd; + void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize()); - if (ZeroContents) - memset(Ptr, 0, H.BlockEnd - reinterpret_cast<uptr>(Ptr)); + if (FillContents) + memset(Ptr, FillContents == ZeroFill ? 
0 : PatternFillByte, + H->BlockEnd - reinterpret_cast<uptr>(Ptr)); + const uptr BlockSize = H->BlockEnd - reinterpret_cast<uptr>(H); + { + ScopedLock L(Mutex); + InUseBlocks.push_back(H); + AllocatedBytes += BlockSize; + NumberOfAllocs++; + Stats.add(StatAllocated, BlockSize); + Stats.add(StatMapped, H->MapSize); + } return Ptr; } } @@ -191,6 +373,8 @@ void *MapAllocator<MaxFreeListSize>::allocate(uptr Size, uptr AlignmentHint, H->MapSize = MapEnd - MapBase; H->BlockEnd = CommitBase + CommitSize; H->Data = Data; + if (BlockEnd) + *BlockEnd = CommitBase + CommitSize; { ScopedLock L(Mutex); InUseBlocks.push_back(H); @@ -201,52 +385,31 @@ void *MapAllocator<MaxFreeListSize>::allocate(uptr Size, uptr AlignmentHint, Stats.add(StatAllocated, CommitSize); Stats.add(StatMapped, H->MapSize); } - if (BlockEnd) - *BlockEnd = CommitBase + CommitSize; return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize()); } -template <uptr MaxFreeListSize> -void MapAllocator<MaxFreeListSize>::deallocate(void *Ptr) { +template <class CacheT> void MapAllocator<CacheT>::deallocate(void *Ptr) { LargeBlock::Header *H = LargeBlock::getHeader(Ptr); const uptr Block = reinterpret_cast<uptr>(H); + const uptr CommitSize = H->BlockEnd - Block; { ScopedLock L(Mutex); InUseBlocks.remove(H); - const uptr CommitSize = H->BlockEnd - Block; FreedBytes += CommitSize; NumberOfFrees++; Stats.sub(StatAllocated, CommitSize); - if (MaxFreeListSize && FreeBlocks.size() < MaxFreeListSize) { - bool Inserted = false; - for (auto &F : FreeBlocks) { - const uptr FreeBlockSize = F.BlockEnd - reinterpret_cast<uptr>(&F); - if (FreeBlockSize >= CommitSize) { - FreeBlocks.insert(H, &F); - Inserted = true; - break; - } - } - if (!Inserted) - FreeBlocks.push_back(H); - const uptr RoundedAllocationStart = - roundUpTo(Block + LargeBlock::getHeaderSize(), getPageSizeCached()); - MapPlatformData Data = H->Data; - // TODO(kostyak): use release_to_os_interval_ms - releasePagesToOS(Block, RoundedAllocationStart - Block, - H->BlockEnd - RoundedAllocationStart, &Data); - return; - } Stats.sub(StatMapped, H->MapSize); } + if (CacheT::canCache(CommitSize) && Cache.store(H)) + return; void *Addr = reinterpret_cast<void *>(H->MapBase); const uptr Size = H->MapSize; MapPlatformData Data = H->Data; unmap(Addr, Size, UNMAP_ALL, &Data); } -template <uptr MaxFreeListSize> -void MapAllocator<MaxFreeListSize>::getStats(ScopedString *Str) const { +template <class CacheT> +void MapAllocator<CacheT>::getStats(ScopedString *Str) const { Str->append( "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times " "(%zuK), remains %zu (%zuK) max %zuM\n", diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/size_class_map.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/size_class_map.h index 947526e8aea..5ed8e2845b3 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/size_class_map.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/size_class_map.h @@ -9,11 +9,32 @@ #ifndef SCUDO_SIZE_CLASS_MAP_H_ #define SCUDO_SIZE_CLASS_MAP_H_ +#include "chunk.h" #include "common.h" #include "string_utils.h" namespace scudo { +inline uptr scaledLog2(uptr Size, uptr ZeroLog, uptr LogBits) { + const uptr L = getMostSignificantSetBitIndex(Size); + const uptr LBits = (Size >> (L - LogBits)) - (1 << LogBits); + const uptr HBits = (L - ZeroLog) << LogBits; + return LBits + HBits; +} + +template <typename Config> struct SizeClassMapBase { + static u32 getMaxCachedHint(uptr Size) { + DCHECK_NE(Size, 0); + u32 N; + // Force a 32-bit division if the template parameters allow for it. 
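    // Rationale: when both MaxBytesCachedLog and MaxSizeLog fit in 31 bits,
    // the dividend and the divisor both fit in a u32, and a 32-bit division
    // is cheaper than a 64-bit one on several targets. For example,
    // MaxBytesCachedLog = 13 gives (1U << 13) / Size = 8192 / Size, a pure
    // 32-bit division.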
+ if (Config::MaxBytesCachedLog > 31 || Config::MaxSizeLog > 31) + N = static_cast<u32>((1UL << Config::MaxBytesCachedLog) / Size); + else + N = (1U << Config::MaxBytesCachedLog) / static_cast<u32>(Size); + return Max(1U, Min(Config::MaxNumCachedHint, N)); + } +}; + // SizeClassMap maps allocation sizes into size classes and back, in an // efficient table-free manner. // @@ -33,22 +54,24 @@ namespace scudo { // of chunks that can be cached per-thread: // - MaxNumCachedHint is a hint for the max number of chunks cached per class. // - 2^MaxBytesCachedLog is the max number of bytes cached per class. +template <typename Config> +class FixedSizeClassMap : public SizeClassMapBase<Config> { + typedef SizeClassMapBase<Config> Base; -template <u8 NumBits, u8 MinSizeLog, u8 MidSizeLog, u8 MaxSizeLog, - u32 MaxNumCachedHintT, u8 MaxBytesCachedLog> -class SizeClassMap { - static const uptr MinSize = 1UL << MinSizeLog; - static const uptr MidSize = 1UL << MidSizeLog; + static const uptr MinSize = 1UL << Config::MinSizeLog; + static const uptr MidSize = 1UL << Config::MidSizeLog; static const uptr MidClass = MidSize / MinSize; - static const u8 S = NumBits - 1; + static const u8 S = Config::NumBits - 1; static const uptr M = (1UL << S) - 1; + static const uptr SizeDelta = Chunk::getHeaderSize(); + public: - static const u32 MaxNumCachedHint = MaxNumCachedHintT; + static const u32 MaxNumCachedHint = Config::MaxNumCachedHint; - static const uptr MaxSize = 1UL << MaxSizeLog; + static const uptr MaxSize = (1UL << Config::MaxSizeLog) + SizeDelta; static const uptr NumClasses = - MidClass + ((MaxSizeLog - MidSizeLog) << S) + 1; + MidClass + ((Config::MaxSizeLog - Config::MidSizeLog) << S) + 1; static_assert(NumClasses <= 256, ""); static const uptr LargestClassId = NumClasses - 1; static const uptr BatchClassId = 0; @@ -56,97 +79,213 @@ public: static uptr getSizeByClassId(uptr ClassId) { DCHECK_NE(ClassId, BatchClassId); if (ClassId <= MidClass) - return ClassId << MinSizeLog; + return (ClassId << Config::MinSizeLog) + SizeDelta; ClassId -= MidClass; const uptr T = MidSize << (ClassId >> S); - return T + (T >> S) * (ClassId & M); + return T + (T >> S) * (ClassId & M) + SizeDelta; } static uptr getClassIdBySize(uptr Size) { + if (Size <= SizeDelta + (1 << Config::MinSizeLog)) + return 1; + Size -= SizeDelta; DCHECK_LE(Size, MaxSize); if (Size <= MidSize) - return (Size + MinSize - 1) >> MinSizeLog; - const uptr L = getMostSignificantSetBitIndex(Size); - const uptr HBits = (Size >> (L - S)) & M; - const uptr LBits = Size & ((1UL << (L - S)) - 1); - const uptr L1 = L - MidSizeLog; - return MidClass + (L1 << S) + HBits + (LBits > 0); + return (Size + MinSize - 1) >> Config::MinSizeLog; + return MidClass + 1 + scaledLog2(Size - 1, Config::MidSizeLog, S); } static u32 getMaxCachedHint(uptr Size) { DCHECK_LE(Size, MaxSize); - DCHECK_NE(Size, 0); - u32 N; - // Force a 32-bit division if the template parameters allow for it. 
- if (MaxBytesCachedLog > 31 || MaxSizeLog > 31) - N = static_cast<u32>((1UL << MaxBytesCachedLog) / Size); - else - N = (1U << MaxBytesCachedLog) / static_cast<u32>(Size); - return Max(1U, Min(MaxNumCachedHint, N)); + return Base::getMaxCachedHint(Size); } +}; + +template <typename Config> +class TableSizeClassMap : public SizeClassMapBase<Config> { + typedef SizeClassMapBase<Config> Base; + + static const u8 S = Config::NumBits - 1; + static const uptr M = (1UL << S) - 1; + static const uptr ClassesSize = + sizeof(Config::Classes) / sizeof(Config::Classes[0]); - static void print() { - ScopedString Buffer(1024); - uptr PrevS = 0; - uptr TotalCached = 0; - for (uptr I = 0; I < NumClasses; I++) { - if (I == BatchClassId) - continue; - const uptr S = getSizeByClassId(I); - if (S >= MidSize / 2 && (S & (S - 1)) == 0) - Buffer.append("\n"); - const uptr D = S - PrevS; - const uptr P = PrevS ? (D * 100 / PrevS) : 0; - const uptr L = S ? getMostSignificantSetBitIndex(S) : 0; - const uptr Cached = getMaxCachedHint(S) * S; - Buffer.append( - "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %zu %zu; id %zu\n", - I, getSizeByClassId(I), D, P, L, getMaxCachedHint(S), Cached, - getClassIdBySize(S)); - TotalCached += Cached; - PrevS = S; + struct SizeTable { + constexpr SizeTable() { + uptr Pos = 1 << Config::MidSizeLog; + uptr Inc = 1 << (Config::MidSizeLog - S); + for (uptr i = 0; i != getTableSize(); ++i) { + Pos += Inc; + if ((Pos & (Pos - 1)) == 0) + Inc *= 2; + Tab[i] = computeClassId(Pos + Config::SizeDelta); + } } - Buffer.append("Total Cached: %zu\n", TotalCached); - Buffer.output(); - } - static void validate() { - for (uptr C = 0; C < NumClasses; C++) { - if (C == BatchClassId) - continue; - const uptr S = getSizeByClassId(C); - CHECK_NE(S, 0U); - CHECK_EQ(getClassIdBySize(S), C); - if (C < LargestClassId) - CHECK_EQ(getClassIdBySize(S + 1), C + 1); - CHECK_EQ(getClassIdBySize(S - 1), C); - if (C - 1 != BatchClassId) - CHECK_GT(getSizeByClassId(C), getSizeByClassId(C - 1)); + constexpr static u8 computeClassId(uptr Size) { + for (uptr i = 0; i != ClassesSize; ++i) { + if (Size <= Config::Classes[i]) + return static_cast<u8>(i + 1); + } + return static_cast<u8>(-1); } - // Do not perform the loop if the maximum size is too large. 
- if (MaxSizeLog > 19) - return; - for (uptr S = 1; S <= MaxSize; S++) { - const uptr C = getClassIdBySize(S); - CHECK_LT(C, NumClasses); - CHECK_GE(getSizeByClassId(C), S); - if (C - 1 != BatchClassId) - CHECK_LT(getSizeByClassId(C - 1), S); + + constexpr static uptr getTableSize() { + return (Config::MaxSizeLog - Config::MidSizeLog) << S; } + + u8 Tab[getTableSize()] = {}; + }; + + static constexpr SizeTable Table = {}; + +public: + static const u32 MaxNumCachedHint = Config::MaxNumCachedHint; + + static const uptr NumClasses = ClassesSize + 1; + static_assert(NumClasses < 256, ""); + static const uptr LargestClassId = NumClasses - 1; + static const uptr BatchClassId = 0; + static const uptr MaxSize = Config::Classes[LargestClassId - 1]; + + static uptr getSizeByClassId(uptr ClassId) { + return Config::Classes[ClassId - 1]; } + + static uptr getClassIdBySize(uptr Size) { + if (Size <= Config::Classes[0]) + return 1; + Size -= Config::SizeDelta; + DCHECK_LE(Size, MaxSize); + if (Size <= (1 << Config::MidSizeLog)) + return ((Size - 1) >> Config::MinSizeLog) + 1; + return Table.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)]; + } + + static u32 getMaxCachedHint(uptr Size) { + DCHECK_LE(Size, MaxSize); + return Base::getMaxCachedHint(Size); + } +}; + +struct AndroidSizeClassConfig { +#if SCUDO_WORDSIZE == 64U + static const uptr NumBits = 7; + static const uptr MinSizeLog = 4; + static const uptr MidSizeLog = 6; + static const uptr MaxSizeLog = 16; + static const u32 MaxNumCachedHint = 14; + static const uptr MaxBytesCachedLog = 13; + + static constexpr u32 Classes[] = { + 0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00090, 0x000b0, + 0x000c0, 0x000e0, 0x00120, 0x00160, 0x001c0, 0x00250, 0x00320, 0x00450, + 0x00670, 0x00830, 0x00a10, 0x00c30, 0x01010, 0x01210, 0x01bd0, 0x02210, + 0x02d90, 0x03790, 0x04010, 0x04810, 0x05a10, 0x07310, 0x08210, 0x10010, + }; + static const uptr SizeDelta = 16; +#else + static const uptr NumBits = 8; + static const uptr MinSizeLog = 4; + static const uptr MidSizeLog = 7; + static const uptr MaxSizeLog = 16; + static const u32 MaxNumCachedHint = 14; + static const uptr MaxBytesCachedLog = 13; + + static constexpr u32 Classes[] = { + 0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00080, 0x00090, + 0x000a0, 0x000b0, 0x000c0, 0x000e0, 0x000f0, 0x00110, 0x00120, 0x00130, + 0x00150, 0x00160, 0x00170, 0x00190, 0x001d0, 0x00210, 0x00240, 0x002a0, + 0x00330, 0x00370, 0x003a0, 0x00400, 0x00430, 0x004a0, 0x00530, 0x00610, + 0x00730, 0x00840, 0x00910, 0x009c0, 0x00a60, 0x00b10, 0x00ca0, 0x00e00, + 0x00fb0, 0x01030, 0x01130, 0x011f0, 0x01490, 0x01650, 0x01930, 0x02010, + 0x02190, 0x02490, 0x02850, 0x02d50, 0x03010, 0x03210, 0x03c90, 0x04090, + 0x04510, 0x04810, 0x05c10, 0x06f10, 0x07310, 0x08010, 0x0c010, 0x10010, + }; + static const uptr SizeDelta = 16; +#endif +}; + +typedef TableSizeClassMap<AndroidSizeClassConfig> AndroidSizeClassMap; + +struct DefaultSizeClassConfig { + static const uptr NumBits = 3; + static const uptr MinSizeLog = 5; + static const uptr MidSizeLog = 8; + static const uptr MaxSizeLog = 17; + static const u32 MaxNumCachedHint = 8; + static const uptr MaxBytesCachedLog = 10; }; -typedef SizeClassMap<3, 5, 8, 17, 8, 10> DefaultSizeClassMap; +typedef FixedSizeClassMap<DefaultSizeClassConfig> DefaultSizeClassMap; -// TODO(kostyak): further tune class maps for Android & Fuchsia. 
+struct SvelteSizeClassConfig { #if SCUDO_WORDSIZE == 64U -typedef SizeClassMap<4, 4, 8, 14, 4, 10> SvelteSizeClassMap; -typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap; + static const uptr NumBits = 4; + static const uptr MinSizeLog = 4; + static const uptr MidSizeLog = 8; + static const uptr MaxSizeLog = 14; + static const u32 MaxNumCachedHint = 4; + static const uptr MaxBytesCachedLog = 10; #else -typedef SizeClassMap<4, 3, 7, 14, 5, 10> SvelteSizeClassMap; -typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap; + static const uptr NumBits = 4; + static const uptr MinSizeLog = 3; + static const uptr MidSizeLog = 7; + static const uptr MaxSizeLog = 14; + static const u32 MaxNumCachedHint = 5; + static const uptr MaxBytesCachedLog = 10; #endif +}; + +typedef FixedSizeClassMap<SvelteSizeClassConfig> SvelteSizeClassMap; + +template <typename SCMap> inline void printMap() { + ScopedString Buffer(1024); + uptr PrevS = 0; + uptr TotalCached = 0; + for (uptr I = 0; I < SCMap::NumClasses; I++) { + if (I == SCMap::BatchClassId) + continue; + const uptr S = SCMap::getSizeByClassId(I); + const uptr D = S - PrevS; + const uptr P = PrevS ? (D * 100 / PrevS) : 0; + const uptr L = S ? getMostSignificantSetBitIndex(S) : 0; + const uptr Cached = SCMap::getMaxCachedHint(S) * S; + Buffer.append( + "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %zu %zu; id %zu\n", + I, S, D, P, L, SCMap::getMaxCachedHint(S), Cached, + SCMap::getClassIdBySize(S)); + TotalCached += Cached; + PrevS = S; + } + Buffer.append("Total Cached: %zu\n", TotalCached); + Buffer.output(); +} +template <typename SCMap> static void validateMap() { + for (uptr C = 0; C < SCMap::NumClasses; C++) { + if (C == SCMap::BatchClassId) + continue; + const uptr S = SCMap::getSizeByClassId(C); + CHECK_NE(S, 0U); + CHECK_EQ(SCMap::getClassIdBySize(S), C); + if (C < SCMap::LargestClassId) + CHECK_EQ(SCMap::getClassIdBySize(S + 1), C + 1); + CHECK_EQ(SCMap::getClassIdBySize(S - 1), C); + if (C - 1 != SCMap::BatchClassId) + CHECK_GT(SCMap::getSizeByClassId(C), SCMap::getSizeByClassId(C - 1)); + } + // Do not perform the loop if the maximum size is too large. + if (SCMap::MaxSize > (1 << 19)) + return; + for (uptr S = 1; S <= SCMap::MaxSize; S++) { + const uptr C = SCMap::getClassIdBySize(S); + CHECK_LT(C, SCMap::NumClasses); + CHECK_GE(SCMap::getSizeByClassId(C), S); + if (C - 1 != SCMap::BatchClassId) + CHECK_LT(SCMap::getSizeByClassId(C - 1), S); + } +} } // namespace scudo #endif // SCUDO_SIZE_CLASS_MAP_H_ diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/stack_depot.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/stack_depot.h new file mode 100644 index 00000000000..f2f4d959779 --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/stack_depot.h @@ -0,0 +1,144 @@ +//===-- stack_depot.h -------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_STACK_DEPOT_H_ +#define SCUDO_STACK_DEPOT_H_ + +#include "atomic_helpers.h" +#include "mutex.h" + +namespace scudo { + +class MurMur2HashBuilder { + static const u32 M = 0x5bd1e995; + static const u32 Seed = 0x9747b28c; + static const u32 R = 24; + u32 H; + + public: + explicit MurMur2HashBuilder(u32 Init = 0) { H = Seed ^ Init; } + void add(u32 K) { + K *= M; + K ^= K >> R; + K *= M; + H *= M; + H ^= K; + } + u32 get() { + u32 X = H; + X ^= X >> 13; + X *= M; + X ^= X >> 15; + return X; + } +}; + +class StackDepot { + HybridMutex RingEndMu; + u32 RingEnd; + + // This data structure stores a stack trace for each allocation and + // deallocation when stack trace recording is enabled, that may be looked up + // using a hash of the stack trace. The lower bits of the hash are an index + // into the Tab array, which stores an index into the Ring array where the + // stack traces are stored. As the name implies, Ring is a ring buffer, so a + // stack trace may wrap around to the start of the array. + // + // Each stack trace in Ring is prefixed by a stack trace marker consisting of + // a fixed 1 bit in bit 0 (this allows disambiguation between stack frames + // and stack trace markers in the case where instruction pointers are 4-byte + // aligned, as they are on arm64), the stack trace hash in bits 1-32, and the + // size of the stack trace in bits 33-63. + // + // The insert() function is potentially racy in its accesses to the Tab and + // Ring arrays, but find() is resilient to races in the sense that, barring + // hash collisions, it will either return the correct stack trace or no stack + // trace at all, even if two instances of insert() raced with one another. + // This is achieved by re-checking the hash of the stack trace before + // returning the trace. + +#ifdef SCUDO_FUZZ + // Use smaller table sizes for fuzzing in order to reduce input size. + static const uptr TabBits = 4; +#else + static const uptr TabBits = 16; +#endif + static const uptr TabSize = 1 << TabBits; + static const uptr TabMask = TabSize - 1; + atomic_u32 Tab[TabSize]; + +#ifdef SCUDO_FUZZ + static const uptr RingBits = 4; +#else + static const uptr RingBits = 19; +#endif + static const uptr RingSize = 1 << RingBits; + static const uptr RingMask = RingSize - 1; + atomic_u64 Ring[RingSize]; + +public: + // Insert hash of the stack trace [Begin, End) into the stack depot, and + // return the hash. + u32 insert(uptr *Begin, uptr *End) { + MurMur2HashBuilder B; + for (uptr *I = Begin; I != End; ++I) + B.add(u32(*I) >> 2); + u32 Hash = B.get(); + + u32 Pos = Hash & TabMask; + u32 RingPos = atomic_load_relaxed(&Tab[Pos]); + u64 Entry = atomic_load_relaxed(&Ring[RingPos]); + u64 Id = (u64(End - Begin) << 33) | (u64(Hash) << 1) | 1; + if (Entry == Id) + return Hash; + + ScopedLock Lock(RingEndMu); + RingPos = RingEnd; + atomic_store_relaxed(&Tab[Pos], RingPos); + atomic_store_relaxed(&Ring[RingPos], Id); + for (uptr *I = Begin; I != End; ++I) { + RingPos = (RingPos + 1) & RingMask; + atomic_store_relaxed(&Ring[RingPos], *I); + } + RingEnd = (RingPos + 1) & RingMask; + return Hash; + } + + // Look up a stack trace by hash. Returns true if successful. The trace may be + // accessed via operator[] passing indexes between *RingPosPtr and + // *RingPosPtr + *SizePtr. 
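+  // In sketch form, a caller might walk a stored trace like this (Depot,
+  // Hash and processFrame() are hypothetical caller-side names):
+  //   uptr RingPos, Size;
+  //   if (Depot.find(Hash, &RingPos, &Size))
+  //     for (uptr I = 0; I != Size; ++I)
+  //       processFrame(Depot[RingPos + I]);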
+ bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const { + u32 Pos = Hash & TabMask; + u32 RingPos = atomic_load_relaxed(&Tab[Pos]); + if (RingPos >= RingSize) + return false; + u64 Entry = atomic_load_relaxed(&Ring[RingPos]); + u64 HashWithTagBit = (u64(Hash) << 1) | 1; + if ((Entry & 0x1ffffffff) != HashWithTagBit) + return false; + u32 Size = u32(Entry >> 33); + if (Size >= RingSize) + return false; + *RingPosPtr = (RingPos + 1) & RingMask; + *SizePtr = Size; + MurMur2HashBuilder B; + for (uptr I = 0; I != Size; ++I) { + RingPos = (RingPos + 1) & RingMask; + B.add(u32(atomic_load_relaxed(&Ring[RingPos])) >> 2); + } + return B.get() == Hash; + } + + u64 operator[](uptr RingPos) const { + return atomic_load_relaxed(&Ring[RingPos & RingMask]); + } +}; + +} // namespace scudo + +#endif // SCUDO_STACK_DEPOT_H_ diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/stats.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/stats.h index 38481e98e48..d76b904949e 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/stats.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/stats.h @@ -58,7 +58,9 @@ class GlobalStats : public LocalStats { public: void initLinkerInitialized() {} void init() { - memset(this, 0, sizeof(*this)); + LocalStats::init(); + Mutex.init(); + StatsList = {}; initLinkerInitialized(); } diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt index 63007e35896..78c297ae7e8 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt @@ -10,6 +10,7 @@ set(SCUDO_UNITTEST_CFLAGS -I${COMPILER_RT_SOURCE_DIR}/include -I${COMPILER_RT_SOURCE_DIR}/lib -I${COMPILER_RT_SOURCE_DIR}/lib/scudo/standalone + -I${COMPILER_RT_SOURCE_DIR}/lib/scudo/standalone/include -DGTEST_HAS_RTTI=0 -DSCUDO_DEBUG=1 # Extra flags for the C++ tests @@ -20,8 +21,7 @@ if(ANDROID) list(APPEND SCUDO_UNITTEST_CFLAGS -fno-emulated-tls) endif() -# FIXME: GWP-ASan is temporarily disabled, re-enable once issues are fixed. 
-if (FALSE AND COMPILER_RT_HAS_GWP_ASAN) +if (COMPILER_RT_HAS_GWP_ASAN) list(APPEND SCUDO_UNITTEST_CFLAGS -DGWP_ASAN_HOOKS) endif() @@ -43,8 +43,9 @@ endforeach() macro(add_scudo_unittest testname) cmake_parse_arguments(TEST "" "" "SOURCES;ADDITIONAL_RTOBJECTS" ${ARGN}) - if (FALSE AND COMPILER_RT_HAS_GWP_ASAN) - list(APPEND TEST_ADDITIONAL_RTOBJECTS RTGwpAsan) + if (COMPILER_RT_HAS_GWP_ASAN) + list(APPEND TEST_ADDITIONAL_RTOBJECTS + RTGwpAsan RTGwpAsanBacktraceLibc RTGwpAsanSegvHandler) endif() if(COMPILER_RT_HAS_SCUDO_STANDALONE) diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/bytemap_test.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/bytemap_test.cpp index 7db7feb6acc..4034b108fab 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/bytemap_test.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/bytemap_test.cpp @@ -31,45 +31,3 @@ TEST(ScudoByteMapTest, FlatByteMap) { testMap(Map, Size); Map.unmapTestOnly(); } - -TEST(ScudoByteMapTest, TwoLevelByteMap) { - const scudo::uptr Size1 = 1U << 6, Size2 = 1U << 12; - scudo::TwoLevelByteMap<Size1, Size2> Map; - testMap(Map, Size1 * Size2); - Map.unmapTestOnly(); -} - -using TestByteMap = scudo::TwoLevelByteMap<1U << 12, 1U << 13>; - -struct TestByteMapParam { - TestByteMap *Map; - scudo::uptr Shard; - scudo::uptr NumberOfShards; -}; - -void *populateByteMap(void *Param) { - TestByteMapParam *P = reinterpret_cast<TestByteMapParam *>(Param); - for (scudo::uptr I = P->Shard; I < P->Map->size(); I += P->NumberOfShards) { - scudo::u8 V = static_cast<scudo::u8>((I % 100) + 1); - P->Map->set(I, V); - EXPECT_EQ((*P->Map)[I], V); - } - return 0; -} - -TEST(ScudoByteMapTest, ThreadedTwoLevelByteMap) { - TestByteMap Map; - Map.init(); - static const scudo::uptr NumberOfThreads = 16U; - pthread_t T[NumberOfThreads]; - TestByteMapParam P[NumberOfThreads]; - for (scudo::uptr I = 0; I < NumberOfThreads; I++) { - P[I].Map = ⤅ - P[I].Shard = I; - P[I].NumberOfShards = NumberOfThreads; - pthread_create(&T[I], 0, populateByteMap, &P[I]); - } - for (scudo::uptr I = 0; I < NumberOfThreads; I++) - pthread_join(T[I], 0); - Map.unmapTestOnly(); -} diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp index fec5f864aeb..7e04afb90bb 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp @@ -22,6 +22,51 @@ static bool Ready = false; static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc; +static void disableDebuggerdMaybe() { +#if SCUDO_ANDROID + // Disable the debuggerd signal handler on Android, without this we can end + // up spending a significant amount of time creating tombstones. + signal(SIGSEGV, SIG_DFL); +#endif +} + +template <class AllocatorT> +bool isTaggedAllocation(AllocatorT *Allocator, scudo::uptr Size, + scudo::uptr Alignment) { + if (!Allocator->useMemoryTagging() || + !scudo::systemDetectsMemoryTagFaultsTestOnly()) + return false; + + const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG; + if (Alignment < MinAlignment) + Alignment = MinAlignment; + const scudo::uptr NeededSize = + scudo::roundUpTo(Size, MinAlignment) + + ((Alignment > MinAlignment) ? 
Alignment : scudo::Chunk::getHeaderSize()); + return AllocatorT::PrimaryT::canAllocate(NeededSize); +} + +template <class AllocatorT> +void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size, + scudo::uptr Alignment) { + if (!isTaggedAllocation(Allocator, Size, Alignment)) + return; + + Size = scudo::roundUpTo(Size, scudo::archMemoryTagGranuleSize()); + EXPECT_DEATH( + { + disableDebuggerdMaybe(); + reinterpret_cast<char *>(P)[-1] = 0xaa; + }, + ""); + EXPECT_DEATH( + { + disableDebuggerdMaybe(); + reinterpret_cast<char *>(P)[Size] = 0xaa; + }, + ""); +} + template <class Config> static void testAllocator() { using AllocatorT = scudo::Allocator<Config>; auto Deleter = [](AllocatorT *A) { @@ -56,6 +101,7 @@ template <class Config> static void testAllocator() { EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align)); EXPECT_LE(Size, Allocator->getUsableSize(P)); memset(P, 0xaa, Size); + checkMemoryTaggingMaybe(Allocator.get(), P, Size, Align); Allocator->deallocate(P, Origin, Size); } } @@ -69,7 +115,44 @@ template <class Config> static void testAllocator() { void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true); EXPECT_NE(P, nullptr); for (scudo::uptr I = 0; I < Size; I++) - EXPECT_EQ((reinterpret_cast<char *>(P))[I], 0); + ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0); + memset(P, 0xaa, Size); + Allocator->deallocate(P, Origin, Size); + } + } + Allocator->releaseToOS(); + + // Ensure that specifying ZeroContents returns a zero'd out block. + Allocator->setFillContents(scudo::ZeroFill); + for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) { + for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) { + const scudo::uptr Size = (1U << SizeLog) + Delta * 128U; + void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false); + EXPECT_NE(P, nullptr); + for (scudo::uptr I = 0; I < Size; I++) + ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0); + memset(P, 0xaa, Size); + Allocator->deallocate(P, Origin, Size); + } + } + Allocator->releaseToOS(); + + // Ensure that specifying PatternOrZeroFill returns a pattern-filled block in + // the primary allocator, and either pattern or zero filled block in the + // secondary. + Allocator->setFillContents(scudo::PatternOrZeroFill); + for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) { + for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) { + const scudo::uptr Size = (1U << SizeLog) + Delta * 128U; + void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false); + EXPECT_NE(P, nullptr); + for (scudo::uptr I = 0; I < Size; I++) { + unsigned char V = (reinterpret_cast<unsigned char *>(P))[I]; + if (AllocatorT::PrimaryT::canAllocate(Size)) + ASSERT_EQ(V, scudo::PatternFillByte); + else + ASSERT_TRUE(V == scudo::PatternFillByte || V == 0); + } memset(P, 0xaa, Size); Allocator->deallocate(P, Origin, Size); } @@ -83,7 +166,8 @@ template <class Config> static void testAllocator() { bool Found = false; for (scudo::uptr I = 0; I < 1024U && !Found; I++) { void *P = Allocator->allocate(NeedleSize, Origin); - if (P == NeedleP) + if (Allocator->untagPointerMaybe(P) == + Allocator->untagPointerMaybe(NeedleP)) Found = true; Allocator->deallocate(P, Origin); } @@ -110,16 +194,18 @@ template <class Config> static void testAllocator() { // Check that reallocating a chunk to a slightly smaller or larger size // returns the same chunk. This requires that all the sizes we iterate on use - // the same block size, but that should be the case for 2048 with our default - // class size maps. 
- P = Allocator->allocate(DataSize, Origin); - memset(P, Marker, DataSize); + // the same block size, but that should be the case for MaxSize - 64 with our + // default class size maps. + constexpr scudo::uptr ReallocSize = MaxSize - 64; + P = Allocator->allocate(ReallocSize, Origin); + memset(P, Marker, ReallocSize); for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) { - const scudo::uptr NewSize = DataSize + Delta; + const scudo::uptr NewSize = ReallocSize + Delta; void *NewP = Allocator->reallocate(P, NewSize); EXPECT_EQ(NewP, P); - for (scudo::uptr I = 0; I < DataSize - 32; I++) + for (scudo::uptr I = 0; I < ReallocSize - 32; I++) EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker); + checkMemoryTaggingMaybe(Allocator.get(), NewP, NewSize, 0); } Allocator->deallocate(P, Origin); @@ -148,6 +234,58 @@ template <class Config> static void testAllocator() { Allocator->releaseToOS(); + if (Allocator->useMemoryTagging() && + scudo::systemDetectsMemoryTagFaultsTestOnly()) { + // Check that use-after-free is detected. + for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) { + const scudo::uptr Size = 1U << SizeLog; + if (!isTaggedAllocation(Allocator.get(), Size, 1)) + continue; + // UAF detection is probabilistic, so we repeat the test up to 256 times + // if necessary. With 15 possible tags this means a 1 in 15^256 chance of + // a false positive. + EXPECT_DEATH( + { + disableDebuggerdMaybe(); + for (unsigned I = 0; I != 256; ++I) { + void *P = Allocator->allocate(Size, Origin); + Allocator->deallocate(P, Origin); + reinterpret_cast<char *>(P)[0] = 0xaa; + } + }, + ""); + EXPECT_DEATH( + { + disableDebuggerdMaybe(); + for (unsigned I = 0; I != 256; ++I) { + void *P = Allocator->allocate(Size, Origin); + Allocator->deallocate(P, Origin); + reinterpret_cast<char *>(P)[Size - 1] = 0xaa; + } + }, + ""); + } + + // Check that disabling memory tagging works correctly. + void *P = Allocator->allocate(2048, Origin); + EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, ""); + scudo::disableMemoryTagChecksTestOnly(); + Allocator->disableMemoryTagging(); + reinterpret_cast<char *>(P)[2048] = 0xaa; + Allocator->deallocate(P, Origin); + + P = Allocator->allocate(2048, Origin); + EXPECT_EQ(Allocator->untagPointerMaybe(P), P); + reinterpret_cast<char *>(P)[2048] = 0xaa; + Allocator->deallocate(P, Origin); + + Allocator->releaseToOS(); + + // Disabling memory tag checks may interfere with subsequent tests. + // Re-enable them now. + scudo::enableMemoryTagChecksTestOnly(); + } + scudo::uptr BufferSize = 8192; std::vector<char> Buffer(BufferSize); scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize); @@ -164,6 +302,17 @@ template <class Config> static void testAllocator() { EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos); } +// Test that multiple instantiations of the allocator have not messed up the +// process's signal handlers (GWP-ASan used to do this). 
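+// The test below maps a page with MAP_NOACCESS and expects any write to it to
+// die; if a stale SIGSEGV handler left behind by a previous allocator
+// instance swallowed the fault, the EXPECT_DEATH check would fail.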
+void testSEGV() { + const scudo::uptr Size = 4 * scudo::getPageSizeCached(); + scudo::MapPlatformData Data = {}; + void *P = scudo::map(nullptr, Size, "testSEGV", MAP_NOACCESS, &Data); + EXPECT_NE(P, nullptr); + EXPECT_DEATH(memset(P, 0xaa, Size), ""); + scudo::unmap(P, Size, UNMAP_ALL, &Data); +} + TEST(ScudoCombinedTest, BasicCombined) { UseQuarantine = false; testAllocator<scudo::AndroidSvelteConfig>(); @@ -173,6 +322,7 @@ TEST(ScudoCombinedTest, BasicCombined) { testAllocator<scudo::DefaultConfig>(); UseQuarantine = true; testAllocator<scudo::AndroidConfig>(); + testSEGV(); #endif } @@ -231,11 +381,22 @@ TEST(ScudoCombinedTest, ThreadedCombined) { #endif } +struct DeathSizeClassConfig { + static const scudo::uptr NumBits = 1; + static const scudo::uptr MinSizeLog = 10; + static const scudo::uptr MidSizeLog = 10; + static const scudo::uptr MaxSizeLog = 13; + static const scudo::u32 MaxNumCachedHint = 4; + static const scudo::uptr MaxBytesCachedLog = 12; +}; + +static const scudo::uptr DeathRegionSizeLog = 20U; struct DeathConfig { - // Tiny allocator, its Primary only serves chunks of 1024 bytes. - using DeathSizeClassMap = scudo::SizeClassMap<1U, 10U, 10U, 10U, 1U, 10U>; - typedef scudo::SizeClassAllocator64<DeathSizeClassMap, 20U> Primary; - typedef scudo::MapAllocator<0U> Secondary; + // Tiny allocator, its Primary only serves chunks of four sizes. + using DeathSizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>; + typedef scudo::SizeClassAllocator64<DeathSizeClassMap, DeathRegionSizeLog> + Primary; + typedef scudo::MapAllocator<scudo::MapAllocatorNoCache> Secondary; template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U>; }; @@ -292,3 +453,41 @@ TEST(ScudoCombinedTest, ReleaseToOS) { Allocator->releaseToOS(); } + +// Verify that when a region gets full, the allocator will still manage to +// fulfill the allocation through a larger size class. +TEST(ScudoCombinedTest, FullRegion) { + using AllocatorT = scudo::Allocator<DeathConfig>; + auto Deleter = [](AllocatorT *A) { + A->unmapTestOnly(); + delete A; + }; + std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT, + Deleter); + Allocator->reset(); + + std::vector<void *> V; + scudo::uptr FailedAllocationsCount = 0; + for (scudo::uptr ClassId = 1U; + ClassId <= DeathConfig::DeathSizeClassMap::LargestClassId; ClassId++) { + const scudo::uptr Size = + DeathConfig::DeathSizeClassMap::getSizeByClassId(ClassId); + // Allocate enough to fill all of the regions above this one. 
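+    // With DeathRegionSizeLog = 20 each region is 1 MiB, so for the smallest
+    // class (1024 bytes plus the chunk header) that is roughly 1000 chunks
+    // per region, multiplied by the number of classes from ClassId up to
+    // LargestClassId.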
+ const scudo::uptr MaxNumberOfChunks = + ((1U << DeathRegionSizeLog) / Size) * + (DeathConfig::DeathSizeClassMap::LargestClassId - ClassId + 1); + void *P; + for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) { + P = Allocator->allocate(Size - 64U, Origin); + if (!P) + FailedAllocationsCount++; + else + V.push_back(P); + } + while (!V.empty()) { + Allocator->deallocate(V.back(), Origin); + V.pop_back(); + } + } + EXPECT_EQ(FailedAllocationsCount, 0U); +} diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/mutex_test.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/mutex_test.cpp index ce715a19332..ed56cb5219e 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/mutex_test.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/mutex_test.cpp @@ -52,7 +52,7 @@ private: static const scudo::u32 Size = 64U; typedef scudo::u64 T; scudo::HybridMutex &Mutex; - ALIGNED(SCUDO_CACHE_LINE_SIZE) T Data[Size]; + alignas(SCUDO_CACHE_LINE_SIZE) T Data[Size]; }; const scudo::u32 NumberOfThreads = 8; diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp index 64b625e79bf..010bf84490e 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp @@ -58,6 +58,7 @@ TEST(ScudoPrimaryTest, BasicPrimary) { testPrimary<scudo::SizeClassAllocator32<SizeClassMap, 18U>>(); #endif testPrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U>>(); + testPrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U, true>>(); } // The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes. @@ -143,6 +144,7 @@ TEST(ScudoPrimaryTest, PrimaryIterate) { testIteratePrimary<scudo::SizeClassAllocator32<SizeClassMap, 18U>>(); #endif testIteratePrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U>>(); + testIteratePrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U, true>>(); } static std::mutex Mutex; @@ -202,6 +204,7 @@ TEST(ScudoPrimaryTest, PrimaryThreaded) { testPrimaryThreaded<scudo::SizeClassAllocator32<SizeClassMap, 18U>>(); #endif testPrimaryThreaded<scudo::SizeClassAllocator64<SizeClassMap, 24U>>(); + testPrimaryThreaded<scudo::SizeClassAllocator64<SizeClassMap, 24U, true>>(); } // Through a simple allocation that spans two pages, verify that releaseToOS @@ -232,4 +235,5 @@ TEST(ScudoPrimaryTest, ReleaseToOS) { testReleaseToOS<scudo::SizeClassAllocator32<SizeClassMap, 18U>>(); #endif testReleaseToOS<scudo::SizeClassAllocator64<SizeClassMap, 24U>>(); + testReleaseToOS<scudo::SizeClassAllocator64<SizeClassMap, 24U, true>>(); } diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/release_test.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/release_test.cpp index 22d73d09d53..a7478f47479 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/release_test.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/release_test.cpp @@ -147,14 +147,14 @@ private: template <class SizeClassMap> void testReleaseFreeMemoryToOS() { typedef FreeBatch<SizeClassMap> Batch; - const scudo::uptr AllocatedPagesCount = 1024; + const scudo::uptr PagesCount = 1024; const scudo::uptr PageSize = scudo::getPageSizeCached(); std::mt19937 R; scudo::u32 RandState = 42; for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) { const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I); - const scudo::uptr MaxBlocks = AllocatedPagesCount * PageSize / BlockSize; + const scudo::uptr MaxBlocks = PagesCount * 
PageSize / BlockSize; // Generate the random free list. std::vector<scudo::uptr> FreeArray; @@ -190,7 +190,7 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() { // Release the memory. ReleasedPagesRecorder Recorder; - releaseFreeMemoryToOS(FreeList, 0, AllocatedPagesCount, BlockSize, + releaseFreeMemoryToOS(FreeList, 0, MaxBlocks * BlockSize, BlockSize, &Recorder); // Verify that there are no released pages touched by used chunks and all @@ -202,7 +202,7 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() { scudo::uptr CurrentBlock = 0; InFreeRange = false; scudo::uptr CurrentFreeRangeStart = 0; - for (scudo::uptr I = 0; I <= MaxBlocks; I++) { + for (scudo::uptr I = 0; I < MaxBlocks; I++) { const bool IsFreeBlock = FreeBlocks.find(CurrentBlock) != FreeBlocks.end(); if (IsFreeBlock) { @@ -238,6 +238,19 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() { CurrentBlock += BlockSize; } + if (InFreeRange) { + scudo::uptr P = scudo::roundUpTo(CurrentFreeRangeStart, PageSize); + const scudo::uptr EndPage = + scudo::roundUpTo(MaxBlocks * BlockSize, PageSize); + while (P + PageSize <= EndPage) { + const bool PageReleased = + Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end(); + EXPECT_EQ(true, PageReleased); + VerifiedReleasedPages++; + P += PageSize; + } + } + EXPECT_EQ(Recorder.ReportedPages.size(), VerifiedReleasedPages); while (!FreeList.empty()) { diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp index 1e7dcec5861..d2260b9c15b 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp @@ -29,8 +29,8 @@ template <class SecondaryT> static void testSecondaryBasic(void) { memset(P, 'A', Size); EXPECT_GE(SecondaryT::getBlockSize(P), Size); L->deallocate(P); - // If we are not using a free list, blocks are unmapped on deallocation. - if (SecondaryT::getMaxFreeListSize() == 0U) + // If the Secondary can't cache that pointer, it will be unmapped. + if (!SecondaryT::canCache(Size)) EXPECT_DEATH(memset(P, 'A', Size), ""); const scudo::uptr Align = 1U << 16; @@ -55,17 +55,18 @@ template <class SecondaryT> static void testSecondaryBasic(void) { } TEST(ScudoSecondaryTest, SecondaryBasic) { - testSecondaryBasic<scudo::MapAllocator<0U>>(); + testSecondaryBasic<scudo::MapAllocator<scudo::MapAllocatorNoCache>>(); #if !SCUDO_FUCHSIA - testSecondaryBasic<scudo::MapAllocator<>>(); - testSecondaryBasic<scudo::MapAllocator<64U>>(); + testSecondaryBasic<scudo::MapAllocator<scudo::MapAllocatorCache<>>>(); + testSecondaryBasic< + scudo::MapAllocator<scudo::MapAllocatorCache<64U, 1UL << 20>>>(); #endif } #if SCUDO_FUCHSIA -using LargeAllocator = scudo::MapAllocator<0U>; +using LargeAllocator = scudo::MapAllocator<scudo::MapAllocatorNoCache>; #else -using LargeAllocator = scudo::MapAllocator<>; +using LargeAllocator = scudo::MapAllocator<scudo::MapAllocatorCache<>>; #endif // This exercises a variety of combinations of size and alignment for the @@ -136,8 +137,15 @@ static void performAllocations(LargeAllocator *L) { while (!Ready) Cv.wait(Lock); } - for (scudo::uptr I = 0; I < 32U; I++) - V.push_back(L->allocate((std::rand() % 16) * PageSize)); + for (scudo::uptr I = 0; I < 128U; I++) { + // Deallocate 75% of the blocks. 
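+      // rand() & 3 is (roughly) uniform over {0, 1, 2, 3}, so Deallocate
+      // holds with probability 3/4; the blocks kept in V are freed after the
+      // loop.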
+ const bool Deallocate = (rand() & 3) != 0; + void *P = L->allocate((std::rand() % 16) * PageSize); + if (Deallocate) + L->deallocate(P); + else + V.push_back(P); + } while (!V.empty()) { L->deallocate(V.back()); V.pop_back(); @@ -146,9 +154,9 @@ static void performAllocations(LargeAllocator *L) { TEST(ScudoSecondaryTest, SecondaryThreadsRace) { LargeAllocator *L = new LargeAllocator; - L->init(nullptr); - std::thread Threads[10]; - for (scudo::uptr I = 0; I < 10U; I++) + L->init(nullptr, /*ReleaseToOsInterval=*/0); + std::thread Threads[16]; + for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++) Threads[I] = std::thread(performAllocations, L); { std::unique_lock<std::mutex> Lock(Mutex); diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cpp index 55850400a76..88859ded5b2 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cpp @@ -12,8 +12,8 @@ template <class SizeClassMap> void testSizeClassMap() { typedef SizeClassMap SCMap; - SCMap::print(); - SCMap::validate(); + scudo::printMap<SCMap>(); + scudo::validateMap<SCMap>(); } TEST(ScudoSizeClassMapTest, DefaultSizeClassMap) { @@ -28,12 +28,30 @@ TEST(ScudoSizeClassMapTest, AndroidSizeClassMap) { testSizeClassMap<scudo::AndroidSizeClassMap>(); } +struct OneClassSizeClassConfig { + static const scudo::uptr NumBits = 1; + static const scudo::uptr MinSizeLog = 5; + static const scudo::uptr MidSizeLog = 5; + static const scudo::uptr MaxSizeLog = 5; + static const scudo::u32 MaxNumCachedHint = 0; + static const scudo::uptr MaxBytesCachedLog = 0; +}; + TEST(ScudoSizeClassMapTest, OneClassSizeClassMap) { - testSizeClassMap<scudo::SizeClassMap<1, 5, 5, 5, 0, 0>>(); + testSizeClassMap<scudo::FixedSizeClassMap<OneClassSizeClassConfig>>(); } #if SCUDO_CAN_USE_PRIMARY64 +struct LargeMaxSizeClassConfig { + static const scudo::uptr NumBits = 3; + static const scudo::uptr MinSizeLog = 4; + static const scudo::uptr MidSizeLog = 8; + static const scudo::uptr MaxSizeLog = 63; + static const scudo::u32 MaxNumCachedHint = 128; + static const scudo::uptr MaxBytesCachedLog = 16; +}; + TEST(ScudoSizeClassMapTest, LargeMaxSizeClassMap) { - testSizeClassMap<scudo::SizeClassMap<3, 4, 8, 63, 128, 16>>(); + testSizeClassMap<scudo::FixedSizeClassMap<LargeMaxSizeClassConfig>>(); } #endif diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp index 976ac4f497c..b41908cf478 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp @@ -268,10 +268,26 @@ TEST(ScudoWrappersCTest, MallocIterateBoundary) { const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U); const size_t SpecialSize = PageSize - BlockDelta; - void *P = malloc(SpecialSize); - EXPECT_NE(P, nullptr); - BoundaryP = reinterpret_cast<uintptr_t>(P); - const uintptr_t Block = BoundaryP - BlockDelta; + // We aren't guaranteed that any size class is exactly a page wide. So we need + // to keep making allocations until we succeed. + // + // With a 16-byte block alignment and 4096-byte page size, each allocation has + // a probability of (1 - (16/4096)) of failing to meet the alignment + // requirements, and the probability of failing 65536 times is + // (1 - (16/4096))^65536 < 10^-112. 
So if we still haven't succeeded after + // 65536 tries, give up. + uintptr_t Block; + void *P = nullptr; + for (unsigned I = 0; I != 65536; ++I) { + void *PrevP = P; + P = malloc(SpecialSize); + EXPECT_NE(P, nullptr); + *reinterpret_cast<void **>(P) = PrevP; + BoundaryP = reinterpret_cast<uintptr_t>(P); + Block = BoundaryP - BlockDelta; + if ((Block & (PageSize - 1)) == 0U) + break; + } EXPECT_EQ((Block & (PageSize - 1)), 0U); Count = 0U; @@ -281,7 +297,11 @@ TEST(ScudoWrappersCTest, MallocIterateBoundary) { malloc_enable(); EXPECT_EQ(Count, 1U); - free(P); + while (P) { + void *NextP = *reinterpret_cast<void **>(P); + free(P); + P = NextP; + } } // We expect heap operations within a disable/enable scope to deadlock. @@ -303,7 +323,11 @@ TEST(ScudoWrappersCTest, MallocDisableDeadlock) { #if !SCUDO_FUCHSIA TEST(ScudoWrappersCTest, MallocInfo) { - char Buffer[64]; + // Use volatile so that the allocations don't get optimized away. + void *volatile P1 = malloc(1234); + void *volatile P2 = malloc(4321); + + char Buffer[16384]; FILE *F = fmemopen(Buffer, sizeof(Buffer), "w+"); EXPECT_NE(F, nullptr); errno = 0; @@ -311,6 +335,11 @@ TEST(ScudoWrappersCTest, MallocInfo) { EXPECT_EQ(errno, 0); fclose(F); EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0); + EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\"")); + EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\"")); + + free(P1); + free(P2); } TEST(ScudoWrappersCTest, Fork) { @@ -343,6 +372,7 @@ TEST(ScudoWrappersCTest, Fork) { static pthread_mutex_t Mutex; static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER; +static bool Ready; static void *enableMalloc(void *Unused) { // Initialize the allocator for this thread. @@ -353,6 +383,7 @@ static void *enableMalloc(void *Unused) { // Signal the main thread we are ready. pthread_mutex_lock(&Mutex); + Ready = true; pthread_cond_signal(&Conditional); pthread_mutex_unlock(&Mutex); @@ -369,7 +400,8 @@ TEST(ScudoWrappersCTest, DisableForkEnable) { // Wait for the thread to be warmed up. pthread_mutex_lock(&Mutex); - pthread_cond_wait(&Conditional, &Mutex); + while (!Ready) + pthread_cond_wait(&Conditional, &Mutex); pthread_mutex_unlock(&Mutex); // Disable the allocator and fork. fork should succeed after malloc_enable. diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tools/compute_size_class_config.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/tools/compute_size_class_config.cpp new file mode 100644 index 00000000000..82f37b6647e --- /dev/null +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tools/compute_size_class_config.cpp @@ -0,0 +1,161 @@ +//===-- compute_size_class_config.cpp -------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include <algorithm> +#include <vector> + +struct Alloc { + size_t size, count; +}; + +size_t measureWastage(const std::vector<Alloc> &allocs, + const std::vector<size_t> &classes, + size_t pageSize, + size_t headerSize) { + size_t totalWastage = 0; + for (auto &a : allocs) { + size_t sizePlusHeader = a.size + headerSize; + size_t wastage = -1ull; + for (auto c : classes) + if (c >= sizePlusHeader && c - sizePlusHeader < wastage) + wastage = c - sizePlusHeader; + if (wastage == -1ull) + continue; + if (wastage > 2 * pageSize) + wastage = 2 * pageSize; + totalWastage += wastage * a.count; + } + return totalWastage; +} + +void readAllocs(std::vector<Alloc> &allocs, const char *path) { + FILE *f = fopen(path, "r"); + if (!f) { + fprintf(stderr, "compute_size_class_config: could not open %s: %s\n", path, + strerror(errno)); + exit(1); + } + + const char header[] = "<malloc version=\"scudo-1\">\n"; + char buf[sizeof(header) - 1]; + if (fread(buf, 1, sizeof(header) - 1, f) != sizeof(header) - 1 || + memcmp(buf, header, sizeof(header) - 1) != 0) { + fprintf(stderr, "compute_size_class_config: invalid input format\n"); + exit(1); + } + + Alloc a; + while (fscanf(f, "<alloc size=\"%zu\" count=\"%zu\"/>\n", &a.size, &a.count) == 2) + allocs.push_back(a); + fclose(f); +} + +size_t log2Floor(size_t x) { return sizeof(long) * 8 - 1 - __builtin_clzl(x); } + +void usage() { + fprintf(stderr, + "usage: compute_size_class_config [-p pageSize] [-c largestClass] " + "[-h headerSize] [-n numClasses] [-b numBits] profile...\n"); + exit(1); +} + +int main(int argc, char **argv) { + size_t pageSize = 4096; + size_t largestClass = 65552; + size_t headerSize = 16; + size_t numClasses = 32; + size_t numBits = 5; + + std::vector<Alloc> allocs; + for (size_t i = 1; i != argc;) { + auto matchArg = [&](size_t &arg, const char *name) { + if (strcmp(argv[i], name) == 0) { + if (i + 1 != argc) { + arg = atoi(argv[i + 1]); + i += 2; + } else { + usage(); + } + return true; + } + return false; + }; + if (matchArg(pageSize, "-p") || matchArg(largestClass, "-c") || + matchArg(headerSize, "-h") || matchArg(numClasses, "-n") || + matchArg(numBits, "-b")) + continue; + readAllocs(allocs, argv[i]); + ++i; + } + + if (allocs.empty()) + usage(); + + std::vector<size_t> classes; + classes.push_back(largestClass); + for (size_t i = 1; i != numClasses; ++i) { + size_t minWastage = -1ull; + size_t minWastageClass; + for (size_t newClass = 16; newClass != largestClass; newClass += 16) { + // Skip classes with more than numBits bits, ignoring leading or trailing + // zero bits. 
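+      // For example, with numBits = 5, a candidate whose newClass - headerSize
+      // is 0xb0 (set bits spanning positions 4..7, a 4-bit window) is kept,
+      // while 0x81 (bits 0 and 7, an 8-bit window) is skipped.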
+ if (__builtin_ctzl(newClass - headerSize) + + __builtin_clzl(newClass - headerSize) < + sizeof(long) * 8 - numBits) + continue; + + classes.push_back(newClass); + size_t newWastage = measureWastage(allocs, classes, pageSize, headerSize); + classes.pop_back(); + if (newWastage < minWastage) { + minWastage = newWastage; + minWastageClass = newClass; + } + } + classes.push_back(minWastageClass); + } + + std::sort(classes.begin(), classes.end()); + size_t minSizeLog = log2Floor(headerSize); + size_t midSizeIndex = 0; + while (classes[midSizeIndex + 1] - classes[midSizeIndex] == (1 << minSizeLog)) + midSizeIndex++; + size_t midSizeLog = log2Floor(classes[midSizeIndex] - headerSize); + size_t maxSizeLog = log2Floor(classes.back() - headerSize - 1) + 1; + + printf(R"(// wastage = %zu + +struct MySizeClassConfig { + static const uptr NumBits = %zu; + static const uptr MinSizeLog = %zu; + static const uptr MidSizeLog = %zu; + static const uptr MaxSizeLog = %zu; + static const u32 MaxNumCachedHint = 14; + static const uptr MaxBytesCachedLog = 14; + + static constexpr u32 Classes[] = {)", + measureWastage(allocs, classes, pageSize, headerSize), numBits, + minSizeLog, midSizeLog, maxSizeLog); + for (size_t i = 0; i != classes.size(); ++i) { + if ((i % 8) == 0) + printf("\n "); + else + printf(" "); + printf("0x%05zx,", classes[i]); + } + printf(R"( + }; + static const uptr SizeDelta = %zu; +}; +)", headerSize); +} diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd.h index 20f0d69cabf..b3701c63f8a 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd.h @@ -23,7 +23,7 @@ namespace scudo { -template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD { +template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD { typename Allocator::CacheT Cache; typename Allocator::QuarantineCacheT QuarantineCache; u8 DestructorIterations; diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd_exclusive.h index 69479ea7bdf..3492509b5a8 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd_exclusive.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd_exclusive.h @@ -25,9 +25,7 @@ template <class Allocator> struct TSDRegistryExT { void initLinkerInitialized(Allocator *Instance) { Instance->initLinkerInitialized(); CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0); - FallbackTSD = reinterpret_cast<TSD<Allocator> *>( - map(nullptr, sizeof(TSD<Allocator>), "scudo:tsd")); - FallbackTSD->initLinkerInitialized(Instance); + FallbackTSD.initLinkerInitialized(Instance); Initialized = true; } void init(Allocator *Instance) { @@ -35,9 +33,7 @@ template <class Allocator> struct TSDRegistryExT { initLinkerInitialized(Instance); } - void unmapTestOnly() { - unmap(reinterpret_cast<void *>(FallbackTSD), sizeof(TSD<Allocator>)); - } + void unmapTestOnly() {} ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) { if (LIKELY(State != ThreadState::NotInitialized)) @@ -51,23 +47,22 @@ template <class Allocator> struct TSDRegistryExT { *UnlockRequired = false; return &ThreadTSD; } - DCHECK(FallbackTSD); - FallbackTSD->lock(); + FallbackTSD.lock(); *UnlockRequired = true; - return FallbackTSD; + return &FallbackTSD; } // To disable the exclusive TSD registry, we effectively lock the fallback TSD // and force all threads to attempt to use it instead of their local one. 
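  // disable() acquires Mutex and then the fallback TSD lock; enable() releases
  // them in the reverse order, keeping the lock order consistent.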
void disable() { Mutex.lock(); - FallbackTSD->lock(); + FallbackTSD.lock(); atomic_store(&Disabled, 1U, memory_order_release); } void enable() { atomic_store(&Disabled, 0U, memory_order_release); - FallbackTSD->unlock(); + FallbackTSD.unlock(); Mutex.unlock(); } @@ -96,7 +91,7 @@ private: pthread_key_t PThreadKey; bool Initialized; atomic_u8 Disabled; - TSD<Allocator> *FallbackTSD; + TSD<Allocator> FallbackTSD; HybridMutex Mutex; static THREADLOCAL ThreadState State; static THREADLOCAL TSD<Allocator> ThreadTSD; diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd_shared.h b/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd_shared.h index 5ab8269519a..038a5905ff4 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd_shared.h +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/tsd_shared.h @@ -18,9 +18,10 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT { void initLinkerInitialized(Allocator *Instance) { Instance->initLinkerInitialized(); CHECK_EQ(pthread_key_create(&PThreadKey, nullptr), 0); // For non-TLS - NumberOfTSDs = Min(Max(1U, getNumberOfCPUs()), MaxTSDCount); - TSDs = reinterpret_cast<TSD<Allocator> *>( - map(nullptr, sizeof(TSD<Allocator>) * NumberOfTSDs, "scudo:tsd")); + const u32 NumberOfCPUs = getNumberOfCPUs(); + NumberOfTSDs = (SCUDO_ANDROID || NumberOfCPUs == 0) + ? MaxTSDCount + : Min(NumberOfCPUs, MaxTSDCount); for (u32 I = 0; I < NumberOfTSDs; I++) TSDs[I].initLinkerInitialized(Instance); // Compute all the coprimes of NumberOfTSDs. This will be used to walk the @@ -46,8 +47,6 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT { } void unmapTestOnly() { - unmap(reinterpret_cast<void *>(TSDs), - sizeof(TSD<Allocator>) * NumberOfTSDs); setCurrentTSD(nullptr); pthread_key_delete(PThreadKey); } @@ -77,7 +76,7 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT { } void enable() { - for (s32 I = NumberOfTSDs - 1; I >= 0; I--) + for (s32 I = static_cast<s32>(NumberOfTSDs - 1); I >= 0; I--) TSDs[I].unlock(); Mutex.unlock(); } @@ -160,11 +159,11 @@ private: pthread_key_t PThreadKey; atomic_u32 CurrentIndex; u32 NumberOfTSDs; - TSD<Allocator> *TSDs; u32 NumberOfCoPrimes; u32 CoPrimes[MaxTSDCount]; bool Initialized; HybridMutex Mutex; + TSD<Allocator> TSDs[MaxTSDCount]; #if SCUDO_LINUX && !_BIONIC static THREADLOCAL TSD<Allocator> *ThreadTSD; #endif diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c.cpp index 93a666c4d61..098cc089a1c 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c.cpp @@ -22,13 +22,11 @@ #define SCUDO_ALLOCATOR Allocator extern "C" void SCUDO_PREFIX(malloc_postinit)(); -static scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> - SCUDO_ALLOCATOR; -// Pointer to the static allocator so that the C++ wrappers can access it. + +// Export the static allocator so that the C++ wrappers can access it. // Technically we could have a completely separated heap for C & C++ but in // reality the amount of cross pollination between the two is staggering. 
-scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> * - CONCATENATE(SCUDO_ALLOCATOR, Ptr) = &SCUDO_ALLOCATOR; +scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> SCUDO_ALLOCATOR; #include "wrappers_c.inc" diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c.inc index 2fd709eaa1f..4396dfc50d1 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c.inc +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c.inc @@ -150,13 +150,25 @@ INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() { } void SCUDO_PREFIX(malloc_postinit)() { + SCUDO_ALLOCATOR.initGwpAsan(); pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable), SCUDO_PREFIX(malloc_enable)); } INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) { if (param == M_DECAY_TIME) { - // TODO(kostyak): set release_to_os_interval_ms accordingly. + if (SCUDO_ANDROID) { + if (value == 0) { + // Will set the release values to their minimum values. + value = INT32_MIN; + } else { + // Will set the release values to their maximum values. + value = INT32_MAX; + } + } + + SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval, + static_cast<scudo::sptr>(value)); return 1; } else if (param == M_PURGE) { SCUDO_ALLOCATOR.releaseToOS(); @@ -179,9 +191,56 @@ INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment, } INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) { - fputs("<malloc version=\"scudo-1\">", stream); - fputs("</malloc>", stream); + const scudo::uptr max_size = + decltype(SCUDO_ALLOCATOR)::PrimaryT::SizeClassMap::MaxSize; + auto *sizes = static_cast<scudo::uptr *>( + SCUDO_PREFIX(calloc)(max_size, sizeof(scudo::uptr))); + auto callback = [](uintptr_t, size_t size, void *arg) { + auto *sizes = reinterpret_cast<scudo::uptr *>(arg); + if (size < max_size) + sizes[size]++; + }; + SCUDO_ALLOCATOR.iterateOverChunks(0, -1ul, callback, sizes); + + fputs("<malloc version=\"scudo-1\">\n", stream); + for (scudo::uptr i = 0; i != max_size; ++i) + if (sizes[i]) + fprintf(stream, "<alloc size=\"%lu\" count=\"%lu\"/>\n", i, sizes[i]); + fputs("</malloc>\n", stream); + SCUDO_PREFIX(free)(sizes); return 0; } +// Disable memory tagging for the heap. The caller must disable memory tag +// checks globally (e.g. by clearing TCF0 on aarch64) before calling this +// function, and may not re-enable them after calling the function. The program +// must be single threaded at the point when the function is called. +INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() { + SCUDO_ALLOCATOR.disableMemoryTagging(); +} + +// Sets whether scudo records stack traces and other metadata for allocations +// and deallocations. This function only has an effect if the allocator and +// hardware support memory tagging. The program must be single threaded at the +// point when the function is called. +INTERFACE WEAK void +SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) { + SCUDO_ALLOCATOR.setTrackAllocationStacks(track); +} + +// Sets whether scudo zero-initializes all allocated memory. The program must +// be single threaded at the point when the function is called. +INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) { + SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill + : scudo::NoFill); +} + +// Sets whether scudo pattern-initializes all allocated memory. The program must +// be single threaded at the point when the function is called. 
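+// (A usage sketch, assuming the usual expansion of SCUDO_PREFIX: call e.g.
+//   malloc_set_zero_contents(1);          // switch to ZeroFill
+// or
+//   malloc_set_pattern_fill_contents(1);  // switch to PatternOrZeroFill
+// once, early, while the process is still single threaded.)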
+INTERFACE WEAK void +SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) { + SCUDO_ALLOCATOR.setFillContents( + pattern_fill_contents ? scudo::PatternOrZeroFill : scudo::NoFill); +} + } // extern "C" diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp index f004369d96c..4298e69b577 100644 --- a/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp +++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp @@ -25,11 +25,6 @@ extern "C" void SCUDO_PREFIX(malloc_postinit)(); static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)> SCUDO_ALLOCATOR; -// Pointer to the static allocator so that the C++ wrappers can access it. -// Technically we could have a completely separated heap for C & C++ but in -// reality the amount of cross pollination between the two is staggering. -scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)> * - CONCATENATE(SCUDO_ALLOCATOR, Ptr) = &SCUDO_ALLOCATOR; #include "wrappers_c.inc" @@ -44,22 +39,37 @@ extern "C" void SCUDO_PREFIX(malloc_postinit)(); static scudo::Allocator<scudo::AndroidSvelteConfig, SCUDO_PREFIX(malloc_postinit)> SCUDO_ALLOCATOR; -// Pointer to the static allocator so that the C++ wrappers can access it. -// Technically we could have a completely separated heap for C & C++ but in -// reality the amount of cross pollination between the two is staggering. -scudo::Allocator<scudo::AndroidSvelteConfig, SCUDO_PREFIX(malloc_postinit)> * - CONCATENATE(SCUDO_ALLOCATOR, Ptr) = &SCUDO_ALLOCATOR; #include "wrappers_c.inc" #undef SCUDO_ALLOCATOR #undef SCUDO_PREFIX -// The following is the only function that will end up initializing both -// allocators, which will result in a slight increase in memory footprint. -INTERFACE void __scudo_print_stats(void) { - Allocator.printStats(); - SvelteAllocator.printStats(); +// TODO(kostyak): support both allocators. 
+INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
+
+INTERFACE void __scudo_get_error_info(
+    struct scudo_error_info *error_info, uintptr_t fault_addr,
+    const char *stack_depot, const char *region_info, const char *memory,
+    const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
+  Allocator.getErrorInfo(error_info, fault_addr, stack_depot, region_info,
+                         memory, memory_tags, memory_addr, memory_size);
+}
+
+INTERFACE const char *__scudo_get_stack_depot_addr() {
+  return Allocator.getStackDepotAddress();
+}
+
+INTERFACE size_t __scudo_get_stack_depot_size() {
+  return sizeof(scudo::StackDepot);
+}
+
+INTERFACE const char *__scudo_get_region_info_addr() {
+  return Allocator.getRegionInfoArrayAddress();
+}
+
+INTERFACE size_t __scudo_get_region_info_size() {
+  return Allocator.getRegionInfoArraySize();
}
#endif // SCUDO_ANDROID && _BIONIC
diff --git a/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp b/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
index 1da5385c778..adb10411812 100644
--- a/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
+++ b/gnu/llvm/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
@@ -16,7 +16,7 @@
#include <stdint.h>
extern "C" void malloc_postinit();
-extern scudo::Allocator<scudo::Config, malloc_postinit> *AllocatorPtr;
+extern HIDDEN scudo::Allocator<scudo::Config, malloc_postinit> Allocator;
namespace std {
struct nothrow_t {};
@@ -24,85 +24,85 @@ enum class align_val_t : size_t {};
} // namespace std
INTERFACE WEAK void *operator new(size_t size) {
-  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+  return Allocator.allocate(size, scudo::Chunk::Origin::New);
}
INTERFACE WEAK void *operator new[](size_t size) {
-  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+  return Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
}
INTERFACE WEAK void *operator new(size_t size,
                                  std::nothrow_t const &) NOEXCEPT {
-  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+  return Allocator.allocate(size, scudo::Chunk::Origin::New);
}
INTERFACE WEAK void *operator new[](size_t size,
                                    std::nothrow_t const &) NOEXCEPT {
-  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+  return Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
}
INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
-  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
-                                static_cast<scudo::uptr>(align));
+  return Allocator.allocate(size, scudo::Chunk::Origin::New,
+                            static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
-  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
-                                static_cast<scudo::uptr>(align));
+  return Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
+                            static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
                                  std::nothrow_t const &) NOEXCEPT {
-  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
-                                static_cast<scudo::uptr>(align));
+  return Allocator.allocate(size, scudo::Chunk::Origin::New,
+                            static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
                                    std::nothrow_t const &) NOEXCEPT {
-  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
-                                static_cast<scudo::uptr>(align));
+  return Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
+                            static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete(void *ptr)NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
}
INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
}
INTERFACE WEAK void operator delete(void *ptr, std::nothrow_t const &)NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
}
INTERFACE WEAK void operator delete[](void *ptr,
                                      std::nothrow_t const &) NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
}
INTERFACE WEAK void operator delete(void *ptr, size_t size)NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size);
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size);
}
INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
}
INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align)NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
-                           static_cast<scudo::uptr>(align));
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
+                       static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr,
                                      std::align_val_t align) NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
-                           static_cast<scudo::uptr>(align));
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+                       static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
                                    std::nothrow_t const &)NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
-                           static_cast<scudo::uptr>(align));
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
+                       static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
                                      std::nothrow_t const &) NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
-                           static_cast<scudo::uptr>(align));
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+                       static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete(void *ptr, size_t size,
                                    std::align_val_t align)NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size,
-                           static_cast<scudo::uptr>(align));
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size,
+                       static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr, size_t size,
                                      std::align_val_t align) NOEXCEPT {
-  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
-                           static_cast<scudo::uptr>(align));
+  Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
+                       static_cast<scudo::uptr>(align));
}
#endif // !SCUDO_ANDROID || !_BIONIC
diff --git a/gnu/llvm/compiler-rt/lib/tsan/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/tsan/CMakeLists.txt
index 9fd3e2d7792..c99b16d8aaa 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/CMakeLists.txt
+++ b/gnu/llvm/compiler-rt/lib/tsan/CMakeLists.txt
@@ -114,6 +114,14 @@ set(TSAN_RUNTIME_LIBRARIES)
add_compiler_rt_component(tsan)
if(APPLE)
+  # Ideally we would check the SDK version for the actual platform we are
+  # building for here. To make our lives easier we assume the host SDK setup
+  # is sane and use the macOS SDK version as a proxy for aligned SDKs.
+  find_darwin_sdk_version(macosx_sdk_version "macosx")
+  if ("${macosx_sdk_version}" VERSION_LESS 10.12)
+    message(FATAL_ERROR "Building the TSan runtime requires at least macOS SDK 10.12 (or aligned SDK on other platforms)")
+  endif()
+
add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S rtl/tsan_rtl_aarch64.S)
set(TSAN_LINK_LIBS ${SANITIZER_COMMON_LINK_LIBS})
diff --git a/gnu/llvm/compiler-rt/lib/tsan/go/build.bat b/gnu/llvm/compiler-rt/lib/tsan/go/build.bat
index bf502873b11..0755688e5bd 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/go/build.bat
+++ b/gnu/llvm/compiler-rt/lib/tsan/go/build.bat
@@ -59,4 +59,4 @@ gcc ^
  -DSANITIZER_DEBUG=0 ^
  -O3 ^
  -fomit-frame-pointer ^
-  -std=c++11
+  -std=c++14
diff --git a/gnu/llvm/compiler-rt/lib/tsan/go/buildgo.sh b/gnu/llvm/compiler-rt/lib/tsan/go/buildgo.sh
index 99a6a9ea19d..2238caf53b3 100755
--- a/gnu/llvm/compiler-rt/lib/tsan/go/buildgo.sh
+++ b/gnu/llvm/compiler-rt/lib/tsan/go/buildgo.sh
@@ -66,6 +66,10 @@ if [ "`uname -a | grep Linux`" != "" ]; then
    ARCHCFLAGS=""
  fi
elif [ "`uname -a | grep FreeBSD`" != "" ]; then
+  # The resulting object still depends on libc.
+  # We removed this dependency for Go runtime for other OSes,
+  # and we should remove it for FreeBSD as well, but there is no pressing need.
+  DEPENDS_ON_LIBC=1
  SUFFIX="freebsd_amd64"
  OSCFLAGS="-fno-strict-aliasing -fPIC -Werror"
  ARCHCFLAGS="-m64"
@@ -83,6 +87,10 @@ elif [ "`uname -a | grep FreeBSD`" != "" ]; then
    ../../sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
  "
elif [ "`uname -a | grep NetBSD`" != "" ]; then
+  # The resulting object still depends on libc.
+  # We removed this dependency for Go runtime for other OSes,
+  # and we should remove it for NetBSD as well, but there is no pressing need.
+  DEPENDS_ON_LIBC=1
  SUFFIX="netbsd_amd64"
  OSCFLAGS="-fno-strict-aliasing -fPIC -Werror"
  ARCHCFLAGS="-m64"
@@ -100,6 +108,27 @@ elif [ "`uname -a | grep NetBSD`" != "" ]; then
    ../../sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
    ../../sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
  "
+elif [ "`uname -a | grep OpenBSD`" != "" ]; then
+  # The resulting object still depends on libc.
+  # We removed this dependency for Go runtime for other OSes,
+  # and we should remove it for OpenBSD as well, but there is no pressing need.
+  DEPENDS_ON_LIBC=1
+  SUFFIX="openbsd_amd64"
+  OSCFLAGS="-fno-strict-aliasing -fPIC -Werror"
+  ARCHCFLAGS="-m64"
+  OSLDFLAGS="-pthread -fPIC -fpie"
+  SRCS="
+    $SRCS
+    ../rtl/tsan_platform_linux.cpp
+    ../../sanitizer_common/sanitizer_posix.cpp
+    ../../sanitizer_common/sanitizer_posix_libcdep.cpp
+    ../../sanitizer_common/sanitizer_procmaps_bsd.cpp
+    ../../sanitizer_common/sanitizer_procmaps_common.cpp
+    ../../sanitizer_common/sanitizer_linux.cpp
+    ../../sanitizer_common/sanitizer_linux_libcdep.cpp
+    ../../sanitizer_common/sanitizer_openbsd.cpp
+    ../../sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
+  "
elif [ "`uname -a | grep Darwin`" != "" ]; then
  SUFFIX="darwin_amd64"
  OSCFLAGS="-fPIC -Wno-unused-const-variable -Wno-unknown-warning-option -mmacosx-version-min=10.7"
@@ -149,16 +178,20 @@ for F in $SRCS; do
  cat $F >> $DIR/gotsan.cpp
done
-FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS $ARCHCFLAGS"
+FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++14 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS $ARCHCFLAGS"
+DEBUG_FLAGS="$FLAGS -DSANITIZER_DEBUG=1 -g"
+FLAGS="$FLAGS -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer"
+if [ "$SUFFIX" = "linux_ppc64le" ]; then
+  FLAGS="$FLAGS -mcpu=power8 -fno-function-sections"
+elif [ "$SUFFIX" = "linux_amd64" ]; then
+  FLAGS="$FLAGS -msse3"
+fi
+
if [ "$DEBUG" = "" ]; then
-  FLAGS="$FLAGS -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer"
-  if [ "$SUFFIX" = "linux_ppc64le" ]; then
-    FLAGS="$FLAGS -mcpu=power8 -fno-function-sections"
-  elif [ "$SUFFIX" = "linux_amd64" ]; then
-    FLAGS="$FLAGS -msse3"
-  fi
+  # Do a build test with debug flags.
+  $CC $DIR/gotsan.cpp -c -o $DIR/race_debug_$SUFFIX.syso $DEBUG_FLAGS $CFLAGS
else
-  FLAGS="$FLAGS -DSANITIZER_DEBUG=1 -g"
+  FLAGS="$DEBUG_FLAGS"
fi
if [ "$SILENT" != "1" ]; then
@@ -168,6 +201,18 @@ $CC $DIR/gotsan.cpp -c -o $DIR/race_$SUFFIX.syso $FLAGS $CFLAGS
$CC $OSCFLAGS $ARCHCFLAGS test.c $DIR/race_$SUFFIX.syso -g -o $DIR/test $OSLDFLAGS $LDFLAGS
+# Verify that no libc specific code is present.
+if [ "$DEPENDS_ON_LIBC" != "1" ]; then
+  if nm $DIR/race_$SUFFIX.syso | grep -q __libc_; then
+    printf -- '%s seems to link to libc\n' "race_$SUFFIX.syso"
+    exit 1
+  fi
+fi
+
+if [ "`uname -a | grep NetBSD`" != "" ]; then
+  # Turn off ASLR in the test binary.
+  /usr/sbin/paxctl +a $DIR/test
+fi
export GORACE="exitcode=0 atexit_sleep_ms=0"
if [ "$SILENT" != "1" ]; then
  $DIR/test
diff --git a/gnu/llvm/compiler-rt/lib/tsan/go/test.c b/gnu/llvm/compiler-rt/lib/tsan/go/test.c
index 61be48442c8..787b4c5b7dc 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/go/test.c
+++ b/gnu/llvm/compiler-rt/lib/tsan/go/test.c
@@ -32,6 +32,7 @@ void __tsan_malloc(void *thr, void *pc, void *p, unsigned long sz);
void __tsan_free(void *p, unsigned long sz);
void __tsan_acquire(void *thr, void *addr);
void __tsan_release(void *thr, void *addr);
+void __tsan_release_acquire(void *thr, void *addr);
void __tsan_release_merge(void *thr, void *addr);
void *current_proc;
@@ -77,6 +78,7 @@ int main(void) {
  __tsan_func_enter(thr0, (char*)&main + 1);
  __tsan_malloc(thr0, (char*)&barfoo + 1, buf, 10);
  __tsan_release(thr0, buf);
+  __tsan_release_acquire(thr0, buf);
  __tsan_release_merge(thr0, buf);
  void *thr1 = 0;
  __tsan_go_start(thr0, &thr1, (char*)&barfoo + 1);
diff --git a/gnu/llvm/compiler-rt/lib/tsan/go/tsan_go.cpp b/gnu/llvm/compiler-rt/lib/tsan/go/tsan_go.cpp
index f5998c0c781..77987f43bf5 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/go/tsan_go.cpp
+++ b/gnu/llvm/compiler-rt/lib/tsan/go/tsan_go.cpp
@@ -244,6 +244,10 @@ void __tsan_acquire(ThreadState *thr, void *addr) {
  Acquire(thr, 0, (uptr)addr);
}
+void __tsan_release_acquire(ThreadState *thr, void *addr) {
+  ReleaseStoreAcquire(thr, 0, (uptr)addr);
+}
+
void __tsan_release(ThreadState *thr, void *addr) {
  ReleaseStore(thr, 0, (uptr)addr);
}
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan.syms.extra b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan.syms.extra
index ab5b5a4fcba..4838bb0a727 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan.syms.extra
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan.syms.extra
@@ -19,6 +19,11 @@ __tsan_mutex_pre_signal
__tsan_mutex_post_signal
__tsan_mutex_pre_divert
__tsan_mutex_post_divert
+__tsan_get_current_fiber
+__tsan_create_fiber
+__tsan_destroy_fiber
+__tsan_switch_to_fiber
+__tsan_set_fiber_name
__ubsan_*
Annotate*
WTFAnnotate*
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_clock.cpp b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
index 4b7aa0653da..c91b29cb22b 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
@@ -30,6 +30,14 @@
// dst->clock[i] = max(dst->clock[i], clock[i]);
// }
//
+// void ThreadClock::releaseStoreAcquire(SyncClock *sc) const {
+//   for (int i = 0; i < kMaxThreads; i++) {
+//     tmp = clock[i];
+//     clock[i] = max(clock[i], sc->clock[i]);
+//     sc->clock[i] = tmp;
+//   }
+// }
+//
// void ThreadClock::ReleaseStore(SyncClock *dst) const {
//   for (int i = 0; i < kMaxThreads; i++)
//     dst->clock[i] = clock[i];
@@ -107,13 +115,14 @@ static void UnrefClockBlock(ClockCache *c, u32 idx, uptr blocks) {
ThreadClock::ThreadClock(unsigned tid, unsigned reused)
    : tid_(tid)
    , reused_(reused + 1)  // 0 has special meaning
+    , last_acquire_()
+    , global_acquire_()
    , cached_idx_()
    , cached_size_()
    , cached_blocks_() {
  CHECK_LT(tid, kMaxTidInClock);
  CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
  nclk_ = tid_ + 1;
-  last_acquire_ = 0;
  internal_memset(clk_, 0, sizeof(clk_));
}
@@ -177,6 +186,49 @@ void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
  }
}
+void ThreadClock::releaseStoreAcquire(ClockCache *c, SyncClock *sc) {
+  DCHECK_LE(nclk_, kMaxTid);
+  DCHECK_LE(sc->size_, kMaxTid);
+
+  if (sc->size_ == 0) {
+    // ReleaseStore will correctly set release_store_tid_,
+    // which can be important for future operations.
+    ReleaseStore(c, sc);
+    return;
+  }
+
+  nclk_ = max(nclk_, (uptr) sc->size_);
+
+  // Check if we need to resize sc.
+  if (sc->size_ < nclk_)
+    sc->Resize(c, nclk_);
+
+  bool acquired = false;
+
+  sc->Unshare(c);
+  // Update sc->clk_.
+  sc->FlushDirty();
+  uptr i = 0;
+  for (ClockElem &ce : *sc) {
+    u64 tmp = clk_[i];
+    if (clk_[i] < ce.epoch) {
+      clk_[i] = ce.epoch;
+      acquired = true;
+    }
+    ce.epoch = tmp;
+    ce.reused = 0;
+    i++;
+  }
+  sc->release_store_tid_ = kInvalidTid;
+  sc->release_store_reused_ = 0;
+
+  if (acquired) {
+    CPP_STAT_INC(StatClockAcquiredSomething);
+    last_acquire_ = clk_[tid_];
+    ResetCached(c);
+  }
+}
+
void ThreadClock::release(ClockCache *c, SyncClock *dst) {
  DCHECK_LE(nclk_, kMaxTid);
  DCHECK_LE(dst->size_, kMaxTid);
@@ -196,7 +248,7 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) {
  // Check if we had not acquired anything from other threads
  // since the last release on dst. If so, we need to update
  // only dst->elem(tid_).
-  if (dst->elem(tid_).epoch > last_acquire_) {
+  if (!HasAcquiredAfterRelease(dst)) {
    UpdateCurrentThread(c, dst);
    if (dst->release_store_tid_ != tid_ ||
        dst->release_store_reused_ != reused_)
@@ -222,8 +274,6 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) {
  // Clear 'acquired' flag in the remaining elements.
  if (nclk_ < dst->size_)
    CPP_STAT_INC(StatClockReleaseClearTail);
-  for (uptr i = nclk_; i < dst->size_; i++)
-    dst->elem(i).reused = 0;
  dst->release_store_tid_ = kInvalidTid;
  dst->release_store_reused_ = 0;
  // If we've acquired dst, remember this fact,
@@ -269,7 +319,7 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
  if (dst->release_store_tid_ == tid_ &&
      dst->release_store_reused_ == reused_ &&
-      dst->elem(tid_).epoch > last_acquire_) {
+      !HasAcquiredAfterRelease(dst)) {
    CPP_STAT_INC(StatClockStoreFast);
    UpdateCurrentThread(c, dst);
    return;
@@ -351,6 +401,14 @@ bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
  return true;
}
+// Checks whether the current thread has acquired anything
+// from other clocks after releasing to dst (directly or indirectly).
+bool ThreadClock::HasAcquiredAfterRelease(const SyncClock *dst) const {
+  const u64 my_epoch = dst->elem(tid_).epoch;
+  return my_epoch <= last_acquire_ ||
+         my_epoch <= atomic_load_relaxed(&global_acquire_);
+}
+
// Sets a single element in the vector clock.
// This function is called only from weird places like AcquireGlobal.
void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) {
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_clock.h b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_clock.h
index 6a1d15a2a16..736cdae06ba 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_clock.h
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_clock.h
@@ -134,10 +134,12 @@ class ThreadClock {
  uptr size() const;
  void acquire(ClockCache *c, SyncClock *src);
+  void releaseStoreAcquire(ClockCache *c, SyncClock *src);
  void release(ClockCache *c, SyncClock *dst);
  void acq_rel(ClockCache *c, SyncClock *dst);
  void ReleaseStore(ClockCache *c, SyncClock *dst);
  void ResetCached(ClockCache *c);
+  void NoteGlobalAcquire(u64 v);
  void DebugReset();
  void DebugDump(int(*printf)(const char *s, ...));
@@ -150,6 +152,53 @@ class ThreadClock {
  // Current thread time when it acquired something from other threads.
  u64 last_acquire_;
+  // Last time another thread has done a global acquire of this thread's clock.
+  // It helps to avoid the problem described in:
+  // https://github.com/golang/go/issues/39186
+  // See test/tsan/java_finalizer2.cpp for a regression test.
+  // Note the failure is _extremely_ hard to hit, so if you are trying
+  // to reproduce it, you may want to run something like:
+  // $ go get golang.org/x/tools/cmd/stress
+  // $ stress -p=64 ./a.out
+  //
+  // The crux of the problem is roughly as follows.
+  // A number of O(1) optimizations in the clocks algorithm assume proper
+  // transitive cumulative propagation of clock values. The AcquireGlobal
+  // operation may produce an inconsistent non-linearizable view of
+  // thread clocks. Namely, it may acquire a later value from a thread
+  // with a higher ID, but fail to acquire an earlier value from a thread
+  // with a lower ID. If a thread that executed AcquireGlobal then releases
+  // to a sync clock, it will spoil the sync clock with the inconsistent
+  // values. If another thread later releases to the sync clock, the optimized
+  // algorithm may break.
+  //
+  // The exact sequence of events that leads to the failure:
+  // - thread 1 executes AcquireGlobal
+  // - thread 1 acquires value 1 for thread 2
+  // - thread 2 increments clock to 2
+  // - thread 2 releases to sync object 1
+  // - thread 3 at time 1
+  // - thread 3 acquires from sync object 1
+  // - thread 3 increments clock to 2
+  // - thread 1 acquires value 2 for thread 3
+  // - thread 1 releases to sync object 2
+  // - sync object 2 clock has 1 for thread 2 and 2 for thread 3
+  // - thread 3 releases to sync object 2
+  // - thread 3 sees value 2 in the clock for itself
+  //   and decides that it has already released to the clock
+  //   and did not acquire anything from other threads after that
+  //   (the last_acquire_ check in release operation)
+  // - thread 3 does not update the value for thread 2 in the clock from 1 to 2
+  // - thread 4 acquires from sync object 2
+  // - thread 4 detects a false race with thread 2
+  //   as it should have been synchronized with thread 2 up to time 2,
+  //   but because of the broken clock it is now synchronized only up to time 1
+  //
+  // The global_acquire_ value helps to prevent this scenario.
+  // Namely, thread 3 will not trust any own clock values up to global_acquire_
+  // for the purposes of the last_acquire_ optimization.
+  atomic_uint64_t global_acquire_;
+
  // Cached SyncClock (without dirty entries and release_store_tid_).
  // We reuse it for subsequent store-release operations without intervening
  // acquire operations. Since it is shared (and thus constant), clock value
@@ -164,6 +213,7 @@ class ThreadClock {
  u64 clk_[kMaxTidInClock];  // Fixed size vector clock.
  bool IsAlreadyAcquired(const SyncClock *src) const;
+  bool HasAcquiredAfterRelease(const SyncClock *dst) const;
  void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
};
@@ -185,6 +235,14 @@ ALWAYS_INLINE uptr ThreadClock::size() const {
  return nclk_;
}
+ALWAYS_INLINE void ThreadClock::NoteGlobalAcquire(u64 v) {
+  // Here we rely on the fact that AcquireGlobal is protected by
+  // ThreadRegistryLock, thus only one thread at a time executes it
+  // and values passed to this function should not go backwards.
+  CHECK_LE(atomic_load_relaxed(&global_acquire_), v);
+  atomic_store_relaxed(&global_acquire_, v);
+}
+
ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
  return Iter(this);
}
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_flags.inc b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_flags.inc
index bfb74b696e6..2105c754486 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_flags.inc
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_flags.inc
@@ -76,6 +76,8 @@ TSAN_FLAG(int, io_sync, 1,
TSAN_FLAG(bool, die_after_fork, true,
          "Die after multi-threaded fork if the child creates new threads.")
TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+TSAN_FLAG(bool, ignore_interceptors_accesses, SANITIZER_MAC ? true : false,
+          "Ignore reads and writes from all interceptors.")
TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false,
          "Interceptors should only detect races when called from instrumented "
          "modules.")
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
index 91584914d86..aa29536d861 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
@@ -23,11 +23,8 @@
#include <errno.h>
#include <libkern/OSAtomic.h>
#include <objc/objc-sync.h>
-#include <sys/ucontext.h>
-
-#if defined(__has_include) && __has_include(<os/lock.h>)
#include <os/lock.h>
-#endif
+#include <sys/ucontext.h>
#if defined(__has_include) && __has_include(<xpc/xpc.h>)
#include <xpc/xpc.h>
@@ -250,8 +247,6 @@ TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
  REAL(os_lock_unlock)(lock);
}
-#if defined(__has_include) && __has_include(<os/lock.h>)
-
TSAN_INTERCEPTOR(void, os_unfair_lock_lock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_lock)(lock);
@@ -291,8 +286,6 @@ TSAN_INTERCEPTOR(void, os_unfair_lock_unlock, os_unfair_lock_t lock) {
  REAL(os_unfair_lock_unlock)(lock);
}
-#endif  // #if defined(__has_include) && __has_include(<os/lock.h>)
-
#if defined(__has_include) && __has_include(<xpc/xpc.h>)
TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 8aea1e4ec05..9c3e0369bc6 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -254,7 +254,8 @@ ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
  if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
  DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
  ignoring_ =
-      !thr_->in_ignored_lib && libignore()->IsIgnored(pc, &in_ignored_lib_);
+      !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
+                                libignore()->IsIgnored(pc, &in_ignored_lib_));
  EnableIgnores();
}
@@ -891,13 +892,16 @@ void DestroyThreadState() {
  ThreadFinish(thr);
  ProcUnwire(proc, thr);
  ProcDestroy(proc);
+  DTLS_Destroy();
+  cur_thread_finalize();
+}
+
+void PlatformCleanUpThreadState(ThreadState *thr) {
  ThreadSignalContext *sctx = thr->signal_ctx;
  if (sctx) {
    thr->signal_ctx = 0;
    UnmapOrDie(sctx, sizeof(*sctx));
  }
-  DTLS_Destroy();
-  cur_thread_finalize();
}
}  // namespace __tsan
@@ -1016,7 +1020,7 @@ TSAN_INTERCEPTOR(int, pthread_create,
TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
  SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
-  int tid = ThreadTid(thr, pc, (uptr)th);
+  int tid = ThreadConsumeTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = BLOCK_REAL(pthread_join)(th, ret);
  ThreadIgnoreEnd(thr, pc);
@@ -1029,8 +1033,8 @@ TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
DEFINE_REAL_PTHREAD_FUNCTIONS
TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
-  SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
-  int tid = ThreadTid(thr, pc, (uptr)th);
+  SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
+  int tid = ThreadConsumeTid(thr, pc, (uptr)th);
  int res = REAL(pthread_detach)(th);
  if (res == 0) {
    ThreadDetach(thr, pc, tid);
@@ -1050,8 +1054,8 @@ TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
-  SCOPED_TSAN_INTERCEPTOR(pthread_tryjoin_np, th, ret);
-  int tid = ThreadTid(thr, pc, (uptr)th);
+  SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
+  int tid = ThreadConsumeTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(pthread_tryjoin_np)(th, ret);
  ThreadIgnoreEnd(thr, pc);
@@ -1064,8 +1068,8 @@ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
                 const struct timespec *abstime) {
-  SCOPED_TSAN_INTERCEPTOR(pthread_timedjoin_np, th, ret, abstime);
-  int tid = ThreadTid(thr, pc, (uptr)th);
+  SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
+  int tid = ThreadConsumeTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
  ThreadIgnoreEnd(thr, pc);
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform.h b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform.h
index 63eb14fcd34..7256d64e507 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform.h
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -1021,6 +1021,7 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void(*cleanup)(void *arg), void *arg);
void DestroyThreadState();
+void PlatformCleanUpThreadState(ThreadState *thr);
}  // namespace __tsan
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
index 33fa586ca1b..645152a06c3 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
@@ -8,25 +8,26 @@
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
-// Linux- and FreeBSD-specific code.
+// Linux- and BSD-specific code.
//===----------------------------------------------------------------------===//
-
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+    SANITIZER_OPENBSD
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_openbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
-#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "tsan_flags.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
-#include "tsan_flags.h"
#include <fcntl.h>
#include <pthread.h>
@@ -512,4 +513,5 @@ void cur_thread_finalize() {
}  // namespace __tsan
-#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
+#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
+        // SANITIZER_OPENBSD
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
index ae65dd3fd99..eea52a34e97 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
@@ -19,6 +19,7 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_ptrauth.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
@@ -257,7 +258,7 @@ void InitializePlatform() {
  pthread_introspection_hook_install(&my_pthread_introspection_hook);
#endif
-  if (GetMacosVersion() >= MACOS_VERSION_MOJAVE) {
+  if (GetMacosAlignedVersion() >= MacosVersion(10, 14)) {
    // Libsystem currently uses a process-global key; this might change.
    const unsigned kTLSLongjmpXorKeySlot = 0x7;
    longjmp_xor_key = (uptr)pthread_getspecific(kTLSLongjmpXorKeySlot);
@@ -266,7 +267,7 @@ void InitializePlatform() {
#ifdef __aarch64__
# define LONG_JMP_SP_ENV_SLOT \
-    ((GetMacosVersion() >= MACOS_VERSION_MOJAVE) ? 12 : 13)
+    ((GetMacosAlignedVersion() >= MacosVersion(10, 14)) ? 12 : 13)
#else
# define LONG_JMP_SP_ENV_SLOT 2
#endif
@@ -274,6 +275,8 @@
uptr ExtractLongJmpSp(uptr *env) {
  uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
  uptr sp = mangled_sp ^ longjmp_xor_key;
+  sp = (uptr)ptrauth_auth_data((void *)sp, ptrauth_key_asdb,
+                               ptrauth_string_discriminator("sp"));
  return sp;
}
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index 3f3c0cce119..13c9b770f50 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -144,7 +144,7 @@ static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}
-static void BackgroundThread(void *arg) {
+static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
@@ -220,6 +220,7 @@ static void BackgroundThread(void *arg) {
      }
    }
  }
+  return nullptr;
}
static void StartBackgroundThread() {
@@ -494,14 +495,23 @@ int Finalize(ThreadState *thr) {
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
+  // Ignore memory accesses in the pthread_atfork callbacks.
+  // If any of them triggers a data race we will deadlock
+  // on the report_mtx.
+  // We could ignore interceptors and sync operations as well,
+  // but so far it's unclear if it will do more good or harm.
+  // Unnecessarily ignoring things can lead to false positives later.
+  ThreadIgnoreBegin(thr, pc);
}
void ForkParentAfter(ThreadState *thr, uptr pc) {
+  ThreadIgnoreEnd(thr, pc);  // Begin is in ForkBefore.
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}
void ForkChildAfter(ThreadState *thr, uptr pc) {
+  ThreadIgnoreEnd(thr, pc);  // Begin is in ForkBefore.
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index c38fc43a9f8..d3bb61ff87d 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -775,7 +775,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
                 ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
-int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
+int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
@@ -813,10 +813,12 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr);
// approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index ce6e7cb2c4e..ebd0d722181 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -415,8 +415,10 @@ static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
-  if (tctx->status == ThreadStatusRunning)
+  if (tctx->status == ThreadStatusRunning) {
    epoch = tctx->thr->fast_state.epoch();
+    tctx->thr->clock.NoteGlobalAcquire(epoch);
+  }
  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}
@@ -429,6 +431,18 @@ void AcquireGlobal(ThreadState *thr, uptr pc) {
      UpdateClockCallback, thr);
}
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
+  DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
+  if (thr->ignore_sync)
+    return;
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+  thr->fast_state.IncrementEpoch();
+  // Can't increment epoch w/o writing to the trace as well.
+  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+  ReleaseStoreAcquireImpl(thr, pc, &s->clock);
+  s->mtx.Unlock();
+}
+
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
@@ -482,6 +496,15 @@ void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  StatInc(thr, StatSyncAcquire);
}
+void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+  if (thr->ignore_sync)
+    return;
+  thr->clock.set(thr->fast_state.epoch());
+  thr->fast_synch_epoch = thr->fast_state.epoch();
+  thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
+  StatInc(thr, StatSyncReleaseStoreAcquire);
+}
+
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index 949beac1c55..3354546c2a1 100644
--- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -439,65 +439,61 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
  ExtractTagFromStack(stk, tag);
}
-static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
-                             uptr addr_min, uptr addr_max) {
-  bool equal_stack = false;
-  RacyStacks hash;
-  bool equal_address = false;
-  RacyAddress ra0 = {addr_min, addr_max};
-  {
-    ReadLock lock(&ctx->racy_mtx);
-    if (flags()->suppress_equal_stacks) {
-      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
-      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
-      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
-        if (hash == ctx->racy_stacks[i]) {
-          VPrintf(2,
-                  "ThreadSanitizer: suppressing report as doubled (stack)\n");
-          equal_stack = true;
-          break;
-        }
-      }
-    }
-    if (flags()->suppress_equal_addresses) {
-      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
-        RacyAddress ra2 = ctx->racy_addresses[i];
-        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
-        uptr minend = min(ra0.addr_max, ra2.addr_max);
-        if (maxbeg < minend) {
-          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
-          equal_address = true;
-          break;
-        }
-      }
+static bool FindRacyStacks(const RacyStacks &hash) {
+  for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
+    if (hash == ctx->racy_stacks[i]) {
+      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
+      return true;
    }
  }
-  if (!equal_stack && !equal_address)
+  return false;
+}
+
+static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
+  if (!flags()->suppress_equal_stacks)
    return false;
-  if (!equal_stack) {
-    Lock lock(&ctx->racy_mtx);
-    ctx->racy_stacks.PushBack(hash);
-  }
-  if (!equal_address) {
-    Lock lock(&ctx->racy_mtx);
-    ctx->racy_addresses.PushBack(ra0);
+  RacyStacks hash;
+  hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+  hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
+  {
+    ReadLock lock(&ctx->racy_mtx);
+    if (FindRacyStacks(hash))
+      return true;
  }
-  return true;
+  Lock lock(&ctx->racy_mtx);
+  if (FindRacyStacks(hash))
+    return true;
+  ctx->racy_stacks.PushBack(hash);
+  return false;
}
-static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
-                          uptr addr_min, uptr addr_max) {
-  Lock lock(&ctx->racy_mtx);
-  if (flags()->suppress_equal_stacks) {
-    RacyStacks hash;
-    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
-    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
-    ctx->racy_stacks.PushBack(hash);
+static bool FindRacyAddress(const RacyAddress &ra0) {
+  for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
+    RacyAddress ra2 = ctx->racy_addresses[i];
+    uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
+    uptr minend = min(ra0.addr_max, ra2.addr_max);
+    if (maxbeg < minend) {
+      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
+      return true;
+    }
  }
-  if (flags()->suppress_equal_addresses) {
-    RacyAddress ra0 = {addr_min, addr_max};
-    ctx->racy_addresses.PushBack(ra0);
+  return false;
+}
+
+static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
+  if (!flags()->suppress_equal_addresses)
+    return false;
+  RacyAddress ra0 = {addr_min, addr_max};
+  {
+    ReadLock lock(&ctx->racy_mtx);
+    if (FindRacyAddress(ra0))
+      return true;
  }
+  Lock lock(&ctx->racy_mtx);
+  if (FindRacyAddress(ra0))
+    return true;
+  ctx->racy_addresses.PushBack(ra0);
+  return false;
}
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
@@ -618,6 +614,8 @@ void ReportRace(ThreadState *thr) {
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }
+  if (HandleRacyAddress(thr, addr_min, addr_max))
+    return;
  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
@@ -668,7 +666,7 @@ void ReportRace(ThreadState *thr) {
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;
-  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
+  if (HandleRacyStacks(thr, traces))
    return;
  // If any of the accesses has a tag, treat this as an "external" race.
@@ -711,7 +709,6 @@ void ReportRace(ThreadState *thr) { if (!OutputReport(thr, rep)) return; - AddRacyStacks(thr, traces, addr_min, addr_max); } void PrintCurrentStack(ThreadState *thr, uptr pc) { diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp index 0ac1ee99c47..d80146735ea 100644 --- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp +++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp @@ -145,6 +145,9 @@ void ThreadContext::OnFinished() { #if !SANITIZER_GO thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache); #endif +#if !SANITIZER_GO + PlatformCleanUpThreadState(thr); +#endif thr->~ThreadState(); #if TSAN_COLLECT_STATS StatAggregate(ctx->stat, thr->stat); @@ -285,19 +288,34 @@ void ThreadFinish(ThreadState *thr) { ctx->thread_registry->FinishThread(thr->tid); } -static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) { - uptr uid = (uptr)arg; - if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) { +struct ConsumeThreadContext { + uptr uid; + ThreadContextBase *tctx; +}; + +static bool ConsumeThreadByUid(ThreadContextBase *tctx, void *arg) { + ConsumeThreadContext *findCtx = (ConsumeThreadContext *)arg; + if (tctx->user_id == findCtx->uid && tctx->status != ThreadStatusInvalid) { + if (findCtx->tctx) { + // Ensure that user_id is unique. If it's not the case we are screwed. + // Something went wrong before, but now there is no way to recover. + // Returning a wrong thread is not an option, it may lead to very hard + // to debug false positives (e.g. if we join a wrong thread). + Report("ThreadSanitizer: dup thread with used id 0x%zx\n", findCtx->uid); + Die(); + } + findCtx->tctx = tctx; tctx->user_id = 0; - return true; } return false; } -int ThreadTid(ThreadState *thr, uptr pc, uptr uid) { - int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid); - DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res); - return res; +int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) { + ConsumeThreadContext findCtx = {uid, nullptr}; + ctx->thread_registry->FindThread(ConsumeThreadByUid, &findCtx); + int tid = findCtx.tctx ? findCtx.tctx->tid : ThreadRegistry::kUnknownTid; + DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid); + return tid; } void ThreadJoin(ThreadState *thr, uptr pc, int tid) { diff --git a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_stat.h b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_stat.h index 94e18bc66df..8b26a59bb2e 100644 --- a/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_stat.h +++ b/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_stat.h @@ -68,6 +68,7 @@ enum StatType { StatSyncDestroyed, StatSyncAcquire, StatSyncRelease, + StatSyncReleaseStoreAcquire, // Clocks - acquire. 
StatClockAcquire, diff --git a/gnu/llvm/compiler-rt/lib/tsan/tests/rtl/tsan_test_util_posix.cpp b/gnu/llvm/compiler-rt/lib/tsan/tests/rtl/tsan_test_util_posix.cpp index a24d04f4700..733e5d282a3 100644 --- a/gnu/llvm/compiler-rt/lib/tsan/tests/rtl/tsan_test_util_posix.cpp +++ b/gnu/llvm/compiler-rt/lib/tsan/tests/rtl/tsan_test_util_posix.cpp @@ -27,6 +27,8 @@ #include <unistd.h> #include <errno.h> +#define CALLERPC (__builtin_return_address(0)) + using namespace __tsan; static __thread bool expect_report; @@ -249,22 +251,42 @@ void ScopedThread::Impl::HandleEvent(Event *ev) { switch (ev->type) { case Event::READ: case Event::WRITE: { - void (*tsan_mop)(void *addr) = 0; + void (*tsan_mop)(void *addr, void *pc) = 0; if (ev->type == Event::READ) { switch (ev->arg /*size*/) { - case 1: tsan_mop = __tsan_read1; break; - case 2: tsan_mop = __tsan_read2; break; - case 4: tsan_mop = __tsan_read4; break; - case 8: tsan_mop = __tsan_read8; break; - case 16: tsan_mop = __tsan_read16; break; + case 1: + tsan_mop = __tsan_read1_pc; + break; + case 2: + tsan_mop = __tsan_read2_pc; + break; + case 4: + tsan_mop = __tsan_read4_pc; + break; + case 8: + tsan_mop = __tsan_read8_pc; + break; + case 16: + tsan_mop = __tsan_read16_pc; + break; } } else { switch (ev->arg /*size*/) { - case 1: tsan_mop = __tsan_write1; break; - case 2: tsan_mop = __tsan_write2; break; - case 4: tsan_mop = __tsan_write4; break; - case 8: tsan_mop = __tsan_write8; break; - case 16: tsan_mop = __tsan_write16; break; + case 1: + tsan_mop = __tsan_write1_pc; + break; + case 2: + tsan_mop = __tsan_write2_pc; + break; + case 4: + tsan_mop = __tsan_write4_pc; + break; + case 8: + tsan_mop = __tsan_write8_pc; + break; + case 16: + tsan_mop = __tsan_write16_pc; + break; } } CHECK_NE(tsan_mop, 0); @@ -274,7 +296,7 @@ void ScopedThread::Impl::HandleEvent(Event *ev) { const int ErrCode = ECHRNG; #endif errno = ErrCode; - tsan_mop(ev->ptr); + tsan_mop(ev->ptr, (void *)ev->arg2); CHECK_EQ(ErrCode, errno); // In no case must errno be changed. break; } @@ -327,7 +349,7 @@ void ScopedThread::Impl::HandleEvent(Event *ev) { } void *ScopedThread::Impl::ScopedThreadCallback(void *arg) { - __tsan_func_entry(__builtin_return_address(0)); + __tsan_func_entry(CALLERPC); Impl *impl = (Impl*)arg; for (;;) { Event* ev = (Event*)atomic_load(&impl->event, memory_order_acquire); @@ -392,7 +414,8 @@ void ScopedThread::Detach() { void ScopedThread::Access(void *addr, bool is_write, int size, bool expect_race) { - Event event(is_write ? Event::WRITE : Event::READ, addr, size); + Event event(is_write ? 
Event::WRITE : Event::READ, addr, size, + (uptr)CALLERPC); if (expect_race) event.ExpectReport(ReportTypeRace); impl_->send(&event); diff --git a/gnu/llvm/compiler-rt/lib/tsan/tests/unit/tsan_clock_test.cpp b/gnu/llvm/compiler-rt/lib/tsan/tests/unit/tsan_clock_test.cpp index 6d835ba85c3..cdaaf30b1b2 100644 --- a/gnu/llvm/compiler-rt/lib/tsan/tests/unit/tsan_clock_test.cpp +++ b/gnu/llvm/compiler-rt/lib/tsan/tests/unit/tsan_clock_test.cpp @@ -108,6 +108,31 @@ TEST(Clock, RepeatedAcquire) { sync.Reset(&cache); } +TEST(Clock, releaseStoreAcquire) { + ThreadClock thr0(0); + thr0.tick(); + ThreadClock thr1(1); + thr1.tick(); + SyncClock syncA; + SyncClock syncB; + ASSERT_EQ(syncA.size(), 0U); + ASSERT_EQ(syncB.size(), 0U); + thr1.releaseStoreAcquire(&cache, &syncB); + ASSERT_EQ(syncB.size(), 2U); // T0 and T1 + // releaseStoreAcquire to an empty SyncClock + thr0.releaseStoreAcquire(&cache, &syncA); + ASSERT_EQ(syncA.size(), 1U); + // releaseStoreAcquire from a non-empty SyncClock + // T0 learns about T1 + thr0.releaseStoreAcquire(&cache, &syncB); + // releaseStoreAcquire to the originally empty SyncClock + // T0 deposits info about T1 into syncA + thr0.releaseStoreAcquire(&cache, &syncA); + ASSERT_EQ(syncA.size(), 2U); + syncA.Reset(&cache); + syncB.Reset(&cache); +} + TEST(Clock, ManyThreads) { SyncClock chunked; for (unsigned i = 0; i < 200; i++) { @@ -336,6 +361,18 @@ struct SimpleThreadClock { dst->clock[i] = max(dst->clock[i], clock[i]); } + void releaseStoreAcquire(SimpleSyncClock *sc) { + if (sc->size < size) + sc->size = size; + else + size = sc->size; + for (uptr i = 0; i < kThreads; i++) { + uptr tmp = clock[i]; + clock[i] = max(sc->clock[i], clock[i]); + sc->clock[i] = tmp; + } + } + void acq_rel(SimpleSyncClock *dst) { acquire(dst); release(dst); @@ -390,7 +427,7 @@ static bool ClockFuzzer(bool printing) { thr0[tid]->tick(); thr1[tid]->tick(); - switch (rand() % 6) { + switch (rand() % 7) { case 0: if (printing) printf("acquire thr%d <- clk%d\n", tid, cid); @@ -423,6 +460,12 @@ static bool ClockFuzzer(bool printing) { break; case 5: if (printing) + printf("releaseStoreAcquire thr%d -> clk%d\n", tid, cid); + thr0[tid]->releaseStoreAcquire(sync0[cid]); + thr1[tid]->releaseStoreAcquire(&cache, sync1[cid]); + break; + case 6: + if (printing) printf("reset thr%d\n", tid); u64 epoch = thr0[tid]->clock[tid] + 1; reused[tid]++; diff --git a/gnu/llvm/compiler-rt/lib/ubsan/CMakeLists.txt b/gnu/llvm/compiler-rt/lib/ubsan/CMakeLists.txt index 378cbac6a31..dca02a65e97 100644 --- a/gnu/llvm/compiler-rt/lib/ubsan/CMakeLists.txt +++ b/gnu/llvm/compiler-rt/lib/ubsan/CMakeLists.txt @@ -52,6 +52,8 @@ set(UBSAN_CXXFLAGS ${SANITIZER_COMMON_CFLAGS}) append_rtti_flag(ON UBSAN_CXXFLAGS) append_list_if(SANITIZER_CAN_USE_CXXABI -DUBSAN_CAN_USE_CXXABI UBSAN_CXXFLAGS) +set(UBSAN_LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS}) + set(UBSAN_DYNAMIC_LIBS ${SANITIZER_CXX_ABI_LIBRARIES} ${SANITIZER_COMMON_LINK_LIBS}) append_list_if(COMPILER_RT_HAS_LIBDL dl UBSAN_DYNAMIC_LIBS) @@ -199,21 +201,46 @@ else() PARENT_TARGET ubsan) if (FUCHSIA OR UNIX) - add_compiler_rt_runtime(clang_rt.ubsan_standalone - SHARED + file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/dummy.cpp "") + add_compiler_rt_object_libraries(RTUbsan_dynamic_version_script_dummy ARCHS ${UBSAN_SUPPORTED_ARCH} - OBJECT_LIBS RTSanitizerCommon - RTSanitizerCommonLibc - RTSanitizerCommonCoverage - RTSanitizerCommonSymbolizer - RTUbsan - RTUbsan_cxx - RTUbsan_standalone - RTInterception - CFLAGS ${UBSAN_CFLAGS} - LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} - LINK_LIBS 
${UBSAN_DYNAMIC_LIBS} - PARENT_TARGET ubsan) + SOURCES ${CMAKE_CURRENT_BINARY_DIR}/dummy.cpp + CFLAGS ${UBSAN_CFLAGS}) + + foreach(arch ${UBSAN_SUPPORTED_ARCH}) + add_sanitizer_rt_version_list(clang_rt.ubsan_standalone-dynamic-${arch} + LIBS clang_rt.ubsan_standalone-${arch} + clang_rt.ubsan_standalone_cxx-${arch} + EXTRA ubsan.syms.extra) + set(VERSION_SCRIPT_FLAG + -Wl,--version-script,${CMAKE_CURRENT_BINARY_DIR}/clang_rt.ubsan_standalone-dynamic-${arch}.vers) + # The Solaris 11.4 linker supports a subset of GNU ld version scripts, + # but requires a special option to enable it. + if (OS_NAME MATCHES "SunOS") + list(APPEND VERSION_SCRIPT_FLAG -Wl,-z,gnu-version-script-compat) + endif() + set_property(SOURCE + ${CMAKE_CURRENT_BINARY_DIR}/dummy.cpp + APPEND PROPERTY + OBJECT_DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/clang_rt.ubsan_standalone-dynamic-${arch}.vers) + + add_compiler_rt_runtime(clang_rt.ubsan_standalone + SHARED + ARCHS ${arch} + OBJECT_LIBS RTSanitizerCommon + RTSanitizerCommonLibc + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer + RTUbsan + RTUbsan_cxx + RTUbsan_standalone + RTInterception + RTUbsan_dynamic_version_script_dummy + CFLAGS ${UBSAN_CFLAGS} + LINK_FLAGS ${UBSAN_LINK_FLAGS} ${VERSION_SCRIPT_FLAG} + LINK_LIBS ${UBSAN_DYNAMIC_LIBS} + PARENT_TARGET ubsan) + endforeach() set(ARCHS_FOR_SYMBOLS ${UBSAN_SUPPORTED_ARCH}) list(REMOVE_ITEM ARCHS_FOR_SYMBOLS i386) diff --git a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_checks.inc b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_checks.inc index 33a8dfcde02..846cd89ee19 100644 --- a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_checks.inc +++ b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_checks.inc @@ -18,6 +18,8 @@ UBSAN_CHECK(GenericUB, "undefined-behavior", "undefined") UBSAN_CHECK(NullPointerUse, "null-pointer-use", "null") +UBSAN_CHECK(NullPointerUseWithNullability, "null-pointer-use", + "nullability-assign") UBSAN_CHECK(NullptrWithOffset, "nullptr-with-offset", "pointer-overflow") UBSAN_CHECK(NullptrWithNonZeroOffset, "nullptr-with-nonzero-offset", "pointer-overflow") @@ -35,6 +37,7 @@ UBSAN_CHECK(IntegerDivideByZero, "integer-divide-by-zero", "integer-divide-by-zero") UBSAN_CHECK(FloatDivideByZero, "float-divide-by-zero", "float-divide-by-zero") UBSAN_CHECK(InvalidBuiltin, "invalid-builtin-use", "invalid-builtin-use") +UBSAN_CHECK(InvalidObjCCast, "invalid-objc-cast", "invalid-objc-cast") UBSAN_CHECK(ImplicitUnsignedIntegerTruncation, "implicit-unsigned-integer-truncation", "implicit-unsigned-integer-truncation") @@ -59,6 +62,10 @@ UBSAN_CHECK(InvalidEnumLoad, "invalid-enum-load", "enum") UBSAN_CHECK(FunctionTypeMismatch, "function-type-mismatch", "function") UBSAN_CHECK(InvalidNullReturn, "invalid-null-return", "returns-nonnull-attribute") +UBSAN_CHECK(InvalidNullReturnWithNullability, "invalid-null-return", + "nullability-return") UBSAN_CHECK(InvalidNullArgument, "invalid-null-argument", "nonnull-attribute") +UBSAN_CHECK(InvalidNullArgumentWithNullability, "invalid-null-argument", + "nullability-arg") UBSAN_CHECK(DynamicTypeMismatch, "dynamic-type-mismatch", "vptr") UBSAN_CHECK(CFIBadType, "cfi-bad-type", "cfi") diff --git a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_handlers.cpp b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_handlers.cpp index 3f9da75a12a..e201e6bba22 100644 --- a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_handlers.cpp +++ b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_handlers.cpp @@ -16,6 +16,7 @@ #include "ubsan_diag.h" #include "ubsan_flags.h" #include "ubsan_monitor.h" +#include "ubsan_value.h" #include "sanitizer_common/sanitizer_common.h" @@ -36,6 
+37,45 @@ bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET) { return SLoc.isDisabled() || IsPCSuppressed(ET, Opts.pc, SLoc.getFilename()); } +/// Situations in which we might emit a check for the suitability of a +/// pointer or glvalue. Needs to be kept in sync with CodeGenFunction.h in +/// clang. +enum TypeCheckKind { + /// Checking the operand of a load. Must be suitably sized and aligned. + TCK_Load, + /// Checking the destination of a store. Must be suitably sized and aligned. + TCK_Store, + /// Checking the bound value in a reference binding. Must be suitably sized + /// and aligned, but is not required to refer to an object (until the + /// reference is used), per core issue 453. + TCK_ReferenceBinding, + /// Checking the object expression in a non-static data member access. Must + /// be an object within its lifetime. + TCK_MemberAccess, + /// Checking the 'this' pointer for a call to a non-static member function. + /// Must be an object within its lifetime. + TCK_MemberCall, + /// Checking the 'this' pointer for a constructor call. + TCK_ConstructorCall, + /// Checking the operand of a static_cast to a derived pointer type. Must be + /// null or an object within its lifetime. + TCK_DowncastPointer, + /// Checking the operand of a static_cast to a derived reference type. Must + /// be an object within its lifetime. + TCK_DowncastReference, + /// Checking the operand of a cast to a base object. Must be suitably sized + /// and aligned. + TCK_Upcast, + /// Checking the operand of a cast to a virtual base object. Must be an + /// object within its lifetime. + TCK_UpcastToVirtualBase, + /// Checking the value assigned to a _Nonnull pointer. Must not be null. + TCK_NonnullAssign, + /// Checking the operand of a dynamic_cast or a typeid expression. Must be + /// null or an object within its lifetime. + TCK_DynamicOperation +}; + const char *TypeCheckKinds[] = { "load of", "store to", "reference binding to", "member access within", "member call on", "constructor call on", "downcast of", "downcast of", @@ -50,7 +90,9 @@ static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer, uptr Alignment = (uptr)1 << Data->LogAlignment; ErrorType ET; if (!Pointer) - ET = ErrorType::NullPointerUse; + ET = (Data->TypeCheckKind == TCK_NonnullAssign) + ? ErrorType::NullPointerUseWithNullability + : ErrorType::NullPointerUse; else if (Pointer & (Alignment - 1)) ET = ErrorType::MisalignedPointerUse; else @@ -71,6 +113,7 @@ static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer, switch (ET) { case ErrorType::NullPointerUse: + case ErrorType::NullPointerUseWithNullability: Diag(Loc, DL_Error, ET, "%0 null pointer of type %1") << TypeCheckKinds[Data->TypeCheckKind] << Data->Type; break; @@ -598,13 +641,44 @@ void __ubsan::__ubsan_handle_invalid_builtin_abort(InvalidBuiltinData *Data) { Die(); } +static void handleInvalidObjCCast(InvalidObjCCast *Data, ValueHandle Pointer, + ReportOptions Opts) { + SourceLocation Loc = Data->Loc.acquire(); + ErrorType ET = ErrorType::InvalidObjCCast; + + if (ignoreReport(Loc, Opts, ET)) + return; + + ScopedReport R(Opts, Loc, ET); + + const char *GivenClass = getObjCClassName(Pointer); + const char *GivenClassStr = GivenClass ? 
GivenClass : "<unknown type>"; + + Diag(Loc, DL_Error, ET, + "invalid ObjC cast, object is a '%0', but expected a %1") + << GivenClassStr << Data->ExpectedType; +} + +void __ubsan::__ubsan_handle_invalid_objc_cast(InvalidObjCCast *Data, + ValueHandle Pointer) { + GET_REPORT_OPTIONS(false); + handleInvalidObjCCast(Data, Pointer, Opts); +} +void __ubsan::__ubsan_handle_invalid_objc_cast_abort(InvalidObjCCast *Data, + ValueHandle Pointer) { + GET_REPORT_OPTIONS(true); + handleInvalidObjCCast(Data, Pointer, Opts); + Die(); +} + static void handleNonNullReturn(NonNullReturnData *Data, SourceLocation *LocPtr, ReportOptions Opts, bool IsAttr) { if (!LocPtr) UNREACHABLE("source location pointer is null!"); SourceLocation Loc = LocPtr->acquire(); - ErrorType ET = ErrorType::InvalidNullReturn; + ErrorType ET = IsAttr ? ErrorType::InvalidNullReturn + : ErrorType::InvalidNullReturnWithNullability; if (ignoreReport(Loc, Opts, ET)) return; @@ -648,7 +722,8 @@ void __ubsan::__ubsan_handle_nullability_return_v1_abort( static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts, bool IsAttr) { SourceLocation Loc = Data->Loc.acquire(); - ErrorType ET = ErrorType::InvalidNullArgument; + ErrorType ET = IsAttr ? ErrorType::InvalidNullArgument + : ErrorType::InvalidNullArgumentWithNullability; if (ignoreReport(Loc, Opts, ET)) return; diff --git a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_handlers.h b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_handlers.h index 22ca9642238..219fb15de55 100644 --- a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_handlers.h +++ b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_handlers.h @@ -168,6 +168,14 @@ struct InvalidBuiltinData { /// Handle a builtin called in an invalid way. RECOVERABLE(invalid_builtin, InvalidBuiltinData *Data) +struct InvalidObjCCast { + SourceLocation Loc; + const TypeDescriptor &ExpectedType; +}; + +/// Handle an invalid ObjC cast. 
+RECOVERABLE(invalid_objc_cast, InvalidObjCCast *Data, ValueHandle Pointer) + struct NonNullReturnData { SourceLocation AttrLoc; }; diff --git a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_init.cpp b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_init.cpp index 1a3b7d37267..e0be5a72ec4 100644 --- a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_init.cpp +++ b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_init.cpp @@ -37,10 +37,12 @@ static void CommonStandaloneInit() { SanitizerToolName = GetSanititizerToolName(); CacheBinaryName(); InitializeFlags(); + __sanitizer::InitializePlatformEarly(); __sanitizer_set_report_path(common_flags()->log_path); AndroidLogInit(); InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); CommonInit(); + Symbolizer::LateInitialize(); } void __ubsan::InitAsStandalone() { diff --git a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_interface.inc b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_interface.inc index 1e44bc2171d..94337d85017 100644 --- a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_interface.inc +++ b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_interface.inc @@ -27,6 +27,8 @@ INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion) INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion_abort) INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin) INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin_abort) +INTERFACE_FUNCTION(__ubsan_handle_invalid_objc_cast) +INTERFACE_FUNCTION(__ubsan_handle_invalid_objc_cast_abort) INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value) INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value_abort) INTERFACE_FUNCTION(__ubsan_handle_missing_return) diff --git a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp index 97846d4dd43..4f1708ba190 100644 --- a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp +++ b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp @@ -16,6 +16,7 @@ #include "ubsan_type_hash.h" #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_ptrauth.h" // The following are intended to be binary compatible with the definitions // given in the Itanium ABI. We make no attempt to be ODR-compatible with @@ -194,6 +195,7 @@ struct VtablePrefix { std::type_info *TypeInfo; }; VtablePrefix *getVtablePrefix(void *Vtable) { + Vtable = ptrauth_auth_data(Vtable, ptrauth_key_cxx_vtable_pointer, 0); VtablePrefix *Vptr = reinterpret_cast<VtablePrefix*>(Vtable); VtablePrefix *Prefix = Vptr - 1; if (!IsAccessibleMemoryRange((uptr)Prefix, sizeof(VtablePrefix))) diff --git a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.cpp b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.cpp index 60f0b5c9934..79c3ba991d3 100644 --- a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.cpp +++ b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.cpp @@ -16,9 +16,57 @@ #include "ubsan_value.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_mutex.h" + +// TODO(dliew): Prefer '__APPLE__' here over 'SANITIZER_MAC', as the latter is +// unclear. rdar://58124919 tracks using a more obviously portable guard. +#if defined(__APPLE__) +#include <dlfcn.h> +#endif using namespace __ubsan; +typedef const char *(*ObjCGetClassNameTy)(void *); + +const char *__ubsan::getObjCClassName(ValueHandle Pointer) { +#if defined(__APPLE__) + // We need to query the ObjC runtime for some information, but do not want + // to introduce a static dependency from the ubsan runtime onto ObjC. Try to + // grab a handle to the ObjC runtime used by the process. 
diff --git a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.cpp b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.cpp
index 60f0b5c9934..79c3ba991d3 100644
--- a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.cpp
+++ b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.cpp
@@ -16,9 +16,57 @@
 #include "ubsan_value.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+
+// TODO(dliew): Prefer '__APPLE__' here over 'SANITIZER_MAC', as the latter is
+// unclear. rdar://58124919 tracks using a more obviously portable guard.
+#if defined(__APPLE__)
+#include <dlfcn.h>
+#endif
 
 using namespace __ubsan;
 
+typedef const char *(*ObjCGetClassNameTy)(void *);
+
+const char *__ubsan::getObjCClassName(ValueHandle Pointer) {
+#if defined(__APPLE__)
+  // We need to query the ObjC runtime for some information, but do not want
+  // to introduce a static dependency from the ubsan runtime onto ObjC. Try to
+  // grab a handle to the ObjC runtime used by the process.
+  static bool AttemptedDlopen = false;
+  static void *ObjCHandle = nullptr;
+  static void *ObjCObjectGetClassName = nullptr;
+
+  // Prevent threads from racing to dlopen().
+  static __sanitizer::StaticSpinMutex Lock;
+  {
+    __sanitizer::SpinMutexLock Guard(&Lock);
+
+    if (!AttemptedDlopen) {
+      ObjCHandle = dlopen(
+          "/usr/lib/libobjc.A.dylib",
+          RTLD_LAZY         // Only bind symbols when used.
+              | RTLD_LOCAL  // Only make symbols available via the handle.
+              | RTLD_NOLOAD // Do not load the dylib, just grab a handle if the
+                            // image is already loaded.
+              | RTLD_FIRST  // Only search the image pointed-to by the handle.
+      );
+      AttemptedDlopen = true;
+      if (!ObjCHandle)
+        return nullptr;
+      ObjCObjectGetClassName = dlsym(ObjCHandle, "object_getClassName");
+    }
+  }
+
+  if (!ObjCObjectGetClassName)
+    return nullptr;
+
+  return ObjCGetClassNameTy(ObjCObjectGetClassName)((void *)Pointer);
+#else
+  return nullptr;
+#endif
+}
+
 SIntMax Value::getSIntValue() const {
   CHECK(getType().isSignedIntegerTy());
   if (isInlineInt()) {
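
The handler above leans on a classic pattern: probe for a library with
RTLD_NOLOAD so the sanitizer never pulls the ObjC runtime into a process that
was not already using it, then resolve the one symbol it needs via dlsym. A
stripped-down, platform-neutral sketch of the same idiom (hypothetical library
and symbol names; RTLD_FIRST is Apple-specific and omitted here):

    #include <dlfcn.h>

    typedef int (*SomeFnTy)(void);

    int callIfAlreadyLoaded() {
      // RTLD_NOLOAD yields a handle only if the image is already mapped into
      // the process, so this probe never forces the library to load.
      void *Handle =
          dlopen("libsomething.so", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
      if (!Handle)
        return -1; // Not loaded: treat the feature as unavailable.
      if (void *Sym = dlsym(Handle, "some_function"))
        return reinterpret_cast<SomeFnTy>(Sym)();
      return -1;
    }
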
diff --git a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.h b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.h
index a216e3a147e..e0957276dd2 100644
--- a/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.h
+++ b/gnu/llvm/compiler-rt/lib/ubsan/ubsan_value.h
@@ -135,6 +135,9 @@ public:
 /// \brief An opaque handle to a value.
 typedef uptr ValueHandle;
 
+/// Returns the class name of the given ObjC object, or null if the name
+/// cannot be found.
+const char *getObjCClassName(ValueHandle Pointer);
 
 /// \brief Representation of an operand value provided by the instrumented
 /// code.
 ///
diff --git a/gnu/llvm/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp b/gnu/llvm/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
index ed62ddd0fa3..8654c705cfb 100644
--- a/gnu/llvm/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
+++ b/gnu/llvm/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
@@ -109,6 +109,7 @@ HANDLER(vla_bound_not_positive, "vla-bound-not-positive")
 HANDLER(float_cast_overflow, "float-cast-overflow")
 HANDLER(load_invalid_value, "load-invalid-value")
 HANDLER(invalid_builtin, "invalid-builtin")
+HANDLER(invalid_objc_cast, "invalid-objc-cast")
 HANDLER(function_type_mismatch, "function-type-mismatch")
 HANDLER(implicit_conversion, "implicit-conversion")
 HANDLER(nonnull_arg, "nonnull-arg")
diff --git a/gnu/llvm/compiler-rt/lib/xray/xray_AArch64.cpp b/gnu/llvm/compiler-rt/lib/xray/xray_AArch64.cpp
index 081941b7037..00105d30b4d 100644
--- a/gnu/llvm/compiler-rt/lib/xray/xray_AArch64.cpp
+++ b/gnu/llvm/compiler-rt/lib/xray/xray_AArch64.cpp
@@ -61,7 +61,7 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
   // When |Enable|==false, we set back the first instruction in the sled to be
   //   B #32
 
-  uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.Address);
+  uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.address());
   uint32_t *CurAddress = FirstAddress + 1;
   if (Enable) {
     *CurAddress = uint32_t(PatchOpcodes::PO_LdrW0_12);
diff --git a/gnu/llvm/compiler-rt/lib/xray/xray_arm.cpp b/gnu/llvm/compiler-rt/lib/xray/xray_arm.cpp
index 9ad8065eb88..e1818555906 100644
--- a/gnu/llvm/compiler-rt/lib/xray/xray_arm.cpp
+++ b/gnu/llvm/compiler-rt/lib/xray/xray_arm.cpp
@@ -102,7 +102,7 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
   // When |Enable|==false, we set back the first instruction in the sled to be
   //   B #20
 
-  uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.Address);
+  uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.address());
   uint32_t *CurAddress = FirstAddress + 1;
   if (Enable) {
     CurAddress =
diff --git a/gnu/llvm/compiler-rt/lib/xray/xray_init.cpp b/gnu/llvm/compiler-rt/lib/xray/xray_init.cpp
index 40839647797..00ba5fe4a52 100644
--- a/gnu/llvm/compiler-rt/lib/xray/xray_init.cpp
+++ b/gnu/llvm/compiler-rt/lib/xray/xray_init.cpp
@@ -84,8 +84,24 @@ void __xray_init() XRAY_NEVER_INSTRUMENT {
     SpinMutexLock Guard(&XRayInstrMapMutex);
     XRayInstrMap.Sleds = __start_xray_instr_map;
     XRayInstrMap.Entries = __stop_xray_instr_map - __start_xray_instr_map;
-    XRayInstrMap.SledsIndex = __start_xray_fn_idx;
-    XRayInstrMap.Functions = __stop_xray_fn_idx - __start_xray_fn_idx;
+    if (__start_xray_fn_idx != nullptr) {
+      XRayInstrMap.SledsIndex = __start_xray_fn_idx;
+      XRayInstrMap.Functions = __stop_xray_fn_idx - __start_xray_fn_idx;
+    } else {
+      size_t CountFunctions = 0;
+      uint64_t LastFnAddr = 0;
+
+      for (std::size_t I = 0; I < XRayInstrMap.Entries; I++) {
+        const auto &Sled = XRayInstrMap.Sleds[I];
+        const auto Function = Sled.function();
+        if (Function != LastFnAddr) {
+          CountFunctions++;
+          LastFnAddr = Function;
+        }
+      }
+
+      XRayInstrMap.Functions = CountFunctions;
+    }
   }
   atomic_store(&XRayInitialized, true, memory_order_release);
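
When a binary lacks the xray_fn_idx section, the fallback above derives the
function count from the sled array itself, relying on all sleds of one
function being emitted contiguously. The same logic in isolation (Sled is a
stand-in for the real XRaySledEntry):

    #include <cstddef>
    #include <cstdint>

    struct Sled {
      uint64_t Fn;
      uint64_t function() const { return Fn; }
    };

    // Count one function per run of sleds sharing a function address.
    size_t countFunctions(const Sled *Sleds, size_t Entries) {
      size_t Count = 0;
      uint64_t LastFnAddr = 0;
      for (size_t I = 0; I < Entries; I++) {
        uint64_t Fn = Sleds[I].function();
        if (Fn != LastFnAddr) {
          ++Count;
          LastFnAddr = Fn;
        }
      }
      return Count;
    }
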
diff --git a/gnu/llvm/compiler-rt/lib/xray/xray_interface.cpp b/gnu/llvm/compiler-rt/lib/xray/xray_interface.cpp
index 0d22893eb30..7669b9ab82b 100644
--- a/gnu/llvm/compiler-rt/lib/xray/xray_interface.cpp
+++ b/gnu/llvm/compiler-rt/lib/xray/xray_interface.cpp
@@ -175,6 +175,33 @@ bool patchSled(const XRaySledEntry &Sled, bool Enable,
   return Success;
 }
 
+const XRayFunctionSledIndex
+findFunctionSleds(int32_t FuncId,
+                  const XRaySledMap &InstrMap) XRAY_NEVER_INSTRUMENT {
+  int32_t CurFn = 0;
+  uint64_t LastFnAddr = 0;
+  XRayFunctionSledIndex Index = {nullptr, nullptr};
+
+  for (std::size_t I = 0; I < InstrMap.Entries && CurFn <= FuncId; I++) {
+    const auto &Sled = InstrMap.Sleds[I];
+    const auto Function = Sled.function();
+    if (Function != LastFnAddr) {
+      CurFn++;
+      LastFnAddr = Function;
+    }
+
+    if (CurFn == FuncId) {
+      if (Index.Begin == nullptr)
+        Index.Begin = &Sled;
+      Index.End = &Sled;
+    }
+  }
+
+  Index.End += 1;
+
+  return Index;
+}
+
 XRayPatchingStatus patchFunction(int32_t FuncId,
                                  bool Enable) XRAY_NEVER_INSTRUMENT {
   if (!atomic_load(&XRayInitialized,
@@ -205,10 +232,10 @@ XRayPatchingStatus patchFunction(int32_t FuncId,
   }
 
   // Now we patch the sleds for this specific function.
-  auto SledRange = InstrMap.SledsIndex[FuncId - 1];
+  auto SledRange = InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1]
+                                       : findFunctionSleds(FuncId, InstrMap);
   auto *f = SledRange.Begin;
   auto *e = SledRange.End;
-
   bool SucceedOnce = false;
   while (f != e)
     SucceedOnce |= patchSled(*f++, Enable, FuncId);
@@ -264,14 +291,14 @@ XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
   // now we're assuming we can mprotect the whole section of text between the
   // minimum sled address and the maximum sled address (+ the largest sled
   // size).
-  auto MinSled = InstrMap.Sleds[0];
-  auto MaxSled = InstrMap.Sleds[InstrMap.Entries - 1];
+  auto *MinSled = &InstrMap.Sleds[0];
+  auto *MaxSled = &InstrMap.Sleds[InstrMap.Entries - 1];
   for (std::size_t I = 0; I < InstrMap.Entries; I++) {
     const auto &Sled = InstrMap.Sleds[I];
-    if (Sled.Address < MinSled.Address)
-      MinSled = Sled;
-    if (Sled.Address > MaxSled.Address)
-      MaxSled = Sled;
+    if (Sled.address() < MinSled->address())
+      MinSled = &Sled;
+    if (Sled.address() > MaxSled->address())
+      MaxSled = &Sled;
   }
 
   const size_t PageSize = flags()->xray_page_size_override > 0
@@ -283,9 +310,10 @@ XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
   }
 
   void *PageAlignedAddr =
-      reinterpret_cast<void *>(MinSled.Address & ~(PageSize - 1));
+      reinterpret_cast<void *>(MinSled->address() & ~(PageSize - 1));
   size_t MProtectLen =
-      (MaxSled.Address - reinterpret_cast<uptr>(PageAlignedAddr)) + cSledLength;
+      (MaxSled->address() - reinterpret_cast<uptr>(PageAlignedAddr)) +
+      cSledLength;
   MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
   if (Protector.MakeWriteable() == -1) {
     Report("Failed mprotect: %d\n", errno);
@@ -294,7 +322,7 @@ XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
 
   for (std::size_t I = 0; I < InstrMap.Entries; ++I) {
     auto &Sled = InstrMap.Sleds[I];
-    auto F = Sled.Function;
+    auto F = Sled.function();
     if (CurFun == 0)
       CurFun = F;
     if (F != CurFun) {
@@ -334,23 +362,25 @@ XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId,
 
   // Here we compute the minimum sled and maximum sled associated with a
   // particular function ID.
-  auto SledRange = InstrMap.SledsIndex[FuncId - 1];
+  auto SledRange = InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1]
+                                       : findFunctionSleds(FuncId, InstrMap);
   auto *f = SledRange.Begin;
   auto *e = SledRange.End;
-  auto MinSled = *f;
-  auto MaxSled = *(SledRange.End - 1);
+  auto *MinSled = f;
+  auto *MaxSled = (SledRange.End - 1);
   while (f != e) {
-    if (f->Address < MinSled.Address)
-      MinSled = *f;
-    if (f->Address > MaxSled.Address)
-      MaxSled = *f;
+    if (f->address() < MinSled->address())
+      MinSled = f;
+    if (f->address() > MaxSled->address())
+      MaxSled = f;
     ++f;
   }
 
   void *PageAlignedAddr =
-      reinterpret_cast<void *>(MinSled.Address & ~(PageSize - 1));
+      reinterpret_cast<void *>(MinSled->address() & ~(PageSize - 1));
   size_t MProtectLen =
-      (MaxSled.Address - reinterpret_cast<uptr>(PageAlignedAddr)) + cSledLength;
+      (MaxSled->address() - reinterpret_cast<uptr>(PageAlignedAddr)) +
+      cSledLength;
   MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
   if (Protector.MakeWriteable() == -1) {
     Report("Failed mprotect: %d\n", errno);
@@ -461,10 +491,18 @@ int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
 int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }
 
 uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
-  SpinMutexLock Guard(&XRayInstrMapMutex);
-  if (FuncId <= 0 || static_cast<size_t>(FuncId) > XRayInstrMap.Functions)
+  XRaySledMap InstrMap;
+  {
+    SpinMutexLock Guard(&XRayInstrMapMutex);
+    InstrMap = XRayInstrMap;
+  }
+
+  if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions)
     return 0;
-  return XRayInstrMap.SledsIndex[FuncId - 1].Begin->Function
+  const XRaySledEntry *Sled = InstrMap.SledsIndex
+                                  ? InstrMap.SledsIndex[FuncId - 1].Begin
+                                  : findFunctionSleds(FuncId, InstrMap).Begin;
+  return Sled->function()
 // On PPC, function entries are always aligned to 16 bytes. The beginning of a
 // sled might be a local entry, which is always +8 based on the global entry.
 // Always return the global entry.
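
Both patching paths above compute one writable window covering every affected
sled before flipping page protections. A condensed sketch of that range
arithmetic (PageSize is assumed to be a power of two, SledLength the largest
sled size):

    #include <cstddef>
    #include <cstdint>

    // Align the lowest sled address down to a page boundary and extend past
    // the highest sled so a single mprotect() covers every byte to be patched.
    void patchWindow(uint64_t MinSledAddr, uint64_t MaxSledAddr,
                     size_t PageSize, size_t SledLength,
                     void **AlignedAddr, size_t *Len) {
      uintptr_t Base = static_cast<uintptr_t>(MinSledAddr) & ~(PageSize - 1);
      *AlignedAddr = reinterpret_cast<void *>(Base);
      *Len = static_cast<size_t>(MaxSledAddr - Base) + SledLength;
    }
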
diff --git a/gnu/llvm/compiler-rt/lib/xray/xray_interface_internal.h b/gnu/llvm/compiler-rt/lib/xray/xray_interface_internal.h
index 0fea6377648..390f389b1dc 100644
--- a/gnu/llvm/compiler-rt/lib/xray/xray_interface_internal.h
+++ b/gnu/llvm/compiler-rt/lib/xray/xray_interface_internal.h
@@ -29,6 +29,18 @@ struct XRaySledEntry {
   unsigned char AlwaysInstrument;
   unsigned char Version;
   unsigned char Padding[13]; // Need 32 bytes
+  uint64_t function() const {
+    if (Version < 2)
+      return Function;
+    // The target address is relative to the location of the Function variable.
+    return reinterpret_cast<uint64_t>(&Function) + Function;
+  }
+  uint64_t address() const {
+    if (Version < 2)
+      return Address;
+    // The target address is relative to the location of the Address variable.
+    return reinterpret_cast<uint64_t>(&Address) + Address;
+  }
 #elif SANITIZER_WORDSIZE == 32
   uint32_t Address;
   uint32_t Function;
@@ -36,6 +48,18 @@ struct XRaySledEntry {
   unsigned char AlwaysInstrument;
   unsigned char Version;
   unsigned char Padding[5]; // Need 16 bytes
+  uint32_t function() const {
+    if (Version < 2)
+      return Function;
+    // The target address is relative to the location of the Function variable.
+    return reinterpret_cast<uint32_t>(&Function) + Function;
+  }
+  uint32_t address() const {
+    if (Version < 2)
+      return Address;
+    // The target address is relative to the location of the Address variable.
+    return reinterpret_cast<uint32_t>(&Address) + Address;
+  }
 #else
 #error "Unsupported word size."
 #endif
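
Version-2 sleds store their targets relative to the field's own address, which
lets the instrumentation map avoid load-time relocations and stay valid
wherever the image is mapped. The decode step, reduced to its essence:

    #include <cstdint>

    // A self-relative pointer: the stored value is the displacement from the
    // field itself to the target, so decoding is "address of field + value".
    uint64_t decodeSelfRelative(const uint64_t *Field) {
      return reinterpret_cast<uint64_t>(Field) + *Field;
    }
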
diff --git a/gnu/llvm/compiler-rt/lib/xray/xray_powerpc64.cpp b/gnu/llvm/compiler-rt/lib/xray/xray_powerpc64.cpp
index b41f1bce6f2..c3553d84831 100644
--- a/gnu/llvm/compiler-rt/lib/xray/xray_powerpc64.cpp
+++ b/gnu/llvm/compiler-rt/lib/xray/xray_powerpc64.cpp
@@ -52,35 +52,37 @@ namespace __xray {
 bool patchFunctionEntry(const bool Enable, uint32_t FuncId,
                         const XRaySledEntry &Sled,
                         void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+  const uint64_t Address = Sled.address();
   if (Enable) {
     // lis 0, FuncId[16..32]
     // li 0, FuncId[0..15]
-    *reinterpret_cast<uint64_t *>(Sled.Address) =
+    *reinterpret_cast<uint64_t *>(Address) =
         (0x3c000000ull + (FuncId >> 16)) +
         ((0x60000000ull + (FuncId & 0xffff)) << 32);
   } else {
     // b +JumpOverInstNum instructions.
-    *reinterpret_cast<uint32_t *>(Sled.Address) =
+    *reinterpret_cast<uint32_t *>(Address) =
         0x48000000ull + (JumpOverInstNum << 2);
   }
-  clearCache(reinterpret_cast<void *>(Sled.Address), 8);
+  clearCache(reinterpret_cast<void *>(Address), 8);
   return true;
 }
 
 bool patchFunctionExit(const bool Enable, uint32_t FuncId,
                        const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+  const uint64_t Address = Sled.address();
   if (Enable) {
     // lis 0, FuncId[16..32]
     // li 0, FuncId[0..15]
-    *reinterpret_cast<uint64_t *>(Sled.Address) =
+    *reinterpret_cast<uint64_t *>(Address) =
         (0x3c000000ull + (FuncId >> 16)) +
         ((0x60000000ull + (FuncId & 0xffff)) << 32);
   } else {
     // Copy the blr/b instruction after JumpOverInstNum instructions.
-    *reinterpret_cast<uint32_t *>(Sled.Address) =
-        *(reinterpret_cast<uint32_t *>(Sled.Address) + JumpOverInstNum);
+    *reinterpret_cast<uint32_t *>(Address) =
+        *(reinterpret_cast<uint32_t *>(Address) + JumpOverInstNum);
   }
-  clearCache(reinterpret_cast<void *>(Sled.Address), 8);
+  clearCache(reinterpret_cast<void *>(Address), 8);
   return true;
 }
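
For reference, the magic numbers in the PowerPC patcher: 0x3c000000 is the
base encoding of addis r0, 0, imm (the lis idiom) and 0x60000000 of
ori r0, r0, imm, together loading a 32-bit function ID. The single 64-bit
store writes both instruction words at once; on little-endian ppc64le (the
only layout this file targets, as far as I can tell) the low word executes
first. A sketch of the assembly step:

    #include <cstdint>

    // Build the 8-byte pattern stored by patchFunctionEntry above. On
    // little-endian ppc64le, the low 32 bits of the uint64_t are the first
    // instruction executed.
    uint64_t encodeLoadFuncId(uint32_t FuncId) {
      uint64_t Lis = 0x3c000000ull + (FuncId >> 16);    // lis 0, hi16
      uint64_t Ori = 0x60000000ull + (FuncId & 0xffff); // ori 0, 0, lo16
      return Lis + (Ori << 32);
    }
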
diff --git a/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_AArch64.S b/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_AArch64.S
index 4d1b04fb7d9..3bf52cef60f 100644
--- a/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_AArch64.S
+++ b/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_AArch64.S
@@ -7,6 +7,7 @@
     .p2align 2
     /* Let C/C++ see the symbol */
     .global __xray_FunctionEntry
+    .hidden __xray_FunctionEntry
     .type __xray_FunctionEntry, %function
     /* In C++ it is void extern "C" __xray_FunctionEntry(uint32_t FuncId)
        with FuncId passed in W0 register. */
@@ -26,10 +27,14 @@ __xray_FunctionEntry:
    STP Q2, Q3, [SP, #-32]!
    STP Q4, Q5, [SP, #-32]!
    STP Q6, Q7, [SP, #-32]!
-    /* Load the address of _ZN6__xray19XRayPatchedFunctionE into X1 */
-    LDR X1, =_ZN6__xray19XRayPatchedFunctionE
+    /* X8 is the indirect result register and needs to be preserved for the body
+       of the function to use */
+    STP X8, X0, [SP, #-16]!
+
+    /* Load the page address of _ZN6__xray19XRayPatchedFunctionE into X1 */
+    ADRP X1, _ZN6__xray19XRayPatchedFunctionE
     /* Load the handler function pointer into X2 */
-    LDR X2, [X1]
+    LDR X2, [X1, #:lo12:_ZN6__xray19XRayPatchedFunctionE]
     /* Handler address is nullptr if handler is not set */
     CMP X2, #0
     BEQ FunctionEntry_restore
@@ -40,6 +45,7 @@ __xray_FunctionEntry:
     BLR X2
 FunctionEntry_restore:
     /* Pop the saved registers */
+    LDP X8, X0, [SP], #16
     LDP Q6, Q7, [SP], #32
     LDP Q4, Q5, [SP], #32
     LDP Q2, Q3, [SP], #32
@@ -54,6 +60,7 @@ FunctionEntry_restore:
     .p2align 2
     /* Let C/C++ see the symbol */
     .global __xray_FunctionExit
+    .hidden __xray_FunctionExit
     .type __xray_FunctionExit, %function
     /* In C++ it is void extern "C" __xray_FunctionExit(uint32_t FuncId)
        with FuncId passed in W0 register. */
@@ -69,11 +76,18 @@ __xray_FunctionExit:
     STP X3, X4, [SP, #-16]!
     STP X5, X6, [SP, #-16]!
     STP X7, X30, [SP, #-16]!
-    STR Q0, [SP, #-16]!
-    /* Load the address of _ZN6__xray19XRayPatchedFunctionE into X1 */
-    LDR X1, =_ZN6__xray19XRayPatchedFunctionE
+    STP Q0, Q1, [SP, #-32]!
+    STP Q2, Q3, [SP, #-32]!
+    STP Q4, Q5, [SP, #-32]!
+    STP Q6, Q7, [SP, #-32]!
+    /* X8 is the indirect result register and needs to be preserved for the body
+       of the function to use */
+    STP X8, X0, [SP, #-16]!
+
+    /* Load the page address of _ZN6__xray19XRayPatchedFunctionE into X1 */
+    ADRP X1, _ZN6__xray19XRayPatchedFunctionE
     /* Load the handler function pointer into X2 */
-    LDR X2, [X1]
+    LDR X2, [X1, #:lo12:_ZN6__xray19XRayPatchedFunctionE]
     /* Handler address is nullptr if handler is not set */
     CMP X2, #0
     BEQ FunctionExit_restore
@@ -83,7 +97,11 @@ __xray_FunctionExit:
     /* Call the handler with 2 parameters in W0 and X1 */
     BLR X2
 FunctionExit_restore:
-    LDR Q0, [SP], #16
+    LDP X8, X0, [SP], #16
+    LDP Q6, Q7, [SP], #32
+    LDP Q4, Q5, [SP], #32
+    LDP Q2, Q3, [SP], #32
+    LDP Q0, Q1, [SP], #32
     LDP X7, X30, [SP], #16
     LDP X5, X6, [SP], #16
     LDP X3, X4, [SP], #16
@@ -94,6 +112,7 @@ FunctionExit_restore:
     .p2align 2
     /* Let C/C++ see the symbol */
     .global __xray_FunctionTailExit
+    .hidden __xray_FunctionTailExit
     .type __xray_FunctionTailExit, %function
     /* In C++ it is void extern "C" __xray_FunctionTailExit(uint32_t FuncId)
        with FuncId passed in W0 register. */
@@ -114,10 +133,10 @@ __xray_FunctionTailExit:
     STP Q2, Q3, [SP, #-32]!
     STP Q4, Q5, [SP, #-32]!
     STP Q6, Q7, [SP, #-32]!
-    /* Load the address of _ZN6__xray19XRayPatchedFunctionE into X1 */
-    LDR X1, =_ZN6__xray19XRayPatchedFunctionE
+    /* Load the page address of _ZN6__xray19XRayPatchedFunctionE into X1 */
+    ADRP X1, _ZN6__xray19XRayPatchedFunctionE
     /* Load the handler function pointer into X2 */
-    LDR X2, [X1]
+    LDR X2, [X1, #:lo12:_ZN6__xray19XRayPatchedFunctionE]
     /* Handler address is nullptr if handler is not set */
     CMP X2, #0
     BEQ FunctionTailExit_restore
diff --git a/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_arm.S b/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_arm.S
index 71dbee65d82..3ffc1e44376 100644
--- a/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_arm.S
+++ b/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_arm.S
@@ -10,6 +10,7 @@
     .p2align 2
     @ Let C/C++ see the symbol
     .global __xray_FunctionEntry
+    .hidden __xray_FunctionEntry
     @ It preserves all registers except r0, r12(ip), r14(lr) and r15(pc)
     @ Assume that "q" part of the floating-point registers is not used
    @   for passing parameters to C/C++ functions.
@@ -20,9 +21,9 @@ __xray_FunctionEntry:
     PUSH {r1-r3,lr}
     @ Save floating-point parameters of the instrumented function
     VPUSH {d0-d7}
-    MOVW r1,#:lower16:_ZN6__xray19XRayPatchedFunctionE
-    MOVT r1,#:upper16:_ZN6__xray19XRayPatchedFunctionE
-    LDR r2, [r1]
+    MOVW r1, #:lower16:_ZN6__xray19XRayPatchedFunctionE - (. + 16)
+    MOVT r1, #:upper16:_ZN6__xray19XRayPatchedFunctionE - (. + 12)
+    LDR r2, [pc, r1]
     @ Handler address is nullptr if handler is not set
     CMP r2, #0
     BEQ FunctionEntry_restore
@@ -40,6 +41,7 @@ FunctionEntry_restore:
     .p2align 2
     @ Let C/C++ see the symbol
     .global __xray_FunctionExit
+    .hidden __xray_FunctionExit
     @ Assume that d1-d7 are not used for the return value.
     @ Assume that "q" part of the floating-point registers is not used for the
    @   return value in C/C++.
@@ -51,9 +53,9 @@ __xray_FunctionExit:
     @ Save the floating-point return value of the instrumented function
     VPUSH {d0}
     @ Load the handler address
-    MOVW r1,#:lower16:_ZN6__xray19XRayPatchedFunctionE
-    MOVT r1,#:upper16:_ZN6__xray19XRayPatchedFunctionE
-    LDR r2, [r1]
+    MOVW r1, #:lower16:_ZN6__xray19XRayPatchedFunctionE - (. + 16)
+    MOVT r1, #:upper16:_ZN6__xray19XRayPatchedFunctionE - (. + 12)
+    LDR r2, [pc, r1]
     @ Handler address is nullptr if handler is not set
     CMP r2, #0
     BEQ FunctionExit_restore
@@ -71,6 +73,7 @@ FunctionExit_restore:
     .p2align 2
     @ Let C/C++ see the symbol
     .global __xray_FunctionTailExit
+    .hidden __xray_FunctionTailExit
     @ It preserves all registers except r0, r12(ip), r14(lr) and r15(pc)
     @ Assume that "q" part of the floating-point registers is not used
    @   for passing parameters to C/C++ functions.
@@ -81,9 +84,9 @@ __xray_FunctionTailExit:
     PUSH {r1-r3,lr}
     @ Save floating-point parameters of the instrumented function
     VPUSH {d0-d7}
-    MOVW r1,#:lower16:_ZN6__xray19XRayPatchedFunctionE
-    MOVT r1,#:upper16:_ZN6__xray19XRayPatchedFunctionE
-    LDR r2, [r1]
+    MOVW r1, #:lower16:_ZN6__xray19XRayPatchedFunctionE - (. + 16)
+    MOVT r1, #:upper16:_ZN6__xray19XRayPatchedFunctionE - (. + 12)
+    LDR r2, [pc, r1]
     @ Handler address is nullptr if handler is not set
     CMP r2, #0
     BEQ FunctionTailExit_restore
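
The recurring .hidden additions in these trampoline files keep the XRay entry
points out of the dynamic symbol table: calls to them bind directly at static
link time rather than routing through the PLT, and no other DSO can interpose
them. The C++ spelling of the same idea (GCC/Clang attribute syntax;
hypothetical function name):

    // Hidden visibility: visible to the static linker, but not exported from
    // the shared object, mirroring the .hidden directive above.
    extern "C" __attribute__((visibility("hidden"))) void
    xray_local_entry_point() {
      // Body elided; only the linkage and visibility matter for this sketch.
    }
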
diff --git a/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_x86_64.S b/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_x86_64.S
index 1e58362cdc8..12c5a6ccd9a 100644
--- a/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_x86_64.S
+++ b/gnu/llvm/compiler-rt/lib/xray/xray_trampoline_x86_64.S
@@ -98,6 +98,7 @@
 //===----------------------------------------------------------------------===//
 
   .globl ASM_SYMBOL(__xray_FunctionEntry)
+  ASM_HIDDEN(__xray_FunctionEntry)
   .align 16, 0x90
   ASM_TYPE_FUNCTION(__xray_FunctionEntry)
 # LLVM-MCA-BEGIN __xray_FunctionEntry
@@ -126,6 +127,7 @@ ASM_SYMBOL(__xray_FunctionEntry):
 //===----------------------------------------------------------------------===//
 
   .globl ASM_SYMBOL(__xray_FunctionExit)
+  ASM_HIDDEN(__xray_FunctionExit)
   .align 16, 0x90
   ASM_TYPE_FUNCTION(__xray_FunctionExit)
 # LLVM-MCA-BEGIN __xray_FunctionExit
@@ -166,6 +168,7 @@ ASM_SYMBOL(__xray_FunctionExit):
 //===----------------------------------------------------------------------===//
 
   .globl ASM_SYMBOL(__xray_FunctionTailExit)
+  ASM_HIDDEN(__xray_FunctionTailExit)
   .align 16, 0x90
   ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
 # LLVM-MCA-BEGIN __xray_FunctionTailExit
@@ -192,6 +195,7 @@ ASM_SYMBOL(__xray_FunctionTailExit):
 //===----------------------------------------------------------------------===//
 
   .globl ASM_SYMBOL(__xray_ArgLoggerEntry)
+  ASM_HIDDEN(__xray_ArgLoggerEntry)
   .align 16, 0x90
   ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
 # LLVM-MCA-BEGIN __xray_ArgLoggerEntry
@@ -231,6 +235,7 @@ ASM_SYMBOL(__xray_ArgLoggerEntry):
 //===----------------------------------------------------------------------===//
 
   .global ASM_SYMBOL(__xray_CustomEvent)
+  ASM_HIDDEN(__xray_CustomEvent)
   .align 16, 0x90
   ASM_TYPE_FUNCTION(__xray_CustomEvent)
 # LLVM-MCA-BEGIN __xray_CustomEvent
@@ -256,6 +261,7 @@ ASM_SYMBOL(__xray_CustomEvent):
 //===----------------------------------------------------------------------===//
 
   .global ASM_SYMBOL(__xray_TypedEvent)
+  ASM_HIDDEN(__xray_TypedEvent)
   .align 16, 0x90
   ASM_TYPE_FUNCTION(__xray_TypedEvent)
 # LLVM-MCA-BEGIN __xray_TypedEvent
diff --git a/gnu/llvm/compiler-rt/lib/xray/xray_utils.cpp b/gnu/llvm/compiler-rt/lib/xray/xray_utils.cpp
index 1036d17a772..4c8ad5b92be 100644
--- a/gnu/llvm/compiler-rt/lib/xray/xray_utils.cpp
+++ b/gnu/llvm/compiler-rt/lib/xray/xray_utils.cpp
@@ -69,6 +69,10 @@ void LogWriter::WriteAll(const char *Begin, const char *End) XRAY_NEVER_INSTRUME
     return;
   }
   Offset += TotalBytes;
+
+  // Record the data size as a property of the VMO.
+  _zx_object_set_property(Vmo, ZX_PROP_VMO_CONTENT_SIZE,
+                          &Offset, sizeof(Offset));
 }
 
 void LogWriter::Flush() XRAY_NEVER_INSTRUMENT {
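
The xray_utils.cpp hunk is in the Fuchsia-specific LogWriter path: logs are
written into a Zircon VMO, whose mapped size is page-granular, so the writer
records how many bytes are actually meaningful. A sketch of the call
(Fuchsia-only; header layout assumed from the Zircon sysroot):

    #include <zircon/syscalls.h>

    // ZX_PROP_VMO_CONTENT_SIZE records the logical end of data, which readers
    // can consult instead of the VMO's page-rounded size.
    void recordContentSize(zx_handle_t Vmo, uint64_t Offset) {
      _zx_object_set_property(Vmo, ZX_PROP_VMO_CONTENT_SIZE, &Offset,
                              sizeof(Offset));
    }
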
diff --git a/gnu/llvm/compiler-rt/lib/xray/xray_x86_64.cpp b/gnu/llvm/compiler-rt/lib/xray/xray_x86_64.cpp
index e63ee1b3bd0..f3742ac7129 100644
--- a/gnu/llvm/compiler-rt/lib/xray/xray_x86_64.cpp
+++ b/gnu/llvm/compiler-rt/lib/xray/xray_x86_64.cpp
@@ -151,23 +151,24 @@ bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
   //   opcode and first operand.
   //
   // Prerequisite is to compute the relative offset to the trampoline's address.
+  const uint64_t Address = Sled.address();
   int64_t TrampolineOffset = reinterpret_cast<int64_t>(Trampoline) -
-                             (static_cast<int64_t>(Sled.Address) + 11);
+                             (static_cast<int64_t>(Address) + 11);
   if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
-    Report("XRay Entry trampoline (%p) too far from sled (%p)\n",
-           Trampoline, reinterpret_cast<void *>(Sled.Address));
+    Report("XRay Entry trampoline (%p) too far from sled (%p)\n", Trampoline,
+           reinterpret_cast<void *>(Address));
     return false;
   }
   if (Enable) {
-    *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
-    *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
-    *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+    *reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
+    *reinterpret_cast<uint8_t *>(Address + 6) = CallOpCode;
+    *reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
         std::memory_order_release);
   } else {
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp9Seq,
         std::memory_order_release);
     // FIXME: Write out the nops still?
   }
@@ -196,23 +197,24 @@ bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
   //
   // Prerequisite is to compute the relative offset of the
   // __xray_FunctionExit function's address.
+  const uint64_t Address = Sled.address();
   int64_t TrampolineOffset = reinterpret_cast<int64_t>(__xray_FunctionExit) -
-                             (static_cast<int64_t>(Sled.Address) + 11);
+                             (static_cast<int64_t>(Address) + 11);
   if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
     Report("XRay Exit trampoline (%p) too far from sled (%p)\n",
-           __xray_FunctionExit, reinterpret_cast<void *>(Sled.Address));
+           __xray_FunctionExit, reinterpret_cast<void *>(Address));
     return false;
   }
   if (Enable) {
-    *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
-    *reinterpret_cast<uint8_t *>(Sled.Address + 6) = JmpOpCode;
-    *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+    *reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
+    *reinterpret_cast<uint8_t *>(Address + 6) = JmpOpCode;
+    *reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
         std::memory_order_release);
   } else {
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint8_t> *>(Sled.Address), RetOpCode,
+        reinterpret_cast<std::atomic<uint8_t> *>(Address), RetOpCode,
         std::memory_order_release);
     // FIXME: Write out the nops still?
   }
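
The enable path above is deliberately ordered for cross-modifying code: the
FuncId immediate, the call/jmp opcode, and the rel32 displacement are written
while the sled still begins with its benign two-byte short jump, and only then
does an atomic release store flip the first two bytes into the start of
mov r10d, imm32. A thread racing through the sled therefore sees either the
old jump or the fully formed sequence, never a torn mix. Distilled (constants
elided; a sketch, not the exact upstream helper):

    #include <atomic>
    #include <cstdint>

    // Fill in the bytes past the first two, then atomically publish the new
    // first instruction of the sled.
    void enableSled(uint64_t Address, uint32_t FuncId,
                    uint32_t TrampolineOffset, uint8_t CallOpCode,
                    uint16_t MovR10Seq) {
      *reinterpret_cast<uint32_t *>(Address + 2) = FuncId;           // imm32
      *reinterpret_cast<uint8_t *>(Address + 6) = CallOpCode;        // call
      *reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset; // rel32
      std::atomic_store_explicit(
          reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
          std::memory_order_release);
    }
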
@@ -223,24 +225,25 @@ bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
                            const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
   // Here we do the dance of replacing the tail call sled with a similar
   // sequence as the entry sled, but calls the tail exit sled instead.
+  const uint64_t Address = Sled.address();
   int64_t TrampolineOffset =
       reinterpret_cast<int64_t>(__xray_FunctionTailExit) -
-      (static_cast<int64_t>(Sled.Address) + 11);
+      (static_cast<int64_t>(Address) + 11);
   if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
     Report("XRay Tail Exit trampoline (%p) too far from sled (%p)\n",
-           __xray_FunctionTailExit, reinterpret_cast<void *>(Sled.Address));
+           __xray_FunctionTailExit, reinterpret_cast<void *>(Address));
     return false;
   }
   if (Enable) {
-    *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
-    *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
-    *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+    *reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
+    *reinterpret_cast<uint8_t *>(Address + 6) = CallOpCode;
+    *reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
         std::memory_order_release);
   } else {
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp9Seq,
         std::memory_order_release);
     // FIXME: Write out the nops still?
   }
@@ -267,26 +270,28 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
   //
   // ---
   //
-  // In Version 1:
+  // In Version 1 or 2:
   //
   // The jump offset is now 15 bytes (0x0f), so when restoring the nopw back
   // to a jmp, use 15 bytes instead.
   //
+  const uint64_t Address = Sled.address();
   if (Enable) {
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), NopwSeq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), NopwSeq,
         std::memory_order_release);
   } else {
     switch (Sled.Version) {
     case 1:
+    case 2:
       std::atomic_store_explicit(
-          reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp15Seq,
+          reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp15Seq,
          std::memory_order_release);
       break;
     case 0:
     default:
       std::atomic_store_explicit(
-          reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp20Seq,
+          reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp20Seq,
          std::memory_order_release);
       break;
     }
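
Restoring a disabled custom-event sled has to honor the sled's version, since
version 1 shrank the jump over the event call from 20 bytes to 15 and version
2 keeps that layout. The dispatch, pulled out on its own:

    #include <cstdint>

    // Pick the disabled-state jump encoding appropriate to the sled version.
    uint16_t disabledJumpFor(unsigned char Version, uint16_t Jmp15Seq,
                             uint16_t Jmp20Seq) {
      switch (Version) {
      case 1:
      case 2:
        return Jmp15Seq; // Versions 1 and 2 jump 15 bytes (0x0f).
      case 0:
      default:
        return Jmp20Seq; // The original layout jumped 20 bytes.
      }
    }
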
@@ -313,14 +318,15 @@ bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
   // unstashes the registers and returns. If the arguments are already in
   // the correct registers, the stashing and unstashing become equivalently
   // sized nops.
+  const uint64_t Address = Sled.address();
   if (Enable) {
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), NopwSeq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), NopwSeq,
         std::memory_order_release);
   } else {
-    std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp20Seq,
-        std::memory_order_release);
+    std::atomic_store_explicit(
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp20Seq,
+        std::memory_order_release);
   }
   return false;
 }
diff --git a/gnu/llvm/compiler-rt/utils/generate_netbsd_ioctls.awk b/gnu/llvm/compiler-rt/utils/generate_netbsd_ioctls.awk
index 0d3f01a54b6..29840c99068 100755
--- a/gnu/llvm/compiler-rt/utils/generate_netbsd_ioctls.awk
+++ b/gnu/llvm/compiler-rt/utils/generate_netbsd_ioctls.awk
@@ -256,6 +256,8 @@ END {
   # Add compat entries
   add_compat("dev/filemon/filemon.h (compat <= 9.99.26)", "FILEMON_SET_FD", "READWRITE", "sizeof(int)")
   add_compat("", "FILEMON_SET_PID", "READWRITE", "sizeof(int)")
+  add_compat("dev/usb/urio.h (compat <= 9.99.43)", "URIO_SEND_COMMAND", "READWRITE", "struct_urio_command_sz")
+  add_compat("", "URIO_RECV_COMMAND", "READWRITE", "struct_urio_command_sz")
 
   # Generate sanitizer_interceptors_ioctl_netbsd.inc