tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

stacktrace_aarch64-inl.inc (11115B)


      1 #ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
      2 #define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
      3 
      4 // Generate stack tracer for aarch64
      5 
      6 #if defined(__linux__)
      7 #include <signal.h>
      8 #include <sys/mman.h>
      9 #include <ucontext.h>
     10 #include <unistd.h>
     11 #endif
     12 
     13 #include <atomic>
     14 #include <cassert>
     15 #include <cstdint>
     16 #include <iostream>
     17 #include <limits>
     18 
     19 #include "absl/base/attributes.h"
     20 #include "absl/debugging/internal/address_is_readable.h"
     21 #include "absl/debugging/internal/addresses.h"
     22 #include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
     23 #include "absl/debugging/stacktrace.h"
     24 
     25 static const size_t kUnknownFrameSize = 0;
     26 // Stack end to use when we don't know the actual stack end
     27 // (effectively just the end of address space).
     28 constexpr uintptr_t kUnknownStackEnd =
     29    std::numeric_limits<size_t>::max() - sizeof(void *);
     30 
     31 #if defined(__linux__)
     32 // Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
     33 static const unsigned char* GetKernelRtSigreturnAddress() {
     34  constexpr uintptr_t kImpossibleAddress = 1;
     35  ABSL_CONST_INIT static std::atomic<uintptr_t> memoized{kImpossibleAddress};
     36  uintptr_t address = memoized.load(std::memory_order_relaxed);
     37  if (address != kImpossibleAddress) {
     38    return reinterpret_cast<const unsigned char*>(address);
     39  }
     40 
     41  address = reinterpret_cast<uintptr_t>(nullptr);
     42 
     43 #ifdef ABSL_HAVE_VDSO_SUPPORT
     44  absl::debugging_internal::VDSOSupport vdso;
     45  if (vdso.IsPresent()) {
     46    absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
     47    auto lookup = [&](int type) {
     48      return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type,
     49                               &symbol_info);
     50    };
     51    if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
     52        symbol_info.address == nullptr) {
     53      // Unexpected: VDSO is present, yet the expected symbol is missing
     54      // or null.
     55      assert(false && "VDSO is present, but doesn't have expected symbol");
     56    } else {
     57      if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
     58          kImpossibleAddress) {
     59        address = reinterpret_cast<uintptr_t>(symbol_info.address);
     60      } else {
     61        assert(false && "VDSO returned invalid address");
     62      }
     63    }
     64  }
     65 #endif
     66 
     67  memoized.store(address, std::memory_order_relaxed);
     68  return reinterpret_cast<const unsigned char*>(address);
     69 }
     70 #endif  // __linux__
     71 
     72 // Compute the size of a stack frame in [low..high).  We assume that
     73 // low < high.  Return size of kUnknownFrameSize.
     74 template<typename T>
     75 static size_t ComputeStackFrameSize(const T* low,
     76                                           const T* high) {
     77  const char* low_char_ptr = reinterpret_cast<const char *>(low);
     78  const char* high_char_ptr = reinterpret_cast<const char *>(high);
     79  return low < high ? static_cast<size_t>(high_char_ptr - low_char_ptr)
     80                    : kUnknownFrameSize;
     81 }
     82 
// Saves stack info that is expensive to calculate to avoid recalculating per frame.
struct StackInfo {
 // Lower/upper bounds of the thread's stack; stack_high is kUnknownStackEnd
 // when the true stack end is unknown.
 uintptr_t stack_low;
 uintptr_t stack_high;
 // Lower/upper bounds of the signal-handler stack (see InsideSignalStack);
 // sig_stack_high is kUnknownStackEnd when no signal stack is known.
 uintptr_t sig_stack_low;
 uintptr_t sig_stack_high;
};
     90 
     91 static bool InsideSignalStack(void** ptr, const StackInfo* stack_info) {
     92  uintptr_t comparable_ptr = reinterpret_cast<uintptr_t>(ptr);
     93  if (stack_info->sig_stack_high == kUnknownStackEnd)
     94    return false;
     95  return (comparable_ptr >= stack_info->sig_stack_low &&
     96          comparable_ptr < stack_info->sig_stack_high);
     97 }
     98 
     99 // Given a pointer to a stack frame, locate and return the calling
    100 // stackframe, or return null if no stackframe can be found. Perform sanity
    101 // checks (the strictness of which is controlled by the boolean parameter
    102 // "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
    103 template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
    104 ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
    105 ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY  // May read random elements from stack.
    106 static void **NextStackFrame(void **old_frame_pointer, const void *uc,
    107                             const StackInfo *stack_info) {
    108  void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
    109 
    110 #if defined(__linux__)
    111  if (WITH_CONTEXT && uc != nullptr) {
    112    // Check to see if next frame's return address is __kernel_rt_sigreturn.
    113    if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
    114      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
    115      // old_frame_pointer[0] is not suitable for unwinding, look at
    116      // ucontext to discover frame pointer before signal.
    117      void **const pre_signal_frame_pointer =
    118          reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);
    119 
    120      // The most recent signal always needs special handling to find the frame
    121      // pointer, but a nested signal does not.  If pre_signal_frame_pointer is
    122      // earlier in the stack than the old_frame_pointer, then use it. If it is
    123      // later, then we have already unwound through it and it needs no special
    124      // handling.
    125      if (pre_signal_frame_pointer >= old_frame_pointer) {
    126        new_frame_pointer = pre_signal_frame_pointer;
    127      }
    128  }
    129 #endif
    130 
    131  // The frame pointer should be 8-byte aligned.
    132  if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 7) != 0)
    133    return nullptr;
    134 
    135  // Check that alleged frame pointer is actually readable. This is to
    136  // prevent "double fault" in case we hit the first fault due to e.g.
    137  // stack corruption.
    138  if (!absl::debugging_internal::AddressIsReadable(
    139          new_frame_pointer))
    140    return nullptr;
    141  }
    142 
    143  // Only check the size if both frames are in the same stack.
    144  if (InsideSignalStack(new_frame_pointer, stack_info) ==
    145      InsideSignalStack(old_frame_pointer, stack_info)) {
    146    // Check frame size.  In strict mode, we assume frames to be under
    147    // 100,000 bytes.  In non-strict mode, we relax the limit to 1MB.
    148    const size_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
    149    const size_t frame_size =
    150        ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
    151    if (frame_size == kUnknownFrameSize)
    152       return nullptr;
    153    // A very large frame may mean corrupt memory or an erroneous frame
    154    // pointer. But also maybe just a plain-old large frame.  Assume that if the
    155    // frame is within a known stack, then it is valid.
    156    if (frame_size > max_size) {
    157      size_t stack_low = stack_info->stack_low;
    158      size_t stack_high = stack_info->stack_high;
    159      if (InsideSignalStack(new_frame_pointer, stack_info)) {
    160        stack_low = stack_info->sig_stack_low;
    161        stack_high = stack_info->sig_stack_high;
    162      }
    163      if (stack_high < kUnknownStackEnd &&
    164          static_cast<size_t>(getpagesize()) < stack_low) {
    165        const uintptr_t new_fp_u =
    166            reinterpret_cast<uintptr_t>(new_frame_pointer);
    167        // Stack bounds are known.
    168        if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
    169          // new_frame_pointer is not within a known stack.
    170          return nullptr;
    171        }
    172      } else {
    173        // Stack bounds are unknown, prefer truncated stack to possible crash.
    174        return nullptr;
    175      }
    176    }
    177  }
    178 
    179  return new_frame_pointer;
    180 }
    181 
// Walks the frame-pointer chain starting at this function's own frame and
// records the return address of each caller.
//
//  result             - out: up to max_depth PC values, one per frame.
//  frames             - out, may be null: per-frame address just past the
//                       frame record (filled only when IS_STACK_FRAMES).
//  sizes              - out, may be null: per-frame size estimate
//                       (filled only when IS_STACK_FRAMES).
//  max_depth          - maximum number of frames to record.
//  skip_count         - innermost frames to skip (one more is added below
//                       for this function's own frame).
//  ucp                - signal ucontext, consulted only when IS_WITH_CONTEXT.
//  min_dropped_frames - out, may be null: lower bound on frames that did
//                       not fit in result (counting clamped, see below).
// Returns the number of entries written to result.
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
// We count on the bottom frame being this one. See the comment
// at prev_return_address
ABSL_ATTRIBUTE_NOINLINE
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static int UnwindImpl(void **result, uintptr_t *frames, int *sizes,
                     int max_depth, int skip_count, const void *ucp,
                     int *min_dropped_frames) {
#ifdef __GNUC__
 // Anchor the walk at this function's own frame record.
 void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
#else
# error reading stack point not yet supported on this platform.
#endif
 skip_count++;    // Skip the frame for this function.
 int n = 0;

 // Assume that the first page is not stack.
 StackInfo stack_info;
 stack_info.stack_low = static_cast<uintptr_t>(getpagesize());
 stack_info.stack_high = kUnknownStackEnd;
 stack_info.sig_stack_low = stack_info.stack_low;
 stack_info.sig_stack_high = kUnknownStackEnd;

 // The frame pointer points to low address of a frame.  The first 64-bit
 // word of a frame points to the next frame up the call chain, which normally
 // is just after the high address of the current frame.  The second word of
 // a frame contains return address of to the caller.   To find a pc value
 // associated with the current frame, we need to go down a level in the call
 // chain.  So we remember return the address of the last frame seen.  This
 // does not work for the first stack frame, which belongs to UnwindImp() but
 // we skip the frame for UnwindImp() anyway.
 void* prev_return_address = nullptr;
 // The nth frame size is the difference between the nth frame pointer and the
 // the frame pointer below it in the call chain. There is no frame below the
 // leaf frame, but this function is the leaf anyway, and we skip it.
 void** prev_frame_pointer = nullptr;

  while (frame_pointer && n < max_depth) {
   if (skip_count > 0) {
     skip_count--;
   } else {
     // Record the PREVIOUS iteration's return address/frame: the PC for a
     // frame comes from the frame one level below it (see comment above).
     result[n] = reinterpret_cast<void *>(
         absl::debugging_internal::StripPointerMetadata(prev_return_address));
     if (IS_STACK_FRAMES) {
       if (frames != nullptr) {
         frames[n] = absl::debugging_internal::StripPointerMetadata(
                         prev_frame_pointer) +
                     2 * sizeof(void *) /* go past the return address */;
       }
       if (sizes != nullptr) {
         sizes[n] = static_cast<int>(
             ComputeStackFrameSize(prev_frame_pointer, frame_pointer));
       }
     }
     n++;
   }
   prev_return_address = frame_pointer[1];
   prev_frame_pointer = frame_pointer;
   // The absl::GetStackFrames routine is called when we are in some
   // informational context (the failure signal handler for example).
   // Use the non-strict unwinding rules to produce a stack trace
   // that is as complete as possible (even if it contains a few bogus
   // entries in some rare cases).
   frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
       frame_pointer, ucp, &stack_info);
 }

 if (min_dropped_frames != nullptr) {
   // Implementation detail: we clamp the max of frames we are willing to
   // count, so as not to spend too much time in the loop below.
   const int kMaxUnwind = 200;
   int num_dropped_frames = 0;
   // Continue walking from where the recording loop stopped, counting (but
   // not storing) the frames that didn't fit.
   for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
     if (skip_count > 0) {
       skip_count--;
     } else {
       num_dropped_frames++;
     }
     frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
         frame_pointer, ucp, &stack_info);
   }
   *min_dropped_frames = num_dropped_frames;
 }
 return n;
}
    268 
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
// Reports that the frame-pointer-based unwinder in this file is expected to
// produce usable traces on this platform (presumably consumed by the
// stacktrace tests, per the name).
bool StackTraceWorksForTest() {
 return true;
}
}  // namespace debugging_internal
ABSL_NAMESPACE_END
}  // namespace absl
    278 
    279 #endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_