tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

BytecodeAnalysis.cpp (11005B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/BytecodeAnalysis.h"
      8 
      9 #include "mozilla/PodOperations.h"
     10 
     11 #include "jit/JitSpewer.h"
     12 #include "jit/WarpBuilder.h"
     13 #include "vm/BytecodeIterator.h"
     14 #include "vm/BytecodeLocation.h"
     15 #include "vm/BytecodeUtil.h"
     16 #include "vm/Opcodes.h"
     17 
     18 #include "vm/BytecodeIterator-inl.h"
     19 #include "vm/BytecodeLocation-inl.h"
     20 #include "vm/JSScript-inl.h"
     21 
     22 using namespace js;
     23 using namespace js::jit;
     24 
// While Warp can compile generators and async functions, it may not always be
// profitable to do so due to the incomplete support that we have (See bug
// 1681338 for details)
     28 //
     29 // As an example, in Bug 1839078 the overhead of constantly OSR'ing back into a
     30 // Warp body eats any benefit that might have been obtained via warp.
     31 //
     32 // This class implements the heuristic that yield can only be allowed in a Warp
     33 // body under two circumstances:
     34 //
     35 // - There is an inner loop, which is presumed to do work that will provide
//   enough work to avoid pathological cases
     37 // - There is sufficient bytecode around the yield that we expect Warp
     38 //   compilation to drive enough benefit that we will still let yield occur.
     39 //
     40 // This is of course a heuristic, and can of course be defeated.
     41 class YieldAnalyzer {
     42  struct LoopInfo {
     43    bool hasInnerLoop = false;
     44    bool sawYield = false;
     45    size_t bytecodeOps = 0;
     46  };
     47 
     48  // The minimum amount of bytecode to allow in a yielding loop.
     49  //
     50  // This number is extremely arbitrary, and may be too low by an order of
     51  // magnitude or more.
     52  static const size_t BYTECODE_MINIUM = 40;
     53 
     54  Vector<LoopInfo, 0, JitAllocPolicy> loopInfos;
     55  bool allowIon = true;
     56 
     57 public:
     58  explicit YieldAnalyzer(TempAllocator& alloc) : loopInfos(alloc) {}
     59 
     60  [[nodiscard]] bool init() {
     61    // a pretend outer loop for the function body.
     62    return loopInfos.emplaceBack();
     63  }
     64 
     65  void analyzeBackedgeForIon() {
     66    const LoopInfo& loopInfo = loopInfos.back();
     67    if (loopInfo.sawYield) {
     68      if (!loopInfo.hasInnerLoop && loopInfo.bytecodeOps < BYTECODE_MINIUM) {
     69        allowIon = false;
     70      }
     71    }
     72 
     73    loopInfos.popBack();
     74  }
     75 
     76  bool canIon() {
     77    // Analyze the host function as if it were  a loop;
     78    //
     79    // This should help us avoid ion compiling a tiny function which just
     80    // yields.
     81    analyzeBackedgeForIon();
     82 
     83    MOZ_ASSERT(loopInfos.empty());
     84 
     85    return allowIon;
     86  }
     87 
     88  [[nodiscard]] bool handleBytecode(BytecodeLocation loc) {
     89    LoopInfo& loopInfo = loopInfos.back();
     90 
     91    loopInfo.bytecodeOps++;
     92 
     93    if (loc.is(JSOp::LoopHead)) {
     94      loopInfo.hasInnerLoop = true;
     95 
     96      // Bail out here because the below two cases won't be hit.
     97      return loopInfos.emplaceBack();
     98    }
     99 
    100    if (loc.is(JSOp::Yield) || loc.is(JSOp::FinalYieldRval)) {
    101      loopInfo.sawYield = true;
    102    }
    103 
    104    if (loc.isBackedge()) {
    105      analyzeBackedgeForIon();
    106    }
    107 
    108    return true;
    109  }
    110 };
    111 
// Constructs an empty analysis for |script|; init() must be called (and
// succeed) before the per-offset infos are usable.
BytecodeAnalysis::BytecodeAnalysis(TempAllocator& alloc, JSScript* script)
    : script_(script), infos_(alloc) {}
    114 
// Walks every bytecode op of script_ once, computing per-offset stack depths,
// jump-target/loop-head flags, and "normally reachable" information, and
// running the YieldAnalyzer heuristic. Returns false only on OOM.
bool BytecodeAnalysis::init(TempAllocator& alloc) {
  if (!infos_.growByUninitialized(script_->length())) {
    return false;
  }

  // Clear all BytecodeInfo.
  mozilla::PodZero(infos_.begin(), infos_.length());
  infos_[0].init(/*stackDepth=*/0);

  // WarpBuilder can compile try blocks, but doesn't support handling
  // exceptions. If exception unwinding would resume in a catch or finally
  // block, we instead bail out to the baseline interpreter. Finally blocks can
  // still be reached by normal means, but the catch block is unreachable and is
  // not compiled. We therefore need some special machinery to prevent OSR into
  // Warp code in the following cases:
  //
  // (1) Loops in catch blocks:
  //
  //       try {
  //         ..
  //       } catch (e) {
  //         while (..) {} // Can't OSR here.
  //       }
  //
  // (2) Loops only reachable via a catch block:
  //
  //       for (;;) {
  //         try {
  //           throw 3;
  //         } catch (e) {
  //           break;
  //         }
  //       }
  //       while (..) {} // Loop is only reachable via the catch-block.
  //
  // To deal with both of these cases, we track whether the current op is
  // 'normally reachable' (reachable without exception handling).
  // Forward jumps propagate this flag to their jump targets (see
  // BytecodeInfo::jumpTargetNormallyReachable) and when the analysis reaches a
  // jump target it updates its normallyReachable flag based on the target's
  // flag.
  //
  // Inlining a function without a normally reachable return can cause similar
  // problems. To avoid this, we mark such functions as uninlineable.
  bool normallyReachable = true;
  bool normallyReachableReturn = false;

  YieldAnalyzer analyzer(alloc);
  if (!analyzer.init()) {
    return false;
  }

  for (const BytecodeLocation& it : AllBytecodesIterable(script_)) {
    JSOp op = it.getOp();
    // Feed every op (reachable or not) to the yield heuristic; only
    // emplaceBack failure (OOM) propagates out.
    if (!analyzer.handleBytecode(it)) {
      return false;
    }

    uint32_t offset = it.bytecodeToOffset(script_);

    JitSpew(JitSpew_BaselineOp, "Analyzing op @ %u (end=%u): %s",
            unsigned(offset), unsigned(script_->length()), CodeName(op));

    checkWarpSupport(op);

    // If this bytecode info has not yet been initialized, it's not reachable.
    if (!infos_[offset].initialized) {
      continue;
    }

    uint32_t stackDepth = infos_[offset].stackDepth;

    // On reaching a jump target, adopt the reachability that was propagated
    // to it by the jumps (and fallthroughs) that lead here.
    if (infos_[offset].jumpTarget) {
      normallyReachable = infos_[offset].jumpTargetNormallyReachable;
    }

#ifdef DEBUG
    // Sanity check: no interior byte of this op may have been initialized as
    // if it were an op start.
    size_t endOffset = offset + it.length();
    for (size_t checkOffset = offset + 1; checkOffset < endOffset;
         checkOffset++) {
      MOZ_ASSERT(!infos_[checkOffset].initialized);
    }
#endif
    uint32_t nuses = it.useCount();
    uint32_t ndefs = it.defCount();

    MOZ_ASSERT(stackDepth >= nuses);
    stackDepth -= nuses;
    stackDepth += ndefs;

    // If stack depth exceeds max allowed by analysis, fail fast.
    MOZ_ASSERT(stackDepth <= BytecodeInfo::MAX_STACK_DEPTH);

    switch (op) {
      case JSOp::TableSwitch: {
        // Initialize the default target and every distinct case target with
        // the post-op stack depth.
        uint32_t defaultOffset = it.getTableSwitchDefaultOffset(script_);
        int32_t low = it.getTableSwitchLow();
        int32_t high = it.getTableSwitchHigh();

        infos_[defaultOffset].init(stackDepth);
        infos_[defaultOffset].setJumpTarget(normallyReachable);

        uint32_t ncases = high - low + 1;

        for (uint32_t i = 0; i < ncases; i++) {
          uint32_t targetOffset = it.tableSwitchCaseOffset(script_, i);
          if (targetOffset != defaultOffset) {
            infos_[targetOffset].init(stackDepth);
            infos_[targetOffset].setJumpTarget(normallyReachable);
          }
        }
        break;
      }

      case JSOp::Try: {
        // Mark the matching catch/finally entry points as jump targets that
        // are NOT normally reachable (only exception unwinding gets there).
        for (const TryNote& tn : script_->trynotes()) {
          if (tn.start == offset + JSOpLength_Try &&
              (tn.kind() == TryNoteKind::Catch ||
               tn.kind() == TryNoteKind::Finally)) {
            uint32_t catchOrFinallyOffset = tn.start + tn.length;
            // Finally targets are entered with three extra stack slots.
            uint32_t targetDepth =
                tn.kind() == TryNoteKind::Finally ? stackDepth + 3 : stackDepth;
            BytecodeInfo& targetInfo = infos_[catchOrFinallyOffset];
            targetInfo.init(targetDepth);
            targetInfo.setJumpTarget(/* normallyReachable = */ false);
          }
        }
        break;
      }

      case JSOp::LoopHead:
        // OSR into Warp is only allowed at loop heads reached without
        // exception handling.
        infos_[offset].loopHeadCanOsr = normallyReachable;
        break;

#ifdef DEBUG
      case JSOp::Exception:
      case JSOp::ExceptionAndStack:
        // Sanity check: ops only emitted in catch blocks are never
        // normally reachable.
        MOZ_ASSERT(!normallyReachable);
        break;
#endif

      case JSOp::Return:
      case JSOp::RetRval:
        if (normallyReachable) {
          normallyReachableReturn = true;
        }
        break;

      default:
        break;
    }

    bool jump = it.isJump();
    if (jump) {
      // Case instructions do not push the lvalue back when branching.
      uint32_t newStackDepth = stackDepth;
      if (it.is(JSOp::Case)) {
        newStackDepth--;
      }

      uint32_t targetOffset = it.getJumpTargetOffset(script_);

#ifdef DEBUG
      // If this is a backedge, the target JSOp::LoopHead must have been
      // analyzed already. Furthermore, if the backedge is normally reachable,
      // the loop head must be normally reachable too (loopHeadCanOsr can be
      // used to check this since it's equivalent).
      if (targetOffset < offset) {
        MOZ_ASSERT(infos_[targetOffset].initialized);
        MOZ_ASSERT_IF(normallyReachable, infos_[targetOffset].loopHeadCanOsr);
      }
#endif

      infos_[targetOffset].init(newStackDepth);
      infos_[targetOffset].setJumpTarget(normallyReachable);
    }

    // Handle any fallthrough from this opcode.
    if (it.fallsThrough()) {
      BytecodeLocation fallthroughLoc = it.next();
      MOZ_ASSERT(fallthroughLoc.isInBounds(script_));
      uint32_t fallthroughOffset = fallthroughLoc.bytecodeToOffset(script_);

      infos_[fallthroughOffset].init(stackDepth);

      // Treat the fallthrough of a branch instruction as a jump target.
      if (jump) {
        infos_[fallthroughOffset].setJumpTarget(normallyReachable);
      }
    }
  }

  // Flag (reachable) resume offset instructions.
  for (uint32_t offset : script_->resumeOffsets()) {
    BytecodeInfo& info = infos_[offset];
    if (info.initialized) {
      info.hasResumeOffset = true;
    }
  }

  // No normally reachable return means exception-only exit paths; such
  // functions must not be inlined (see comment at the top of this function).
  if (!normallyReachableReturn) {
    disableInlining();
  }

  if (!analyzer.canIon()) {
    if (!ionDisabled()) {
      JitSpew(
          JitSpew_IonAbort,
          "Disabling Warp support for %s:%d:%d due to Yield being in a loop",
          script_->filename(), script_->lineno(),
          script_->column().oneOriginValue());
      disableIon();
    }
  }

  return true;
}
    334 
    335 void BytecodeAnalysis::checkWarpSupport(JSOp op) {
    336  switch (op) {
    337 #define DEF_CASE(OP) case JSOp::OP:
    338    WARP_UNSUPPORTED_OPCODE_LIST(DEF_CASE)
    339 #undef DEF_CASE
    340    if (!ionDisabled()) {
    341      JitSpew(JitSpew_IonAbort, "Disabling Warp support for %s:%d:%d due to %s",
    342              script_->filename(), script_->lineno(),
    343              script_->column().oneOriginValue(), CodeName(op));
    344      disableIon();
    345    }
    346    break;
    347    default:
    348      break;
    349  }
    350 }
    351 
    352 bool js::jit::ScriptUsesEnvironmentChain(JSScript* script) {
    353  if (script->isModule() || script->initialEnvironmentShape() ||
    354      (script->function() &&
    355       script->function()->needsSomeEnvironmentObject())) {
    356    return true;
    357  }
    358 
    359  AllBytecodesIterable iterator(script);
    360 
    361  for (const BytecodeLocation& location : iterator) {
    362    if (OpUsesEnvironmentChain(location.getOp())) {
    363      return true;
    364    }
    365  }
    366 
    367  return false;
    368 }