// CacheIR.h
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_CacheIR_h
#define jit_CacheIR_h

#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"

#include <algorithm>
#include <stddef.h>
#include <stdint.h>

#include "jstypes.h"

#include "jit/CacheIROpsGenerated.h"
#include "js/GCAnnotations.h"
#include "js/Value.h"

struct JS_PUBLIC_API JSContext;

namespace js {
namespace jit {

// [SMDOC] CacheIR
//
// CacheIR is an (extremely simple) linear IR language for inline caches.
// From this IR, we can generate machine code for Baseline or Ion IC stubs.
//
// IRWriter
// --------
// CacheIR bytecode is written using IRWriter. This class also records some
// metadata that's used by the Baseline and Ion code generators to generate
// (efficient) machine code.
//
// Sharing Baseline stub code
// --------------------------
// Baseline stores data (like Shape* and fixed slot offsets) inside the ICStub
// structure, instead of embedding them directly in the JitCode. This makes
// Baseline IC code slightly slower, but allows us to share IC code between
// caches. CacheIR makes it easy to share code between stubs: stubs that have
// the same CacheIR (and CacheKind), will have the same Baseline stub code.
//
// Baseline stubs that share JitCode also share a CacheIRStubInfo structure.
// This class stores the CacheIR and the location of GC things stored in the
// stub, for the GC.
//
// JitZone has a CacheIRStubInfo* -> JitCode* weak map that's used to share both
// the IR and JitCode between Baseline CacheIR stubs.
// This HashMap owns the
// stubInfo (it uses UniquePtr), so once there are no references left to the
// shared stub code, we can also free the CacheIRStubInfo.
//
// Ion stubs
// ---------
// Unlike Baseline stubs, Ion stubs do not share stub code, and data stored in
// the IonICStub is baked into JIT code. This is one of the reasons Ion stubs
// are faster than Baseline stubs. Also note that Ion ICs contain more state
// (see IonGetPropertyIC for example) and use dynamic input/output registers,
// so sharing stub code for Ion would be much more difficult.

// An OperandId represents either a cache input or a value returned by a
// CacheIR instruction. Most code should use the ValOperandId and ObjOperandId
// classes below. The ObjOperandId class represents an operand that's known to
// be an object, just as StringOperandId represents a known string, etc.
class OperandId {
 protected:
  // Sentinel id used by the default constructor; valid() is false for it.
  static const uint16_t InvalidId = UINT16_MAX;
  uint16_t id_;

  explicit OperandId(uint16_t id) : id_(id) {}

 public:
  OperandId() : id_(InvalidId) {}
  uint16_t id() const { return id_; }
  bool valid() const { return id_ != InvalidId; }
};

// Operand holding a boxed Value of unknown type.
class ValOperandId : public OperandId {
 public:
  ValOperandId() = default;
  explicit ValOperandId(uint16_t id) : OperandId(id) {}

  bool operator==(const ValOperandId& other) const { return id_ == other.id_; }
};

class ValueTagOperandId : public OperandId {
 public:
  ValueTagOperandId() = default;
  explicit ValueTagOperandId(uint16_t id) : OperandId(id) {}
};

class IntPtrOperandId : public OperandId {
 public:
  IntPtrOperandId() = default;
  explicit IntPtrOperandId(uint16_t id) : OperandId(id) {}
};

// Operand known to be an object.
class ObjOperandId : public OperandId {
 public:
  ObjOperandId() = default;
  explicit ObjOperandId(uint16_t id) : OperandId(id) {}

  bool operator==(const ObjOperandId& other) const { return id_ == other.id_; }
  bool operator!=(const ObjOperandId& other) const { return id_ != other.id_; }
};

// Note: derives from ValOperandId, not OperandId, so a NumberOperandId can be
// used wherever a boxed-Value operand is expected.
class NumberOperandId : public ValOperandId {
 public:
  NumberOperandId() = default;
  explicit NumberOperandId(uint16_t id) : ValOperandId(id) {}
};

class StringOperandId : public OperandId {
 public:
  StringOperandId() = default;
  explicit StringOperandId(uint16_t id) : OperandId(id) {}
};

class SymbolOperandId : public OperandId {
 public:
  SymbolOperandId() = default;
  explicit SymbolOperandId(uint16_t id) : OperandId(id) {}
};

class BigIntOperandId : public OperandId {
 public:
  BigIntOperandId() = default;
  explicit BigIntOperandId(uint16_t id) : OperandId(id) {}
};

class BooleanOperandId : public OperandId {
 public:
  BooleanOperandId() = default;
  explicit BooleanOperandId(uint16_t id) : OperandId(id) {}
};

class Int32OperandId : public OperandId {
 public:
  Int32OperandId() = default;
  explicit Int32OperandId(uint16_t id) : OperandId(id) {}
};

// An operand id paired with the JSValueType it's known to have. Implicitly
// constructible from each of the typed operand id classes above.
class TypedOperandId : public OperandId {
  JSValueType type_;

 public:
  MOZ_IMPLICIT TypedOperandId(ObjOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_OBJECT) {}
  MOZ_IMPLICIT TypedOperandId(StringOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_STRING) {}
  MOZ_IMPLICIT TypedOperandId(SymbolOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_SYMBOL) {}
  MOZ_IMPLICIT TypedOperandId(BigIntOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_BIGINT) {}
  MOZ_IMPLICIT TypedOperandId(BooleanOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_BOOLEAN) {}
  MOZ_IMPLICIT TypedOperandId(Int32OperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_INT32) {}

  // These two kinds don't correspond to a single JSValueType, so they map to
  // JSVAL_TYPE_UNKNOWN.
  MOZ_IMPLICIT TypedOperandId(ValueTagOperandId val)
      : OperandId(val.id()), type_(JSVAL_TYPE_UNKNOWN) {}
  MOZ_IMPLICIT TypedOperandId(IntPtrOperandId id)
      : OperandId(id.id()), type_(JSVAL_TYPE_UNKNOWN) {}

  TypedOperandId(ValOperandId val, JSValueType type)
      : OperandId(val.id()), type_(type) {}

  JSValueType type() const { return type_; }
};

#define CACHE_IR_KINDS(_) \
  _(GetProp)              \
  _(GetElem)              \
  _(GetName)              \
  _(GetPropSuper)         \
  _(GetElemSuper)         \
  _(GetImport)            \
  _(LazyConstant)         \
  _(SetProp)              \
  _(SetElem)              \
  _(BindName)             \
  _(In)                   \
  _(HasOwn)               \
  _(CheckPrivateField)    \
  _(TypeOf)               \
  _(TypeOfEq)             \
  _(ToPropertyKey)        \
  _(InstanceOf)           \
  _(GetIterator)          \
  _(CloseIter)            \
  _(OptimizeGetIterator)  \
  _(OptimizeSpreadCall)   \
  _(Compare)              \
  _(ToBool)               \
  _(Call)                 \
  _(UnaryArith)           \
  _(BinaryArith)          \
  _(NewObject)            \
  _(NewArray)             \
  _(Lambda)

enum class CacheKind : uint8_t {
#define DEFINE_KIND(kind) kind,
  CACHE_IR_KINDS(DEFINE_KIND)
#undef DEFINE_KIND
};

extern const char* const CacheKindNames[];

extern size_t NumInputsForCacheKind(CacheKind kind);

enum class CacheOp : uint16_t {
#define DEFINE_OP(op, ...) op,
  CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP
  NumOpcodes,
};

// CacheIR opcode info that's read in performance-sensitive code. Stored as a
// single byte per op for better cache locality.
struct CacheIROpInfo {
  uint8_t argLength : 7;
  bool transpile : 1;
};
static_assert(sizeof(CacheIROpInfo) == 1);
extern const CacheIROpInfo CacheIROpInfos[];

extern const char* const CacheIROpNames[];

inline const char* CacheIRCodeName(CacheOp op) {
  return CacheIROpNames[static_cast<size_t>(op)];
}

extern const uint32_t CacheIROpHealth[];

// A typed datum stored in a CacheIR stub. The payload is kept as a uint64_t;
// sizeIsWord()/sizeIsInt64() say how wide the field is when laid out in the
// stub data.
class StubField {
 public:
  enum class Type : uint8_t {
    // These fields take up a single word.
    RawInt32,
    RawPointer,
    Shape,
    WeakShape,
    JSObject,
    WeakObject,
    Symbol,
    String,
    WeakBaseScript,
    JitCode,

    Id,
    AllocSite,

    // These fields take up 64 bits on all platforms.
    RawInt64,
    First64BitType = RawInt64,
    Value,
    WeakValue,
    Double,

    Limit
  };

  static bool sizeIsWord(Type type) {
    MOZ_ASSERT(type != Type::Limit);
    return type < Type::First64BitType;
  }

  static bool sizeIsInt64(Type type) {
    MOZ_ASSERT(type != Type::Limit);
    return type >= Type::First64BitType;
  }

  static size_t sizeInBytes(Type type) {
    if (sizeIsWord(type)) {
      return sizeof(uintptr_t);
    }
    MOZ_ASSERT(sizeIsInt64(type));
    return sizeof(int64_t);
  }

 private:
  uint64_t data_;
  Type type_;

 public:
  StubField(uint64_t data, Type type) : data_(data), type_(type) {
    // Word-sized fields must fit in a pointer on this platform.
    MOZ_ASSERT_IF(sizeIsWord(), data <= UINTPTR_MAX);
  }

  Type type() const { return type_; }

  bool sizeIsWord() const { return sizeIsWord(type_); }
  bool sizeIsInt64() const { return sizeIsInt64(type_); }

  size_t sizeInBytes() const { return sizeInBytes(type_); }

  uintptr_t asWord() const {
    MOZ_ASSERT(sizeIsWord());
    return uintptr_t(data_);
  }
  uint64_t asInt64() const {
    MOZ_ASSERT(sizeIsInt64());
    return data_;
  }
  uint64_t rawData() const { return data_; }
} JS_HAZ_GC_POINTER;

// Returns the name of a StubField::Type, e.g. for debug printing.
inline const char* StubFieldTypeName(StubField::Type ty) {
  switch (ty) {
    case StubField::Type::RawInt32:
      return "RawInt32";
    case StubField::Type::RawPointer:
      return "RawPointer";
    case StubField::Type::Shape:
      return "Shape";
    case StubField::Type::WeakShape:
      return "WeakShape";
    case StubField::Type::JSObject:
      return "JSObject";
    case StubField::Type::WeakObject:
      return "WeakObject";
    case StubField::Type::Symbol:
      return "Symbol";
    case StubField::Type::String:
      return "String";
    case StubField::Type::WeakBaseScript:
      return "WeakBaseScript";
    case StubField::Type::JitCode:
      return "JitCode";
    case StubField::Type::Id:
      return "Id";
    case StubField::Type::AllocSite:
      return "AllocSite";
    case StubField::Type::RawInt64:
      return "RawInt64";
    case StubField::Type::Value:
      return "Value";
    case StubField::Type::WeakValue:
      return "WeakValue";
    case StubField::Type::Double:
      return "Double";
    case StubField::Type::Limit:
      return "Limit";
  }
  MOZ_CRASH("Unknown StubField::Type");
}

// This class is used to wrap up information about a call to make it
// easier to convey from one function to another. (In particular,
// CacheIRWriter encodes the CallFlags in CacheIR, and CacheIRReader
// decodes them and uses them for compilation.)
class CallFlags {
 public:
  enum ArgFormat : uint8_t {
    Unknown,
    Standard,
    Spread,
    FunCall,
    FunApplyArgsObj,
    FunApplyArray,
    FunApplyNullUndefined,
    LastArgFormat = FunApplyNullUndefined
  };

  CallFlags() = default;
  explicit CallFlags(ArgFormat format) : argFormat_(format) {}
  CallFlags(ArgFormat format, bool isConstructing, bool isSameRealm,
            bool needsUninitializedThis)
      : argFormat_(format),
        isConstructing_(isConstructing),
        isSameRealm_(isSameRealm),
        needsUninitializedThis_(needsUninitializedThis) {}
  CallFlags(bool isConstructing, bool isSpread, bool isSameRealm = false,
            bool needsUninitializedThis = false)
      : argFormat_(isSpread ? Spread : Standard),
        isConstructing_(isConstructing),
        isSameRealm_(isSameRealm),
        needsUninitializedThis_(needsUninitializedThis) {}

  ArgFormat getArgFormat() const { return argFormat_; }
  bool isConstructing() const {
    // The constructing flag is only meaningful for Standard/Spread calls.
    MOZ_ASSERT_IF(isConstructing_,
                  argFormat_ == Standard || argFormat_ == Spread);
    return isConstructing_;
  }
  bool isSameRealm() const { return isSameRealm_; }
  void setIsSameRealm() { isSameRealm_ = true; }

  bool needsUninitializedThis() const { return needsUninitializedThis_; }
  void setNeedsUninitializedThis() { needsUninitializedThis_ = true; }

  // Pack the flags into one byte: low bits hold the ArgFormat, the top three
  // bits hold the boolean flags (see the static constants below).
  uint8_t toByte() const {
    // See CacheIRReader::callFlags()
    MOZ_ASSERT(argFormat_ != ArgFormat::Unknown);
    uint8_t value = getArgFormat();
    if (isConstructing()) {
      value |= CallFlags::IsConstructing;
    }
    if (isSameRealm()) {
      value |= CallFlags::IsSameRealm;
    }
    if (needsUninitializedThis()) {
      value |= CallFlags::NeedsUninitializedThis;
    }
    return value;
  }

 private:
  ArgFormat argFormat_ = ArgFormat::Unknown;
  bool isConstructing_ = false;
  bool isSameRealm_ = false;
  bool needsUninitializedThis_ = false;

  // Used for encoding/decoding
  static const uint8_t ArgFormatBits = 4;
  static const uint8_t ArgFormatMask = (1 << ArgFormatBits) - 1;
  static_assert(LastArgFormat <= ArgFormatMask, "Not enough arg format bits");
  static const uint8_t IsConstructing = 1 << 5;
  static const uint8_t IsSameRealm = 1 << 6;
  static const uint8_t NeedsUninitializedThis = 1 << 7;

  friend class CacheIRReader;
  friend class CacheIRWriter;
};

// In baseline, we have to copy args onto the stack. Below this threshold, we
// will unroll the arg copy loop. We need to clamp this before providing it as
// an arg to a CacheIR op so that everything 5 or greater can share an IC.
const uint32_t MaxUnrolledArgCopy = 5;

// Clamp |argc| to the unroll threshold above, so every call site with
// argc >= MaxUnrolledArgCopy can share a single IC.
inline uint32_t ClampFixedArgc(uint32_t argc) {
  // Plain conditional instead of std::min: this header does not include
  // <algorithm>, so std::min was only available transitively.
  return argc < MaxUnrolledArgCopy ? argc : MaxUnrolledArgCopy;
}

enum class AttachDecision {
  // We cannot attach a stub.
  NoAction,

  // We can attach a stub.
  Attach,

  // We cannot currently attach a stub, but we expect to be able to do so in the
  // future. In this case, we do not call trackNotAttached().
  TemporarilyUnoptimizable,

  // We want to attach a stub, but the result of the operation is
  // needed to generate that stub. For example, AddSlot needs to know
  // the resulting shape. Note: the attached stub will inspect the
  // inputs to the operation, so most input checks should be done
  // before the actual operation, with only minimal checks remaining
  // for the deferred portion. This prevents arbitrary scripted code
  // run by the operation from interfering with the conditions being
  // checked.
  Deferred
};

// If the input expression evaluates to an AttachDecision other than NoAction,
// return that AttachDecision. If it is NoAction, do nothing.
#define TRY_ATTACH(expr)                                    \
  do {                                                      \
    AttachDecision tryAttachTempResult_ = expr;             \
    if (tryAttachTempResult_ != AttachDecision::NoAction) { \
      return tryAttachTempResult_;                          \
    }                                                       \
  } while (0)

// Set of arguments supported by GetIndexOfArgument.
// Support for higher argument indices can be added easily, but is currently
// unneeded.
enum class ArgumentKind : uint8_t {
  Callee,
  This,
  NewTarget,
  Arg0,
  Arg1,
  Arg2,
  Arg3,
  Arg4,
  Arg5,
  Arg6,
  Arg7,
  NumKinds
};

// Number of ArgN kinds supported (Arg0..Arg7).
const uint8_t ArgumentKindArgIndexLimit =
    uint8_t(ArgumentKind::NumKinds) - uint8_t(ArgumentKind::Arg0);

// Maps a zero-based argument index to the corresponding ArgN kind.
inline ArgumentKind ArgumentKindForArgIndex(uint32_t idx) {
  MOZ_ASSERT(idx < ArgumentKindArgIndexLimit);
  return ArgumentKind(uint32_t(ArgumentKind::Arg0) + idx);
}

// This function calculates the index of an argument based on the call flags.
// addArgc is an out-parameter, indicating whether the value of argc should
// be added to the return value to find the actual index.
inline int32_t GetIndexOfArgument(ArgumentKind kind, CallFlags flags,
                                  bool* addArgc) {
  // *** STACK LAYOUT (bottom to top) ***     ******** INDEX ********
  //   Callee                                 <-- argc+1 + isConstructing
  //   ThisValue                              <-- argc   + isConstructing
  //   Args: | Arg0 |        | ArgArray |     <-- argc-1 + isConstructing
  //         | Arg1 | --or-- |          |     <-- argc-2 + isConstructing
  //         | ...  |        | (if spread |   <-- ...
  //         | ArgN |        |  call)     |   <-- 0      + isConstructing
  //   NewTarget (only if constructing)       <-- 0 (if it exists)
  //
  // If this is a spread call, then argc is always 1, and we can calculate the
  // index directly. If this is not a spread call, then the index of any
  // argument other than NewTarget depends on argc.

  // First we determine whether the caller needs to add argc.
  switch (flags.getArgFormat()) {
    case CallFlags::Standard:
      *addArgc = true;
      break;
    case CallFlags::Spread:
      // Spread calls do not have Arg1 or higher.
      MOZ_ASSERT(kind <= ArgumentKind::Arg0);
      *addArgc = false;
      break;
    case CallFlags::Unknown:
    case CallFlags::FunCall:
    case CallFlags::FunApplyArgsObj:
    case CallFlags::FunApplyArray:
    case CallFlags::FunApplyNullUndefined:
      MOZ_CRASH("Currently unreachable");
      break;
  }

  // Second, we determine the offset relative to argc.
  // In the spread case the single argument array sits between ThisValue and
  // the bottom of the arguments area, which shifts Callee/This up by one.
  bool hasArgumentArray = !*addArgc;
  switch (kind) {
    case ArgumentKind::Callee:
      return flags.isConstructing() + hasArgumentArray + 1;
    case ArgumentKind::This:
      return flags.isConstructing() + hasArgumentArray;
    case ArgumentKind::Arg0:
      return flags.isConstructing() + hasArgumentArray - 1;
    case ArgumentKind::Arg1:
      return flags.isConstructing() + hasArgumentArray - 2;
    case ArgumentKind::Arg2:
      return flags.isConstructing() + hasArgumentArray - 3;
    case ArgumentKind::Arg3:
      return flags.isConstructing() + hasArgumentArray - 4;
    case ArgumentKind::Arg4:
      return flags.isConstructing() + hasArgumentArray - 5;
    case ArgumentKind::Arg5:
      return flags.isConstructing() + hasArgumentArray - 6;
    case ArgumentKind::Arg6:
      return flags.isConstructing() + hasArgumentArray - 7;
    case ArgumentKind::Arg7:
      return flags.isConstructing() + hasArgumentArray - 8;
    case ArgumentKind::NewTarget:
      // NewTarget is at the top of the stack, so its index never depends on
      // argc; override *addArgc accordingly.
      MOZ_ASSERT(flags.isConstructing());
      *addArgc = false;
      return 0;
    default:
      MOZ_CRASH("Invalid argument kind");
  }
}

// We use this enum as GuardClass operand, instead of storing Class* pointers
// in the IR, to keep the IR compact and the same size on all platforms.
566 enum class GuardClassKind : uint8_t { 567 Array, 568 PlainObject, 569 FixedLengthArrayBuffer, 570 ImmutableArrayBuffer, 571 ResizableArrayBuffer, 572 FixedLengthSharedArrayBuffer, 573 GrowableSharedArrayBuffer, 574 FixedLengthDataView, 575 ImmutableDataView, 576 ResizableDataView, 577 MappedArguments, 578 UnmappedArguments, 579 WindowProxy, 580 JSFunction, 581 BoundFunction, 582 Set, 583 Map, 584 Date, 585 WeakMap, 586 WeakSet, 587 }; 588 589 const JSClass* ClassFor(GuardClassKind kind); 590 591 enum class ArrayBufferViewKind : uint8_t { 592 FixedLength, 593 Immutable, 594 Resizable, 595 }; 596 597 inline const char* GuardClassKindEnumName(GuardClassKind kind) { 598 switch (kind) { 599 case GuardClassKind::Array: 600 return "Array"; 601 case GuardClassKind::PlainObject: 602 return "PlainObject"; 603 case GuardClassKind::FixedLengthArrayBuffer: 604 return "FixedLengthArrayBuffer"; 605 case GuardClassKind::ImmutableArrayBuffer: 606 return "ImmutableArrayBuffer"; 607 case GuardClassKind::ResizableArrayBuffer: 608 return "ResizableArrayBuffer"; 609 case GuardClassKind::FixedLengthSharedArrayBuffer: 610 return "FixedLengthSharedArrayBuffer"; 611 case GuardClassKind::GrowableSharedArrayBuffer: 612 return "GrowableSharedArrayBuffer"; 613 case GuardClassKind::FixedLengthDataView: 614 return "FixedLengthDataView"; 615 case GuardClassKind::ImmutableDataView: 616 return "ImmutableDataView"; 617 case GuardClassKind::ResizableDataView: 618 return "ResizableDataView"; 619 case GuardClassKind::MappedArguments: 620 return "MappedArguments"; 621 case GuardClassKind::UnmappedArguments: 622 return "UnmappedArguments"; 623 case GuardClassKind::WindowProxy: 624 return "WindowProxy"; 625 case GuardClassKind::JSFunction: 626 return "JSFunction"; 627 case GuardClassKind::BoundFunction: 628 return "BoundFunction"; 629 case GuardClassKind::Set: 630 return "Set"; 631 case GuardClassKind::Map: 632 return "Map"; 633 case GuardClassKind::Date: 634 return "Date"; 635 case 
GuardClassKind::WeakMap: 636 return "WeakMap"; 637 case GuardClassKind::WeakSet: 638 return "WeakSet"; 639 } 640 MOZ_CRASH("Unknown GuardClassKind"); 641 } 642 643 } // namespace jit 644 } // namespace js 645 646 #endif /* jit_CacheIR_h */