Snapshots.cpp (23937B)
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- 2 * vim: set ts=8 sts=2 et sw=2 tw=80: 3 * This Source Code Form is subject to the terms of the Mozilla Public 4 * License, v. 2.0. If a copy of the MPL was not distributed with this 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 7 #include "jit/Snapshots.h" 8 9 #include "jit/JitSpewer.h" 10 #ifdef TRACK_SNAPSHOTS 11 # include "jit/LIR.h" 12 #endif 13 #include "jit/MIR-wasm.h" 14 #include "jit/MIR.h" 15 #include "jit/Recover.h" 16 #include "js/Printer.h" 17 18 using namespace js; 19 using namespace js::jit; 20 21 // [SMDOC] IonMonkey Snapshot encoding 22 // 23 // Encodings: 24 // [ptr] A fixed-size pointer. 25 // [vwu] A variable-width unsigned integer. 26 // [vws] A variable-width signed integer. 27 // [u8] An 8-bit unsigned integer. 28 // [u8'] An 8-bit unsigned integer which is potentially extended with packed 29 // data. 30 // [u8"] Packed data which is stored and packed in the previous [u8']. 31 // [vwu*] A list of variable-width unsigned integers. 32 // [pld] Payload of Recover Value Allocation: 33 // PAYLOAD_NONE: 34 // There is no payload. 35 // 36 // PAYLOAD_INDEX: 37 // [vwu] Index, such as the constant pool index. 38 // 39 // PAYLOAD_STACK_OFFSET: 40 // [vws] Stack offset based on the base of the Ion frame. 41 // 42 // PAYLOAD_GPR: 43 // [u8] Code of the general register. 44 // 45 // PAYLOAD_FPU: 46 // [u8] Code of the FPU register. 47 // 48 // PAYLOAD_PACKED_TAG: 49 // [u8"] Bits 5-7: JSValueType is encoded on the low bits of the Mode 50 // of the RValueAllocation. 51 // 52 // Snapshot header: 53 // 54 // [vwu] bits ((n+1)-31]: recover instruction offset 55 // bits [0,n): bailout kind (n = SNAPSHOT_BAILOUTKIND_BITS) 56 // 57 // Snapshot body, repeated "frame count" times, from oldest frame to newest 58 // frame. Note that the first frame doesn't have the "parent PC" field. 
//
//   [ptr] Debug only: JSScript*
//   [vwu] pc offset
//   [vwu] # of RVA's indexes, including nargs
//   [vwu*] List of indexes to R(ecover)ValueAllocation table. Contains
//          nargs + nfixed + stackDepth items.
//
// Recover value allocations are encoded at the end of the Snapshot buffer, and
// they are padded on ALLOCATION_TABLE_ALIGNMENT. The encoding of each
// allocation is determined by the RValueAllocation::Layout, which can be
// obtained from the RValueAllocation::Mode with the layoutFromMode function.
// The layout structure lists the types of payloads which are used to
// serialize / deserialize / dump the content of the allocations.
//
// R(ecover)ValueAllocation items:
//   [u8'] Mode, which defines the type of the payload as well as the
//         interpretation.
//   [pld] first payload (packed tag, index, stack offset, register, ...)
//   [pld] second payload (register, stack offset, none)
//
//       Modes:
//         CONSTANT [INDEX]
//           Index into the constant pool.
//
//         CST_UNDEFINED []
//           Constant value which corresponds to the "undefined" JS value.
//
//         CST_NULL []
//           Constant value which corresponds to the "null" JS value.
//
//         DOUBLE_REG [FPU_REG]
//           Double value stored in a FPU register.
//
//         FLOAT32_REG [FPU_REG]
//           Float32 value stored in a FPU register.
//
//         FLOAT32_STACK [STACK_OFFSET]
//           Float32 value stored on the stack.
//
//         UNTYPED_REG [GPR_REG]
//         UNTYPED_STACK [STACK_OFFSET]
//         UNTYPED_REG_REG [GPR_REG, GPR_REG]
//         UNTYPED_REG_STACK [GPR_REG, STACK_OFFSET]
//         UNTYPED_STACK_REG [STACK_OFFSET, GPR_REG]
//         UNTYPED_STACK_STACK [STACK_OFFSET, STACK_OFFSET]
//           Value with dynamically known type. On 32-bit architectures, the
//           first register/stack-offset corresponds to the holder of the
//           type, and the second corresponds to the payload of the JS Value.
//
//         RECOVER_INSTRUCTION [INDEX]
//           Index into the list of recovered instruction results.
//
//         RI_WITH_DEFAULT_CST [INDEX] [INDEX]
//           The first payload is the index into the list of recovered
//           instruction results. The second payload is the index in the
//           constant pool.
//
//         INTPTR_CST [INDEX]: (32-bit platform)
//         INTPTR_CST [INDEX] [INDEX]: (64-bit platform)
//           Unpacked IntPtr value stored in intptr_t. Split into either one
//           or two int32_t values, whose indices into the constant pool are
//           stored in the payloads.
//
//         INTPTR_REG [GPR_REG]:
//           Unpacked IntPtr value stored in intptr_t. Payload is stored in a
//           register.
//
//         INTPTR_STACK [STACK_OFFSET]:
//           Unpacked IntPtr value stored in intptr_t. Payload is stored at an
//           offset on the stack.
//
//         INTPTR_INT32_STACK [STACK_OFFSET]:
//           Unpacked IntPtr value stored in int32_t. Payload is stored at an
//           offset on the stack.
//
//         TYPED_REG [PACKED_TAG, GPR_REG]:
//           Value with statically known type, whose payload is stored in a
//           register.
//
//         TYPED_STACK [PACKED_TAG, STACK_OFFSET]:
//           Value with statically known type, whose payload is stored at an
//           offset on the stack.
//
//         INT64_CST [INDEX] [INDEX]:
//           Unpacked Int64 value stored in int64_t. Split into two int32_t
//           values, whose indices into the constant pool are stored in the
//           payloads.
//
//         INT64_REG [GPR_REG]:
//         INT64_STACK [STACK_OFFSET]:
//         INT64_REG_REG [GPR_REG, GPR_REG]
//         INT64_REG_STACK [GPR_REG, STACK_OFFSET]
//         INT64_STACK_REG [STACK_OFFSET, GPR_REG]
//         INT64_STACK_STACK [STACK_OFFSET, STACK_OFFSET]
//           Unpacked Int64 value. On 32-bit architectures, the first
//           register/stack-offset corresponds to the low 32 bits, and the
//           second corresponds to the high 32 bits.
//

// Returns the payload Layout matching an RValueAllocation::Mode.  Every
// layout is a function-local static constant, so the returned reference
// remains valid for the lifetime of the process and may be cached freely.
const RValueAllocation::Layout& RValueAllocation::layoutFromMode(Mode mode) {
  switch (mode) {
    case CONSTANT: {
      static const RValueAllocation::Layout layout = {PAYLOAD_INDEX,
                                                      PAYLOAD_NONE, "constant"};
      return layout;
    }

    case CST_UNDEFINED: {
      static const RValueAllocation::Layout layout = {
          PAYLOAD_NONE, PAYLOAD_NONE, "undefined"};
      return layout;
    }

    case CST_NULL: {
      static const RValueAllocation::Layout layout = {PAYLOAD_NONE,
                                                      PAYLOAD_NONE, "null"};
      return layout;
    }

    case DOUBLE_REG: {
      static const RValueAllocation::Layout layout = {PAYLOAD_FPU, PAYLOAD_NONE,
                                                      "double"};
      return layout;
    }
    case FLOAT32_REG: {
      static const RValueAllocation::Layout layout = {PAYLOAD_FPU, PAYLOAD_NONE,
                                                      "float32"};
      return layout;
    }
    case FLOAT32_STACK: {
      static const RValueAllocation::Layout layout = {PAYLOAD_STACK_OFFSET,
                                                      PAYLOAD_NONE, "float32"};
      return layout;
    }
#if defined(JS_NUNBOX32)
    // On NUNBOX32 a JS Value needs two payloads: one for the type tag and one
    // for the data word.
    case UNTYPED_REG_REG: {
      static const RValueAllocation::Layout layout = {PAYLOAD_GPR, PAYLOAD_GPR,
                                                      "value"};
      return layout;
    }
    case UNTYPED_REG_STACK: {
      static const RValueAllocation::Layout layout = {
          PAYLOAD_GPR, PAYLOAD_STACK_OFFSET, "value"};
      return layout;
    }
    case UNTYPED_STACK_REG: {
      static const RValueAllocation::Layout layout = {PAYLOAD_STACK_OFFSET,
                                                      PAYLOAD_GPR, "value"};
      return layout;
    }
    case UNTYPED_STACK_STACK: {
      static const RValueAllocation::Layout layout = {
          PAYLOAD_STACK_OFFSET, PAYLOAD_STACK_OFFSET, "value"};
      return layout;
    }
#elif defined(JS_PUNBOX64)
    // On PUNBOX64 the whole boxed Value fits in a single payload.
    case UNTYPED_REG: {
      static const RValueAllocation::Layout layout = {PAYLOAD_GPR, PAYLOAD_NONE,
                                                      "value"};
      return layout;
    }
    case UNTYPED_STACK: {
      static const RValueAllocation::Layout layout = {PAYLOAD_STACK_OFFSET,
                                                      PAYLOAD_NONE, "value"};
      return layout;
    }
#endif
    case RECOVER_INSTRUCTION: {
      static const RValueAllocation::Layout layout = {
          PAYLOAD_INDEX, PAYLOAD_NONE, "instruction"};
      return layout;
    }
    case RI_WITH_DEFAULT_CST: {
      static const RValueAllocation::Layout layout = {
          PAYLOAD_INDEX, PAYLOAD_INDEX, "instruction with default"};
      return layout;
    }

    case INTPTR_CST: {
      // An intptr_t constant is split into int32_t constant-pool entries: one
      // on 32-bit targets, two on 64-bit targets.
#if !defined(JS_64BIT)
      static const RValueAllocation::Layout layout = {
          PAYLOAD_INDEX, PAYLOAD_NONE, "unpacked intptr constant"};
      static_assert(sizeof(int32_t) == sizeof(intptr_t));
#else
      static const RValueAllocation::Layout layout = {
          PAYLOAD_INDEX, PAYLOAD_INDEX, "unpacked intptr constant"};
      static_assert(2 * sizeof(int32_t) == sizeof(intptr_t));
#endif
      return layout;
    }

    case INTPTR_REG: {
      static const RValueAllocation::Layout layout = {PAYLOAD_GPR, PAYLOAD_NONE,
                                                      "unpacked intptr"};
      return layout;
    }

    case INTPTR_STACK: {
      static const RValueAllocation::Layout layout = {
          PAYLOAD_STACK_OFFSET, PAYLOAD_NONE, "unpacked intptr"};
      return layout;
    }

    case INTPTR_INT32_STACK: {
      static const RValueAllocation::Layout layout = {
          PAYLOAD_STACK_OFFSET, PAYLOAD_NONE, "unpacked intptr (int32)"};
      return layout;
    }

    case INT64_CST: {
      // An int64_t constant is always split into two int32_t pool entries.
      static const RValueAllocation::Layout layout = {
          PAYLOAD_INDEX, PAYLOAD_INDEX, "unpacked int64 constant"};
      static_assert(2 * sizeof(int32_t) == sizeof(int64_t));
      return layout;
    }

#if defined(JS_NUNBOX32)
    case INT64_REG_REG: {
      static const RValueAllocation::Layout layout = {PAYLOAD_GPR, PAYLOAD_GPR,
                                                      "unpacked int64"};
      return layout;
    }

    case INT64_REG_STACK: {
      static const RValueAllocation::Layout layout = {
          PAYLOAD_GPR, PAYLOAD_STACK_OFFSET, "unpacked int64"};
      return layout;
    }

    case INT64_STACK_REG: {
      static const RValueAllocation::Layout layout = {
          PAYLOAD_STACK_OFFSET, PAYLOAD_GPR, "unpacked int64"};
      return layout;
    }

    case INT64_STACK_STACK: {
      static const RValueAllocation::Layout layout = {
          PAYLOAD_STACK_OFFSET, PAYLOAD_STACK_OFFSET, "unpacked int64"};
      return layout;
    }
#elif defined(JS_PUNBOX64)
    case INT64_REG: {
      static const RValueAllocation::Layout layout = {PAYLOAD_GPR, PAYLOAD_NONE,
                                                      "unpacked int64"};
      return layout;
    }

    case INT64_STACK: {
      static const RValueAllocation::Layout layout = {
          PAYLOAD_STACK_OFFSET, PAYLOAD_NONE, "unpacked int64"};
      return layout;
    }
#endif

    default: {
      // TYPED_REG and TYPED_STACK modes pack a JSValueType into the low bits
      // of the mode byte, so each of them covers a range of mode values
      // rather than a single enumerator.
      static const RValueAllocation::Layout regLayout = {
          PAYLOAD_PACKED_TAG, PAYLOAD_GPR, "typed value"};

      static const RValueAllocation::Layout stackLayout = {
          PAYLOAD_PACKED_TAG, PAYLOAD_STACK_OFFSET, "typed value"};

      if (mode >= TYPED_REG_MIN && mode <= TYPED_REG_MAX) {
        return regLayout;
      }
      if (mode >= TYPED_STACK_MIN && mode <= TYPED_STACK_MAX) {
        return stackLayout;
      }
    }
  }

  // Reaching here means the encoded mode byte is corrupt or unknown.
  MOZ_CRASH_UNSAFE_PRINTF("Unexpected mode: 0x%x", uint32_t(mode));
}

// Pad serialized RValueAllocations by a multiple of X bytes in the allocation
// buffer. By padding serialized value allocations, we are building an
// indexable table of elements of X bytes, and thus we can safely divide any
// offset within the buffer by X to obtain an index.
//
// By padding, we are losing space within the allocation buffer, but we
// multiply by X the number of indexes that we can store on one byte in each
// snapshot.
//
// Some value allocations are taking more than X bytes to be encoded, in which
// case we will pad to a multiple of X, and we are wasting indexes. The choice
// of X should be balanced between the wasted padding of serialized value
// allocation, and the saving made in snapshot indexes.
static const size_t ALLOCATION_TABLE_ALIGNMENT = 2; /* bytes */

// Decode one payload of the given |type| from |reader| into |p|.  For
// PAYLOAD_PACKED_TAG the payload lives in the low bits of the mode byte
// itself: it is extracted into |p| and stripped out of |*mode|.
void RValueAllocation::readPayload(CompactBufferReader& reader,
                                   PayloadType type, uint8_t* mode,
                                   Payload* p) {
  switch (type) {
    case PAYLOAD_NONE:
      break;
    case PAYLOAD_INDEX:
      p->index = reader.readUnsigned();
      break;
    case PAYLOAD_STACK_OFFSET:
      p->stackOffset = reader.readSigned();
      break;
    case PAYLOAD_GPR:
      p->gpr = Register::FromCode(reader.readByte());
      break;
    case PAYLOAD_FPU:
      p->fpu.data = reader.readByte();
      break;
    case PAYLOAD_PACKED_TAG:
      // The JSValueType is packed in the low bits of the mode byte; remove it
      // so that |*mode| becomes the canonical (tag-free) mode value.
      p->type = JSValueType(*mode & PACKED_TAG_MASK);
      *mode = *mode & ~PACKED_TAG_MASK;
      break;
  }
}

// Decode a complete RValueAllocation: a mode byte followed by the payloads
// described by that mode's layout.
RValueAllocation RValueAllocation::read(CompactBufferReader& reader) {
  uint8_t mode = reader.readByte();
  const Layout& layout = layoutFromMode(Mode(mode & MODE_BITS_MASK));
  Payload arg1, arg2;

  readPayload(reader, layout.type1, &mode, &arg1);
  readPayload(reader, layout.type2, &mode, &arg2);
  return RValueAllocation(Mode(mode), arg1, arg2);
}

// Encode one payload of the given |type|.  PAYLOAD_PACKED_TAG does not emit a
// new byte: it ORs the tag into the last byte already written (the mode).
void RValueAllocation::writePayload(CompactBufferWriter& writer,
                                    PayloadType type, Payload p) {
  switch (type) {
    case PAYLOAD_NONE:
      break;
    case PAYLOAD_INDEX:
      writer.writeUnsigned(p.index);
      break;
    case PAYLOAD_STACK_OFFSET:
      writer.writeSigned(p.stackOffset);
      break;
    case PAYLOAD_GPR:
      static_assert(Registers::Total <= 0x100,
                    "Not enough bytes to encode all registers.");
      writer.writeByte(p.gpr.code());
      break;
    case PAYLOAD_FPU:
      static_assert(FloatRegisters::Total <= 0x100,
                    "Not enough bytes to encode all float registers.");
      writer.writeByte(p.fpu.code());
      break;
    case PAYLOAD_PACKED_TAG: {
      // This code assumes that the PACKED_TAG payload is following the
      // writeByte of the mode.
      if (!writer.oom()) {
        MOZ_ASSERT(writer.length());
        uint8_t* mode = writer.buffer() + (writer.length() - 1);
        MOZ_ASSERT((*mode & PACKED_TAG_MASK) == 0 &&
                   (p.type & ~PACKED_TAG_MASK) == 0);
        *mode = *mode | p.type;
      }
      break;
    }
  }
}

// Fill with 0x7f up to the next ALLOCATION_TABLE_ALIGNMENT boundary so that
// every serialized allocation starts at an index-addressable offset.
void RValueAllocation::writePadding(CompactBufferWriter& writer) {
  // Write 0x7f in all padding bytes.
  while (writer.length() % ALLOCATION_TABLE_ALIGNMENT) {
    writer.writeByte(0x7f);
  }
}

// Serialize this allocation: mode byte, payloads, then alignment padding.
void RValueAllocation::write(CompactBufferWriter& writer) const {
  const Layout& layout = layoutFromMode(mode());
  // A packed tag can only be folded into the mode byte via type1, never type2.
  MOZ_ASSERT(layout.type2 != PAYLOAD_PACKED_TAG);
  MOZ_ASSERT(writer.length() % ALLOCATION_TABLE_ALIGNMENT == 0);

  writer.writeByte(mode_);
  writePayload(writer, layout.type1, arg1_);
  writePayload(writer, layout.type2, arg2_);
  writePadding(writer);
}

// Hash over the mode and both payloads (read through the |index| union
// member); used to deduplicate allocations in SnapshotWriter::add.
HashNumber RValueAllocation::hash() const {
  HashNumber res = 0;
  res = HashNumber(mode_);
  res = arg1_.index + (res << 6) + (res << 16) - res;
  res = arg2_.index + (res << 6) + (res << 16) - res;
  return res;
}

#ifdef JS_JITSPEW
// Pretty-print a single payload for spew output.
void RValueAllocation::dumpPayload(GenericPrinter& out, PayloadType type,
                                   Payload p) {
  switch (type) {
    case PAYLOAD_NONE:
      break;
    case PAYLOAD_INDEX:
      out.printf("index %u", p.index);
      break;
    case PAYLOAD_STACK_OFFSET:
      out.printf("stack %d", p.stackOffset);
      break;
    case PAYLOAD_GPR:
      out.printf("reg %s", p.gpr.name());
      break;
    case PAYLOAD_FPU:
      out.printf("reg %s", p.fpu.name());
      break;
    case PAYLOAD_PACKED_TAG:
      out.printf("%s", ValTypeToString(p.type));
      break;
  }
}

// Pretty-print the allocation as "name (payload1, payload2)".  The
// parentheses are keyed on type1 because no layout has a payload in type2
// without one in type1.
void RValueAllocation::dump(GenericPrinter& out) const {
  const Layout& layout = layoutFromMode(mode());
  out.printf("%s", layout.name);

  if (layout.type1 != PAYLOAD_NONE) {
    out.printf(" (");
  }
  dumpPayload(out, layout.type1, arg1_);
  if (layout.type2 != PAYLOAD_NONE) {
    out.printf(", ");
  }
  dumpPayload(out, layout.type2, arg2_);
  if (layout.type1 != PAYLOAD_NONE) {
    out.printf(")");
  }
}
#endif  // JS_JITSPEW

// Position the snapshot reader at |offset| within the snapshot list, and the
// allocation reader over the RVA table that follows the list.  A null
// |snapshots| pointer leaves the reader empty.
SnapshotReader::SnapshotReader(const uint8_t* snapshots, uint32_t offset,
                               uint32_t RVATableSize, uint32_t listSize)
    : reader_(snapshots + offset, snapshots + listSize),
      allocReader_(snapshots + listSize, snapshots + listSize + RVATableSize),
      allocTable_(snapshots + listSize),
      allocRead_(0) {
  if (!snapshots) {
    return;
  }
  JitSpew(JitSpew_IonSnapshots, "Creating snapshot reader");
  readSnapshotHeader();
}

#define COMPUTE_SHIFT_AFTER_(name) (name##_BITS + name##_SHIFT)
#define COMPUTE_MASK_(name) ((uint32_t(1 << name##_BITS) - 1) << name##_SHIFT)

// Details of snapshot header packing.
static const uint32_t SNAPSHOT_BAILOUTKIND_SHIFT = 0;
static const uint32_t SNAPSHOT_BAILOUTKIND_BITS = 6;
static const uint32_t SNAPSHOT_BAILOUTKIND_MASK =
    COMPUTE_MASK_(SNAPSHOT_BAILOUTKIND);

static_assert((1 << SNAPSHOT_BAILOUTKIND_BITS) - 1 >=
                  uint8_t(BailoutKind::Limit),
              "Not enough bits for BailoutKinds");

// The recover offset occupies whatever bits remain above the bailout kind.
static const uint32_t SNAPSHOT_ROFFSET_SHIFT =
    COMPUTE_SHIFT_AFTER_(SNAPSHOT_BAILOUTKIND);
static const uint32_t SNAPSHOT_ROFFSET_BITS = 32 - SNAPSHOT_ROFFSET_SHIFT;
static const uint32_t SNAPSHOT_ROFFSET_MASK = COMPUTE_MASK_(SNAPSHOT_ROFFSET);

#undef COMPUTE_MASK_
#undef COMPUTE_SHIFT_AFTER_

// Unpack the bailout kind and recover-instruction offset from the header
// word; mirrors the packing done in SnapshotWriter::startSnapshot.
void SnapshotReader::readSnapshotHeader() {
  uint32_t bits = reader_.readUnsigned();

  bailoutKind_ = BailoutKind((bits & SNAPSHOT_BAILOUTKIND_MASK) >>
                             SNAPSHOT_BAILOUTKIND_SHIFT);
  recoverOffset_ = (bits & SNAPSHOT_ROFFSET_MASK) >> SNAPSHOT_ROFFSET_SHIFT;

  JitSpew(JitSpew_IonSnapshots, "Read snapshot header with bailout kind %u",
          uint32_t(bailoutKind_));

#ifdef TRACK_SNAPSHOTS
  readTrackSnapshot();
#endif
}

#ifdef TRACK_SNAPSHOTS
// Read the debugging metadata written by SnapshotWriter::trackSnapshot (must
// stay in sync with that function's field order).
void SnapshotReader::readTrackSnapshot() {
  pcOpcode_ = reader_.readUnsigned();
  mirOpcode_ = reader_.readUnsigned();
  mirId_ = reader_.readUnsigned();
  lirOpcode_ = reader_.readUnsigned();
  lirId_ = reader_.readUnsigned();
}

// Spew which bytecode / MIR / LIR instruction this snapshot was created for.
void SnapshotReader::spewBailingFrom() const {
#  ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_IonBailouts)) {
    JitSpewHeader(JitSpew_IonBailouts);
    Fprinter& out = JitSpewPrinter();
    out.printf(" bailing from bytecode: %s, MIR: ", CodeName(JSOp(pcOpcode_)));
    MDefinition::PrintOpcodeName(out, MDefinition::Opcode(mirOpcode_));
    out.printf(" [%u], LIR: ", mirId_);
    LInstruction::printName(out, LInstruction::Opcode(lirOpcode_));
    out.printf(" [%u]", lirId_);
    out.printf("\n");
  }
#  endif
}
#endif

// Read the next RVA-table index from the snapshot body, counting slots read.
uint32_t SnapshotReader::readAllocationIndex() {
  allocRead_++;
  return reader_.readUnsigned();
}

// Read the next slot's allocation: the snapshot stores a table index, which
// is scaled by ALLOCATION_TABLE_ALIGNMENT to get the byte offset into the
// allocation table.
RValueAllocation SnapshotReader::readAllocation() {
  JitSpew(JitSpew_IonSnapshots, "Reading slot %u", allocRead_);
  uint32_t offset = readAllocationIndex() * ALLOCATION_TABLE_ALIGNMENT;
  allocReader_.seek(allocTable_, offset);
  return RValueAllocation::read(allocReader_);
}

SnapshotWriter::SnapshotWriter()
    // Based on the measurements made in Bug 962555 comment 20, this length
    // should be enough to prevent the reallocation of the hash table for at
    // least half of the compilations.
    : allocMap_(32) {}

// Start reading the recover instructions referenced by |snapshot|, decoding
// the first instruction eagerly.  A null |recovers| leaves the reader empty.
RecoverReader::RecoverReader(SnapshotReader& snapshot, const uint8_t* recovers,
                             uint32_t size)
    : reader_(nullptr, nullptr), numInstructions_(0), numInstructionsRead_(0) {
  if (!recovers) {
    return;
  }
  reader_ =
      CompactBufferReader(recovers + snapshot.recoverOffset(), recovers + size);
  readRecoverHeader();
  readInstruction();
}

// Copying a RecoverReader also clones the currently-decoded instruction,
// since rawData_ is owned storage rather than a pointer into the buffer.
RecoverReader::RecoverReader(const RecoverReader& rr)
    : reader_(rr.reader_),
      numInstructions_(rr.numInstructions_),
      numInstructionsRead_(rr.numInstructionsRead_) {
  if (reader_.currentPosition()) {
    rr.instruction()->cloneInto(&rawData_);
  }
}

RecoverReader& RecoverReader::operator=(const RecoverReader& rr) {
  // NOTE(review): no self-assignment guard; on self-assignment this clones
  // rawData_ onto itself -- presumably harmless, but confirm cloneInto
  // tolerates source == destination.
  reader_ = rr.reader_;
  numInstructions_ = rr.numInstructions_;
  numInstructionsRead_ = rr.numInstructionsRead_;
  if (reader_.currentPosition()) {
    rr.instruction()->cloneInto(&rawData_);
  }
  return *this;
}

// Read the instruction count that prefixes the recover entry.
void RecoverReader::readRecoverHeader() {
  numInstructions_ = reader_.readUnsigned();
  MOZ_ASSERT(numInstructions_);

  JitSpew(JitSpew_IonSnapshots, "Read recover header with instructionCount %u",
          numInstructions_);
}

// Decode the next recover instruction into rawData_.
void RecoverReader::readInstruction() {
  MOZ_ASSERT(moreInstructions());
  RInstruction::readRecoverData(reader_, &rawData_);
  numInstructionsRead_++;
}

// Begin a snapshot: pack the bailout kind and recover offset into one header
// word.  Returns the offset of this snapshot within the snapshot buffer.
SnapshotOffset SnapshotWriter::startSnapshot(RecoverOffset recoverOffset,
                                            BailoutKind kind) {
  lastStart_ = writer_.length();
  allocWritten_ = 0;

  JitSpew(JitSpew_IonSnapshots,
          "starting snapshot with recover offset %u, bailout kind %u",
          recoverOffset, uint32_t(kind));

  MOZ_ASSERT(uint32_t(kind) < (1 << SNAPSHOT_BAILOUTKIND_BITS));
  MOZ_ASSERT(recoverOffset < (1 << SNAPSHOT_ROFFSET_BITS));
  uint32_t bits = (uint32_t(kind) << SNAPSHOT_BAILOUTKIND_SHIFT) |
                  (recoverOffset << SNAPSHOT_ROFFSET_SHIFT);

  writer_.writeUnsigned(bits);
  return lastStart_;
}

#ifdef TRACK_SNAPSHOTS
// Record which bytecode / MIR / LIR instruction produced this snapshot; read
// back by SnapshotReader::readTrackSnapshot.
void SnapshotWriter::trackSnapshot(uint32_t pcOpcode, uint32_t mirOpcode,
                                   uint32_t mirId, uint32_t lirOpcode,
                                   uint32_t lirId) {
  writer_.writeUnsigned(pcOpcode);
  writer_.writeUnsigned(mirOpcode);
  writer_.writeUnsigned(mirId);
  writer_.writeUnsigned(lirOpcode);
  writer_.writeUnsigned(lirId);
}
#endif

// Append one allocation to the current snapshot.  Identical allocations are
// deduplicated through allocMap_, so the snapshot body stores only a table
// index.  Returns false on OOM.
bool SnapshotWriter::add(const RValueAllocation& alloc) {
  uint32_t offset;
  RValueAllocMap::AddPtr p = allocMap_.lookupForAdd(alloc);
  if (!p) {
    // First time we see this allocation: serialize it into the table and
    // remember its offset for future duplicates.
    offset = allocWriter_.length();
    alloc.write(allocWriter_);
    if (!allocMap_.add(p, alloc, offset)) {
      allocWriter_.setOOM();
      return false;
    }
  } else {
    offset = p->value();
  }

#ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_IonSnapshots)) {
    JitSpewHeader(JitSpew_IonSnapshots);
    Fprinter& out = JitSpewPrinter();
    out.printf(" slot %u (%u): ", allocWritten_, offset);
    alloc.dump(out);
    out.printf("\n");
  }
#endif

  allocWritten_++;
  // The table is aligned, so store the compact index rather than the byte
  // offset; the reader multiplies it back (see readAllocation).
  writer_.writeUnsigned(offset / ALLOCATION_TABLE_ALIGNMENT);
  return true;
}

// Finish the current snapshot, appending a debug-only sentinel that the
// reader side asserts on.
void SnapshotWriter::endSnapshot() {
  // Place a sentinel for asserting on the other end.
#ifdef DEBUG
  writer_.writeSigned(-1);
#endif

  JitSpew(JitSpew_IonSnapshots,
          "ending snapshot total size: %u bytes (start %u)",
          uint32_t(writer_.length() - lastStart_), lastStart_);
}

// Begin a recover entry: write the instruction count and return the offset of
// this entry within the recover buffer.
RecoverOffset RecoverWriter::startRecover(uint32_t instructionCount) {
  MOZ_ASSERT(instructionCount);
  instructionCount_ = instructionCount;
  instructionsWritten_ = 0;

  JitSpew(JitSpew_IonSnapshots, "starting recover with %u instruction(s)",
          instructionCount);

  RecoverOffset recoverOffset = writer_.length();
  writer_.writeUnsigned(instructionCount);
  return recoverOffset;
}

// Serialize one recover instruction; flags OOM on the writer on failure.
void RecoverWriter::writeInstruction(const MNode* rp) {
  if (!rp->writeRecoverData(writer_)) {
    writer_.setOOM();
  }
  instructionsWritten_++;
}

// Check that the promised number of instructions was actually written.
void RecoverWriter::endRecover() {
  MOZ_ASSERT(instructionCount_ == instructionsWritten_);
}