/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/Snapshots.h"

#include "jsscript.h"

#include "jit/CompileInfo.h"
#include "jit/IonSpewer.h"
#ifdef TRACK_SNAPSHOTS
# include "jit/LIR.h"
#endif
#include "jit/MIR.h"
#include "jit/Recover.h"

using namespace js;
using namespace js::jit;

// Snapshot header:
//
//   [vwu] bits ((n+1)-31]: frame count
//         bit n+1: resume after
//         bits [0,n): bailout kind (n = SNAPSHOT_BAILOUTKIND_BITS)
//
// Snapshot body, repeated "frame count" times, from oldest frame to newest
// frame. Note that the first frame doesn't have the "parent PC" field.
//
//   [ptr] Debug only: JSScript *
//   [vwu] pc offset
//   [vwu] # of RVA's indexes, including nargs
//  [vwu*] List of indexes to the R(ecover)ValueAllocation table. Contains
//         nargs + nfixed + stackDepth items.
//
// Recover value allocations are encoded at the end of the Snapshot buffer, and
// they are padded to ALLOCATION_TABLE_ALIGNMENT. The encoding of each
// allocation is determined by the RValueAllocation::Layout, which can be
// obtained from the RValueAllocation::Mode with the layoutFromMode function.
// The layout structure lists the payload types, which determine how the
// content of an allocation is serialized, deserialized, and dumped.
//
// R(ecover)ValueAllocation items:
//   [u8'] Mode, which defines the type of the payload as well as its
//         interpretation.
//   [pld] first payload (packed tag, index, stack offset, register, ...)
//   [pld] second payload (register, stack offset, none)
//
//       Modes:
//         CONSTANT [INDEX]
//           Index into the constant pool.
//
//         CST_UNDEFINED []
//           Constant value which corresponds to the "undefined" JS value.
//
//         CST_NULL []
//           Constant value which corresponds to the "null" JS value.
//
//         DOUBLE_REG [FPU_REG]
//           Double value stored in an FPU register.
//
//         FLOAT32_REG [FPU_REG]
//           32-bit float value stored in an FPU register.
//
//         FLOAT32_STACK [STACK_OFFSET]
//           32-bit float value stored on the stack.
//
//         UNTYPED_REG [GPR_REG]
//         UNTYPED_STACK [STACK_OFFSET]
//         UNTYPED_REG_REG [GPR_REG, GPR_REG]
//         UNTYPED_REG_STACK [GPR_REG, STACK_OFFSET]
//         UNTYPED_STACK_REG [STACK_OFFSET, GPR_REG]
//         UNTYPED_STACK_STACK [STACK_OFFSET, STACK_OFFSET]
//           Value with dynamically known type. On 32-bit architectures, the
//           first register/stack-offset corresponds to the holder of the type,
//           and the second corresponds to the payload of the JS Value.
//
//         TYPED_REG [PACKED_TAG, GPR_REG]:
//           Value with statically known type, whose payload is stored in a
//           register.
//
//         TYPED_STACK [PACKED_TAG, STACK_OFFSET]:
//           Value with statically known type, whose payload is stored at an
//           offset on the stack.
//
// Encodings:
//   [ptr] A fixed-size pointer.
//   [vwu] A variable-width unsigned integer.
//   [vws] A variable-width signed integer.
//    [u8] An 8-bit unsigned integer.
//   [u8'] An 8-bit unsigned integer which is potentially extended with packed
//         data.
//   [u8"] Packed data which is stored in the previous [u8'].
//  [vwu*] A list of variable-width unsigned integers.
//   [pld] Payload of a Recover Value Allocation:
//         PAYLOAD_NONE:
//           There is no payload.
//
//         PAYLOAD_INDEX:
//           [vwu] Index, such as the constant pool index.
//
//         PAYLOAD_STACK_OFFSET:
//           [vws] Stack offset based on the base of the Ion frame.
//
//         PAYLOAD_GPR:
//            [u8] Code of the general register.
//
//         PAYLOAD_FPU:
//            [u8] Code of the FPU register.
//
//         PAYLOAD_PACKED_TAG:
//           [u8"] The JSValueType, encoded in the low 3 bits of the Mode byte
//                 of the RValueAllocation.
//
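// Worked example (editorial illustration, not part of the original
// documentation; the exact Mode values live in Snapshots.h and are assumed
// here to keep the low 3 bits of the TYPED_REG modes free for the packed tag):
//
//   - An int32 value held in a general register serializes as two bytes:
//       [u8']  TYPED_REG base mode | JSVAL_TYPE_INT32   (PAYLOAD_PACKED_TAG,
//              OR'ed into the mode byte by writePayload)
//       [u8]   code of the general register             (PAYLOAD_GPR)
//     Two bytes is already a multiple of ALLOCATION_TABLE_ALIGNMENT (2), so
//     writePadding() appends nothing.
//
//   - A CONSTANT allocation serializes as the mode byte followed by a [vwu]
//     index into the constant pool, padded up to the alignment if needed.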

const RValueAllocation::Layout &
RValueAllocation::layoutFromMode(Mode mode)
{
    switch (mode) {
      case CONSTANT: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_INDEX,
            PAYLOAD_NONE,
            "constant"
        };
        return layout;
      }

      case CST_UNDEFINED: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_NONE,
            PAYLOAD_NONE,
            "undefined"
        };
        return layout;
      }

      case CST_NULL: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_NONE,
            PAYLOAD_NONE,
            "null"
        };
        return layout;
      }

      case DOUBLE_REG: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_FPU,
            PAYLOAD_NONE,
            "double"
        };
        return layout;
      }
      case FLOAT32_REG: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_FPU,
            PAYLOAD_NONE,
            "float32"
        };
        return layout;
      }
      case FLOAT32_STACK: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_STACK_OFFSET,
            PAYLOAD_NONE,
            "float32"
        };
        return layout;
      }
#if defined(JS_NUNBOX32)
      case UNTYPED_REG_REG: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_GPR,
            PAYLOAD_GPR,
            "value"
        };
        return layout;
      }
      case UNTYPED_REG_STACK: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_GPR,
            PAYLOAD_STACK_OFFSET,
            "value"
        };
        return layout;
      }
      case UNTYPED_STACK_REG: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_STACK_OFFSET,
            PAYLOAD_GPR,
            "value"
        };
        return layout;
      }
      case UNTYPED_STACK_STACK: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_STACK_OFFSET,
            PAYLOAD_STACK_OFFSET,
            "value"
        };
        return layout;
      }
#elif defined(JS_PUNBOX64)
      case UNTYPED_REG: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_GPR,
            PAYLOAD_NONE,
            "value"
        };
        return layout;
      }
      case UNTYPED_STACK: {
        static const RValueAllocation::Layout layout = {
            PAYLOAD_STACK_OFFSET,
            PAYLOAD_NONE,
            "value"
        };
        return layout;
      }
#endif
      default: {
        static const RValueAllocation::Layout regLayout = {
            PAYLOAD_PACKED_TAG,
            PAYLOAD_GPR,
            "typed value"
        };

        static const RValueAllocation::Layout stackLayout = {
            PAYLOAD_PACKED_TAG,
            PAYLOAD_STACK_OFFSET,
            "typed value"
        };

        if (mode >= TYPED_REG_MIN && mode <= TYPED_REG_MAX)
            return regLayout;
        if (mode >= TYPED_STACK_MIN && mode <= TYPED_STACK_MAX)
            return stackLayout;
      }
    }

    MOZ_ASSUME_UNREACHABLE("Wrong mode type?");
}

// Pad serialized RValueAllocations to a multiple of X bytes in the allocation
// buffer. By padding serialized value allocations, we are building an
// indexable table of elements of X bytes, and thus we can safely divide any
// offset within the buffer by X to obtain an index.
//
// By padding, we lose space within the allocation buffer, but we multiply by X
// the number of indexes that we can store in one byte of each snapshot.
//
// Some value allocations take more than X bytes to encode, in which case we
// pad to a multiple of X and waste indexes. The choice of X should balance the
// padding wasted on serialized value allocations against the savings made in
// snapshot indexes.
static const size_t ALLOCATION_TABLE_ALIGNMENT = 2; /* bytes */
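
// Editorial illustration of the arithmetic above (numbers are made up): with a
// 2-byte alignment, an allocation whose encoding starts at byte offset 6 of
// the allocation buffer is referenced from snapshots as index 3 (6 / 2), and a
// 3-byte encoding is padded to 4 bytes, wasting one index. A larger alignment
// would shrink the indexes written as [vwu] in each snapshot, at the price of
// more padding bytes in the allocation table.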

void
RValueAllocation::readPayload(CompactBufferReader &reader, PayloadType type,
                              uint8_t *mode, Payload *p)
{
    switch (type) {
      case PAYLOAD_NONE:
        break;
      case PAYLOAD_INDEX:
        p->index = reader.readUnsigned();
        break;
      case PAYLOAD_STACK_OFFSET:
        p->stackOffset = reader.readSigned();
        break;
      case PAYLOAD_GPR:
        p->gpr = Register::FromCode(reader.readByte());
        break;
      case PAYLOAD_FPU:
        p->fpu = FloatRegister::FromCode(reader.readByte());
        break;
      case PAYLOAD_PACKED_TAG:
        p->type = JSValueType(*mode & 0x07);
        *mode = *mode & ~0x07;
        break;
    }
}

RValueAllocation
RValueAllocation::read(CompactBufferReader &reader)
{
    uint8_t mode = reader.readByte();
    const Layout &layout = layoutFromMode(Mode(mode));
    Payload arg1, arg2;

    readPayload(reader, layout.type1, &mode, &arg1);
    readPayload(reader, layout.type2, &mode, &arg2);
    return RValueAllocation(Mode(mode), arg1, arg2);
}

void
RValueAllocation::writePayload(CompactBufferWriter &writer, PayloadType type,
                               Payload p)
{
    switch (type) {
      case PAYLOAD_NONE:
        break;
      case PAYLOAD_INDEX:
        writer.writeUnsigned(p.index);
        break;
      case PAYLOAD_STACK_OFFSET:
        writer.writeSigned(p.stackOffset);
        break;
      case PAYLOAD_GPR:
        static_assert(Registers::Total <= 0x100,
                      "Not enough bytes to encode all registers.");
        writer.writeByte(p.gpr.code());
        break;
      case PAYLOAD_FPU:
        static_assert(FloatRegisters::Total <= 0x100,
                      "Not enough bytes to encode all float registers.");
        writer.writeByte(p.fpu.code());
        break;
      case PAYLOAD_PACKED_TAG: {
        // This code assumes that the PACKED_TAG payload directly follows the
        // writeByte of the mode.
        MOZ_ASSERT(writer.length());
        uint8_t *mode = writer.buffer() + (writer.length() - 1);
        MOZ_ASSERT((*mode & 0x07) == 0 && (p.type & ~0x07) == 0);
        *mode = *mode | p.type;
        break;
      }
    }
}

void
RValueAllocation::writePadding(CompactBufferWriter &writer)
{
    // Write 0x7f in all padding bytes.
    while (writer.length() % ALLOCATION_TABLE_ALIGNMENT)
        writer.writeByte(0x7f);
}

void
RValueAllocation::write(CompactBufferWriter &writer) const
{
    const Layout &layout = layoutFromMode(mode());
    MOZ_ASSERT(layout.type2 != PAYLOAD_PACKED_TAG);
    MOZ_ASSERT(writer.length() % ALLOCATION_TABLE_ALIGNMENT == 0);

    writer.writeByte(mode_);
    writePayload(writer, layout.type1, arg1_);
    writePayload(writer, layout.type2, arg2_);
    writePadding(writer);
}

HashNumber
RValueAllocation::hash() const {
    CompactBufferWriter writer;
    write(writer);

    // We should never OOM because the compact buffer writer has 32 inlined
    // bytes, and in the worst-case scenario we only encode 12 bytes
    // (12 == mode + signed + signed + pad).
    MOZ_ASSERT(!writer.oom());
    MOZ_ASSERT(writer.length() <= 12);

    HashNumber res = 0;
    for (size_t i = 0; i < writer.length(); i++) {
        res = ((res << 8) | (res >> (sizeof(res) - 1)));
        res ^= writer.buffer()[i];
    }
    return res;
}

static const char *
ValTypeToString(JSValueType type)
{
    switch (type) {
      case JSVAL_TYPE_INT32:
        return "int32_t";
      case JSVAL_TYPE_DOUBLE:
        return "double";
      case JSVAL_TYPE_STRING:
        return "string";
      case JSVAL_TYPE_BOOLEAN:
        return "boolean";
      case JSVAL_TYPE_OBJECT:
        return "object";
      case JSVAL_TYPE_MAGIC:
        return "magic";
      default:
        MOZ_ASSUME_UNREACHABLE("no payload");
    }
}

void
RValueAllocation::dumpPayload(FILE *fp, PayloadType type, Payload p)
{
    switch (type) {
      case PAYLOAD_NONE:
        break;
      case PAYLOAD_INDEX:
        fprintf(fp, "index %u", p.index);
        break;
      case PAYLOAD_STACK_OFFSET:
        fprintf(fp, "stack %d", p.stackOffset);
        break;
      case PAYLOAD_GPR:
        fprintf(fp, "reg %s", p.gpr.name());
        break;
      case PAYLOAD_FPU:
        fprintf(fp, "reg %s", p.fpu.name());
        break;
      case PAYLOAD_PACKED_TAG:
        fprintf(fp, "%s", ValTypeToString(p.type));
        break;
    }
}

void
RValueAllocation::dump(FILE *fp) const
{
    const Layout &layout = layoutFromMode(mode());
    fprintf(fp, "%s", layout.name);

    if (layout.type1 != PAYLOAD_NONE)
        fprintf(fp, " (");
    dumpPayload(fp, layout.type1, arg1_);
    if (layout.type2 != PAYLOAD_NONE)
        fprintf(fp, ", ");
    dumpPayload(fp, layout.type2, arg2_);
    if (layout.type1 != PAYLOAD_NONE)
        fprintf(fp, ")");
}

bool
RValueAllocation::equalPayloads(PayloadType type, Payload lhs, Payload rhs)
{
    switch (type) {
      case PAYLOAD_NONE:
        return true;
      case PAYLOAD_INDEX:
        return lhs.index == rhs.index;
      case PAYLOAD_STACK_OFFSET:
        return lhs.stackOffset == rhs.stackOffset;
      case PAYLOAD_GPR:
        return lhs.gpr == rhs.gpr;
      case PAYLOAD_FPU:
        return lhs.fpu == rhs.fpu;
      case PAYLOAD_PACKED_TAG:
        return lhs.type == rhs.type;
    }

    return false;
}

SnapshotReader::SnapshotReader(const uint8_t *snapshots, uint32_t offset,
                               uint32_t RVATableSize, uint32_t listSize)
  : reader_(snapshots + offset, snapshots + listSize),
    allocReader_(snapshots + listSize, snapshots + listSize + RVATableSize),
    allocTable_(snapshots + listSize),
    allocRead_(0)
{
    if (!snapshots)
        return;
    IonSpew(IonSpew_Snapshots, "Creating snapshot reader");
    readSnapshotHeader();
}

#define COMPUTE_SHIFT_AFTER_(name) (name ## _BITS + name ## _SHIFT)
#define COMPUTE_MASK_(name) ((uint32_t(1 << name ## _BITS) - 1) << name ## _SHIFT)

// Details of snapshot header packing.
static const uint32_t SNAPSHOT_BAILOUTKIND_SHIFT = 0;
static const uint32_t SNAPSHOT_BAILOUTKIND_BITS = 3;
static const uint32_t SNAPSHOT_BAILOUTKIND_MASK = COMPUTE_MASK_(SNAPSHOT_BAILOUTKIND);

static const uint32_t SNAPSHOT_ROFFSET_SHIFT = COMPUTE_SHIFT_AFTER_(SNAPSHOT_BAILOUTKIND);
static const uint32_t SNAPSHOT_ROFFSET_BITS = 32 - SNAPSHOT_ROFFSET_SHIFT;
static const uint32_t SNAPSHOT_ROFFSET_MASK = COMPUTE_MASK_(SNAPSHOT_ROFFSET);

// Details of recover header packing.
static const uint32_t RECOVER_RESUMEAFTER_SHIFT = 0;
static const uint32_t RECOVER_RESUMEAFTER_BITS = 1;
static const uint32_t RECOVER_RESUMEAFTER_MASK = COMPUTE_MASK_(RECOVER_RESUMEAFTER);

static const uint32_t RECOVER_RINSCOUNT_SHIFT = COMPUTE_SHIFT_AFTER_(RECOVER_RESUMEAFTER);
static const uint32_t RECOVER_RINSCOUNT_BITS = 32 - RECOVER_RINSCOUNT_SHIFT;
static const uint32_t RECOVER_RINSCOUNT_MASK = COMPUTE_MASK_(RECOVER_RINSCOUNT);

#undef COMPUTE_MASK_
#undef COMPUTE_SHIFT_AFTER_

void
SnapshotReader::readSnapshotHeader()
{
    uint32_t bits = reader_.readUnsigned();

    bailoutKind_ = BailoutKind((bits & SNAPSHOT_BAILOUTKIND_MASK) >> SNAPSHOT_BAILOUTKIND_SHIFT);
    recoverOffset_ = (bits & SNAPSHOT_ROFFSET_MASK) >> SNAPSHOT_ROFFSET_SHIFT;

    IonSpew(IonSpew_Snapshots, "Read snapshot header with bailout kind %u",
            bailoutKind_);

#ifdef TRACK_SNAPSHOTS
    readTrackSnapshot();
#endif
}

#ifdef TRACK_SNAPSHOTS
void
SnapshotReader::readTrackSnapshot()
{
    pcOpcode_ = reader_.readUnsigned();
    mirOpcode_ = reader_.readUnsigned();
    mirId_ = reader_.readUnsigned();
    lirOpcode_ = reader_.readUnsigned();
    lirId_ = reader_.readUnsigned();
}

void
SnapshotReader::spewBailingFrom() const
{
    if (IonSpewEnabled(IonSpew_Bailouts)) {
        IonSpewHeader(IonSpew_Bailouts);
        fprintf(IonSpewFile, " bailing from bytecode: %s, MIR: ", js_CodeName[pcOpcode_]);
        MDefinition::PrintOpcodeName(IonSpewFile, MDefinition::Opcode(mirOpcode_));
        fprintf(IonSpewFile, " [%u], LIR: ", mirId_);
LIR: ", mirId_); michael@0: LInstruction::printName(IonSpewFile, LInstruction::Opcode(lirOpcode_)); michael@0: fprintf(IonSpewFile, " [%u]", lirId_); michael@0: fprintf(IonSpewFile, "\n"); michael@0: } michael@0: } michael@0: #endif michael@0: michael@0: uint32_t michael@0: SnapshotReader::readAllocationIndex() michael@0: { michael@0: allocRead_++; michael@0: return reader_.readUnsigned(); michael@0: } michael@0: michael@0: RValueAllocation michael@0: SnapshotReader::readAllocation() michael@0: { michael@0: IonSpew(IonSpew_Snapshots, "Reading slot %u", allocRead_); michael@0: uint32_t offset = readAllocationIndex() * ALLOCATION_TABLE_ALIGNMENT; michael@0: allocReader_.seek(allocTable_, offset); michael@0: return RValueAllocation::read(allocReader_); michael@0: } michael@0: michael@0: bool michael@0: SnapshotWriter::init() michael@0: { michael@0: // Based on the measurements made in Bug 962555 comment 20, this should be michael@0: // enough to prevent the reallocation of the hash table for at least half of michael@0: // the compilations. michael@0: return allocMap_.init(32); michael@0: } michael@0: michael@0: RecoverReader::RecoverReader(SnapshotReader &snapshot, const uint8_t *recovers, uint32_t size) michael@0: : reader_(nullptr, nullptr), michael@0: numInstructions_(0), michael@0: numInstructionsRead_(0) michael@0: { michael@0: if (!recovers) michael@0: return; michael@0: reader_ = CompactBufferReader(recovers + snapshot.recoverOffset(), recovers + size); michael@0: readRecoverHeader(); michael@0: readInstruction(); michael@0: } michael@0: michael@0: void michael@0: RecoverReader::readRecoverHeader() michael@0: { michael@0: uint32_t bits = reader_.readUnsigned(); michael@0: michael@0: numInstructions_ = (bits & RECOVER_RINSCOUNT_MASK) >> RECOVER_RINSCOUNT_SHIFT; michael@0: resumeAfter_ = (bits & RECOVER_RESUMEAFTER_MASK) >> RECOVER_RESUMEAFTER_SHIFT; michael@0: MOZ_ASSERT(numInstructions_); michael@0: michael@0: IonSpew(IonSpew_Snapshots, "Read recover header with instructionCount %u (ra: %d)", michael@0: numInstructions_, resumeAfter_); michael@0: } michael@0: michael@0: void michael@0: RecoverReader::readInstruction() michael@0: { michael@0: MOZ_ASSERT(moreInstructions()); michael@0: RInstruction::readRecoverData(reader_, &rawData_); michael@0: numInstructionsRead_++; michael@0: } michael@0: michael@0: SnapshotOffset michael@0: SnapshotWriter::startSnapshot(RecoverOffset recoverOffset, BailoutKind kind) michael@0: { michael@0: lastStart_ = writer_.length(); michael@0: allocWritten_ = 0; michael@0: michael@0: IonSpew(IonSpew_Snapshots, "starting snapshot with recover offset %u, bailout kind %u", michael@0: recoverOffset, kind); michael@0: michael@0: JS_ASSERT(uint32_t(kind) < (1 << SNAPSHOT_BAILOUTKIND_BITS)); michael@0: JS_ASSERT(recoverOffset < (1 << SNAPSHOT_ROFFSET_BITS)); michael@0: uint32_t bits = michael@0: (uint32_t(kind) << SNAPSHOT_BAILOUTKIND_SHIFT) | michael@0: (recoverOffset << SNAPSHOT_ROFFSET_SHIFT); michael@0: michael@0: writer_.writeUnsigned(bits); michael@0: return lastStart_; michael@0: } michael@0: michael@0: #ifdef TRACK_SNAPSHOTS michael@0: void michael@0: SnapshotWriter::trackSnapshot(uint32_t pcOpcode, uint32_t mirOpcode, uint32_t mirId, michael@0: uint32_t lirOpcode, uint32_t lirId) michael@0: { michael@0: writer_.writeUnsigned(pcOpcode); michael@0: writer_.writeUnsigned(mirOpcode); michael@0: writer_.writeUnsigned(mirId); michael@0: writer_.writeUnsigned(lirOpcode); michael@0: writer_.writeUnsigned(lirId); michael@0: } michael@0: #endif michael@0: michael@0: 

bool
SnapshotWriter::add(const RValueAllocation &alloc)
{
    MOZ_ASSERT(allocMap_.initialized());

    uint32_t offset;
    RValueAllocMap::AddPtr p = allocMap_.lookupForAdd(alloc);
    if (!p) {
        offset = allocWriter_.length();
        alloc.write(allocWriter_);
        if (!allocMap_.add(p, alloc, offset))
            return false;
    } else {
        offset = p->value();
    }

    if (IonSpewEnabled(IonSpew_Snapshots)) {
        IonSpewHeader(IonSpew_Snapshots);
        fprintf(IonSpewFile, "  slot %u (%d): ", allocWritten_, offset);
        alloc.dump(IonSpewFile);
        fprintf(IonSpewFile, "\n");
    }

    allocWritten_++;
    writer_.writeUnsigned(offset / ALLOCATION_TABLE_ALIGNMENT);
    return true;
}

void
SnapshotWriter::endSnapshot()
{
    // Place a sentinel for asserting on the other end.
#ifdef DEBUG
    writer_.writeSigned(-1);
#endif

    IonSpew(IonSpew_Snapshots, "ending snapshot total size: %u bytes (start %u)",
            uint32_t(writer_.length() - lastStart_), lastStart_);
}

RecoverOffset
RecoverWriter::startRecover(uint32_t frameCount, bool resumeAfter)
{
    MOZ_ASSERT(frameCount);
    nframes_ = frameCount;
    framesWritten_ = 0;

    IonSpew(IonSpew_Snapshots, "starting recover with frameCount %u",
            frameCount);

    MOZ_ASSERT(!(uint32_t(resumeAfter) & ~RECOVER_RESUMEAFTER_MASK));
    MOZ_ASSERT(frameCount < uint32_t(1 << RECOVER_RINSCOUNT_BITS));
    uint32_t bits =
        (uint32_t(resumeAfter) << RECOVER_RESUMEAFTER_SHIFT) |
        (frameCount << RECOVER_RINSCOUNT_SHIFT);

    RecoverOffset recoverOffset = writer_.length();
    writer_.writeUnsigned(bits);
    return recoverOffset;
}

bool
RecoverWriter::writeFrame(const MResumePoint *rp)
{
    if (!rp->writeRecoverData(writer_))
        return false;
    framesWritten_++;
    return true;
}

void
RecoverWriter::endRecover()
{
    JS_ASSERT(nframes_ == framesWritten_);
}
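
// Editorial sketch of the intended calling sequence, inferred from the
// writer/reader interfaces above (the real call sites live in the Ion code
// generator and in the bailout machinery, not in this file):
//
//   RecoverOffset ro = recoverWriter.startRecover(frameCount, resumeAfter);
//   for each MResumePoint *rp, from oldest to newest:
//       recoverWriter.writeFrame(rp);
//   recoverWriter.endRecover();
//
//   snapshotWriter.init();
//   SnapshotOffset so = snapshotWriter.startSnapshot(ro, bailoutKind);
//   for each recovered value:
//       snapshotWriter.add(RValueAllocation(...));
//   snapshotWriter.endSnapshot();
//
// During a bailout, SnapshotReader reads the header back (bailout kind and
// recover offset), RecoverReader walks the recover instructions, and
// readAllocation() turns each stored index back into an RValueAllocation by
// seeking into the aligned allocation table.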