js/src/jit/Snapshots.cpp

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double key logic according to private browsing mode or
the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts, with some reservations
about how to convince FindCookie users to test the condition and pass a
nullptr when double key logic is disabled.

michael@0 1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
michael@0 2 * vim: set ts=8 sts=4 et sw=4 tw=99:
michael@0 3 * This Source Code Form is subject to the terms of the Mozilla Public
michael@0 4 * License, v. 2.0. If a copy of the MPL was not distributed with this
michael@0 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 6
michael@0 7 #include "jit/Snapshots.h"
michael@0 8
michael@0 9 #include "jsscript.h"
michael@0 10
michael@0 11 #include "jit/CompileInfo.h"
michael@0 12 #include "jit/IonSpewer.h"
michael@0 13 #ifdef TRACK_SNAPSHOTS
michael@0 14 # include "jit/LIR.h"
michael@0 15 #endif
michael@0 16 #include "jit/MIR.h"
michael@0 17 #include "jit/Recover.h"
michael@0 18
michael@0 19 using namespace js;
michael@0 20 using namespace js::jit;
michael@0 21
michael@0 22 // Snapshot header:
michael@0 23 //
michael@0 24 // [vwu] bits [n, 31]: offset into the recover buffer (the resume-after
michael@0 25 // bit and the frame count are stored in the recover header)
michael@0 26 // bits [0, n): bailout kind (n = SNAPSHOT_BAILOUTKIND_BITS)
michael@0 27 //
michael@0 28 // Snapshot body, repeated "frame count" times, from oldest frame to newest frame.
michael@0 29 // Note that the first frame doesn't have the "parent PC" field.
michael@0 30 //
michael@0 31 // [ptr] Debug only: JSScript *
michael@0 32 // [vwu] pc offset
michael@0 33 // [vwu] number of RVA indexes, including nargs
michael@0 34 // [vwu*] List of indexes to R(ecover)ValueAllocation table. Contains
michael@0 35 // nargs + nfixed + stackDepth items.
michael@0 36 //
michael@0 37 // Recover value allocations are encoded at the end of the Snapshot buffer, and
michael@0 38 // they are padded to ALLOCATION_TABLE_ALIGNMENT. The encoding of each
michael@0 39 // allocation is determined by the RValueAllocation::Layout, which can be
michael@0 40 // obtained from the RValueAllocation::Mode with the layoutFromMode function.
michael@0 41 // The layout structure lists the types of payload which are used to
michael@0 42 // serialize / deserialize / dump the content of the allocations.
michael@0 43 //
michael@0 44 // R(ecover)ValueAllocation items:
michael@0 45 // [u8'] Mode, which defines the types of the payloads as well as their
michael@0 46 // interpretation.
michael@0 47 // [pld] first payload (packed tag, index, stack offset, register, ...)
michael@0 48 // [pld] second payload (register, stack offset, none)
michael@0 49 //
michael@0 50 // Modes:
michael@0 51 // CONSTANT [INDEX]
michael@0 52 // Index into the constant pool.
michael@0 53 //
michael@0 54 // CST_UNDEFINED []
michael@0 55 // Constant value which corresponds to the "undefined" JS value.
michael@0 56 //
michael@0 57 // CST_NULL []
michael@0 58 // Constant value which corresponds to the "null" JS value.
michael@0 59 //
michael@0 60 // DOUBLE_REG [FPU_REG]
michael@0 61 // Double value stored in an FPU register.
michael@0 62 //
michael@0 63 // FLOAT32_REG [FPU_REG]
michael@0 64 // 32-bit float value stored in an FPU register.
michael@0 65 //
michael@0 66 // FLOAT32_STACK [STACK_OFFSET]
michael@0 67 // 32-bit float value stored on the stack.
michael@0 68 //
michael@0 69 // UNTYPED_REG [GPR_REG]
michael@0 70 // UNTYPED_STACK [STACK_OFFSET]
michael@0 71 // UNTYPED_REG_REG [GPR_REG, GPR_REG]
michael@0 72 // UNTYPED_REG_STACK [GPR_REG, STACK_OFFSET]
michael@0 73 // UNTYPED_STACK_REG [STACK_OFFSET, GPR_REG]
michael@0 74 // UNTYPED_STACK_STACK [STACK_OFFSET, STACK_OFFSET]
michael@0 75 // Value with a dynamically known type. On 32-bit architectures, the
michael@0 76 // first register/stack offset corresponds to the holder of the type,
michael@0 77 // and the second corresponds to the payload of the JS Value.
michael@0 78 //
michael@0 79 // TYPED_REG [PACKED_TAG, GPR_REG]:
michael@0 80 // Value with statically known type, whose payload is stored in a
michael@0 81 // register.
michael@0 82 //
michael@0 83 // TYPED_STACK [PACKED_TAG, STACK_OFFSET]:
michael@0 84 // Value with statically known type, whose payload is stored at an
michael@0 85 // offset on the stack.
michael@0 86 //
michael@0 87 // Encodings:
michael@0 88 // [ptr] A fixed-size pointer.
michael@0 89 // [vwu] A variable-width unsigned integer.
michael@0 90 // [vws] A variable-width signed integer.
michael@0 91 // [u8] An 8-bit unsigned integer.
michael@0 92 // [u8'] An 8-bit unsigned integer which is potentially extended with packed
michael@0 93 // data.
michael@0 94 // [u8"] Packed data which is stored and packed in the previous [u8'].
michael@0 95 // [vwu*] A list of variable-width unsigned integers.
michael@0 96 // [pld] Payload of Recover Value Allocation:
michael@0 97 // PAYLOAD_NONE:
michael@0 98 // There is no payload.
michael@0 99 //
michael@0 100 // PAYLOAD_INDEX:
michael@0 101 // [vwu] Index, such as the constant pool index.
michael@0 102 //
michael@0 103 // PAYLOAD_STACK_OFFSET:
michael@0 104 // [vws] Stack offset based on the base of the Ion frame.
michael@0 105 //
michael@0 106 // PAYLOAD_GPR:
michael@0 107 // [u8] Code of the general register.
michael@0 108 //
michael@0 109 // PAYLOAD_FPU:
michael@0 110 // [u8] Code of the FPU register.
michael@0 111 //
michael@0 112 // PAYLOAD_PACKED_TAG:
michael@0 113 // [u8"] The JSValueType is encoded in the low 3 bits of the Mode
michael@0 114 // byte of the RValueAllocation.
michael@0 115 //
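// A minimal sketch of how the PACKED_TAG payload shares the mode byte,
// following readPayload() and writePayload() below (the variable names here
// are illustrative only):
//
//   uint8_t mode = reader.readByte();             // mode byte with packed tag
//   JSValueType type = JSValueType(mode & 0x07);  // tag lives in bits 0-2
//   mode &= ~0x07;                                // remaining bits identify the mode
//
// The writer performs the inverse operation and ORs the JSValueType into the
// low bits of the mode byte it has already emitted (PAYLOAD_PACKED_TAG case).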
michael@0 116
michael@0 117 const RValueAllocation::Layout &
michael@0 118 RValueAllocation::layoutFromMode(Mode mode)
michael@0 119 {
michael@0 120 switch (mode) {
michael@0 121 case CONSTANT: {
michael@0 122 static const RValueAllocation::Layout layout = {
michael@0 123 PAYLOAD_INDEX,
michael@0 124 PAYLOAD_NONE,
michael@0 125 "constant"
michael@0 126 };
michael@0 127 return layout;
michael@0 128 }
michael@0 129
michael@0 130 case CST_UNDEFINED: {
michael@0 131 static const RValueAllocation::Layout layout = {
michael@0 132 PAYLOAD_NONE,
michael@0 133 PAYLOAD_NONE,
michael@0 134 "undefined"
michael@0 135 };
michael@0 136 return layout;
michael@0 137 }
michael@0 138
michael@0 139 case CST_NULL: {
michael@0 140 static const RValueAllocation::Layout layout = {
michael@0 141 PAYLOAD_NONE,
michael@0 142 PAYLOAD_NONE,
michael@0 143 "null"
michael@0 144 };
michael@0 145 return layout;
michael@0 146 }
michael@0 147
michael@0 148 case DOUBLE_REG: {
michael@0 149 static const RValueAllocation::Layout layout = {
michael@0 150 PAYLOAD_FPU,
michael@0 151 PAYLOAD_NONE,
michael@0 152 "double"
michael@0 153 };
michael@0 154 return layout;
michael@0 155 }
michael@0 156 case FLOAT32_REG: {
michael@0 157 static const RValueAllocation::Layout layout = {
michael@0 158 PAYLOAD_FPU,
michael@0 159 PAYLOAD_NONE,
michael@0 160 "float32"
michael@0 161 };
michael@0 162 return layout;
michael@0 163 }
michael@0 164 case FLOAT32_STACK: {
michael@0 165 static const RValueAllocation::Layout layout = {
michael@0 166 PAYLOAD_STACK_OFFSET,
michael@0 167 PAYLOAD_NONE,
michael@0 168 "float32"
michael@0 169 };
michael@0 170 return layout;
michael@0 171 }
michael@0 172 #if defined(JS_NUNBOX32)
michael@0 173 case UNTYPED_REG_REG: {
michael@0 174 static const RValueAllocation::Layout layout = {
michael@0 175 PAYLOAD_GPR,
michael@0 176 PAYLOAD_GPR,
michael@0 177 "value"
michael@0 178 };
michael@0 179 return layout;
michael@0 180 }
michael@0 181 case UNTYPED_REG_STACK: {
michael@0 182 static const RValueAllocation::Layout layout = {
michael@0 183 PAYLOAD_GPR,
michael@0 184 PAYLOAD_STACK_OFFSET,
michael@0 185 "value"
michael@0 186 };
michael@0 187 return layout;
michael@0 188 }
michael@0 189 case UNTYPED_STACK_REG: {
michael@0 190 static const RValueAllocation::Layout layout = {
michael@0 191 PAYLOAD_STACK_OFFSET,
michael@0 192 PAYLOAD_GPR,
michael@0 192 "value"
michael@0 193 };
michael@0 194 return layout;
michael@0 195 }
michael@0 196 case UNTYPED_STACK_STACK: {
michael@0 197 static const RValueAllocation::Layout layout = {
michael@0 198 PAYLOAD_STACK_OFFSET,
michael@0 199 PAYLOAD_STACK_OFFSET,
michael@0 200 "value"
michael@0 201 };
michael@0 202 return layout;
michael@0 203 }
michael@0 204 #elif defined(JS_PUNBOX64)
michael@0 205 case UNTYPED_REG: {
michael@0 206 static const RValueAllocation::Layout layout = {
michael@0 207 PAYLOAD_GPR,
michael@0 208 PAYLOAD_NONE,
michael@0 209 "value"
michael@0 210 };
michael@0 211 return layout;
michael@0 212 }
michael@0 213 case UNTYPED_STACK: {
michael@0 214 static const RValueAllocation::Layout layout = {
michael@0 215 PAYLOAD_STACK_OFFSET,
michael@0 216 PAYLOAD_NONE,
michael@0 217 "value"
michael@0 218 };
michael@0 219 return layout;
michael@0 220 }
michael@0 221 #endif
michael@0 222 default: {
michael@0 223 static const RValueAllocation::Layout regLayout = {
michael@0 224 PAYLOAD_PACKED_TAG,
michael@0 225 PAYLOAD_GPR,
michael@0 226 "typed value"
michael@0 227 };
michael@0 228
michael@0 229 static const RValueAllocation::Layout stackLayout = {
michael@0 230 PAYLOAD_PACKED_TAG,
michael@0 231 PAYLOAD_STACK_OFFSET,
michael@0 232 "typed value"
michael@0 233 };
michael@0 234
michael@0 235 if (mode >= TYPED_REG_MIN && mode <= TYPED_REG_MAX)
michael@0 236 return regLayout;
michael@0 237 if (mode >= TYPED_STACK_MIN && mode <= TYPED_STACK_MAX)
michael@0 238 return stackLayout;
michael@0 239 }
michael@0 240 }
michael@0 241
michael@0 242 MOZ_ASSUME_UNREACHABLE("Wrong mode type?");
michael@0 243 }
michael@0 244
michael@0 245 // Pad serialized RValueAllocations to a multiple of X bytes in the allocation
michael@0 246 // buffer. By padding serialized value allocations, we are building an
michael@0 247 // indexable table of elements of X bytes, and thus we can safely divide any
michael@0 248 // offset within the buffer by X to obtain an index.
michael@0 249 //
michael@0 250 // By padding, we lose space within the allocation buffer, but we multiply
michael@0 251 // by X the number of indexes that we can store in one byte in each
michael@0 252 // snapshot.
michael@0 253 //
michael@0 254 // Some value allocations take more than X bytes to encode, in which case we
michael@0 255 // pad to a multiple of X and waste indexes. The choice of X should balance
michael@0 256 // the padding wasted on serialized value allocations against the savings
michael@0 257 // made on snapshot indexes.
michael@0 258 static const size_t ALLOCATION_TABLE_ALIGNMENT = 2; /* bytes */
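// For example, with ALLOCATION_TABLE_ALIGNMENT == 2, an allocation serialized
// at byte offset 6 of the table is referenced from snapshots as index 3
// (offset / alignment, see SnapshotWriter::add below), and the reader recovers
// the byte offset by multiplying the index back by the alignment (see
// SnapshotReader::readAllocation below).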
michael@0 259
michael@0 260 void
michael@0 261 RValueAllocation::readPayload(CompactBufferReader &reader, PayloadType type,
michael@0 262 uint8_t *mode, Payload *p)
michael@0 263 {
michael@0 264 switch (type) {
michael@0 265 case PAYLOAD_NONE:
michael@0 266 break;
michael@0 267 case PAYLOAD_INDEX:
michael@0 268 p->index = reader.readUnsigned();
michael@0 269 break;
michael@0 270 case PAYLOAD_STACK_OFFSET:
michael@0 271 p->stackOffset = reader.readSigned();
michael@0 272 break;
michael@0 273 case PAYLOAD_GPR:
michael@0 274 p->gpr = Register::FromCode(reader.readByte());
michael@0 275 break;
michael@0 276 case PAYLOAD_FPU:
michael@0 277 p->fpu = FloatRegister::FromCode(reader.readByte());
michael@0 278 break;
michael@0 279 case PAYLOAD_PACKED_TAG:
michael@0 280 p->type = JSValueType(*mode & 0x07);
michael@0 281 *mode = *mode & ~0x07;
michael@0 282 break;
michael@0 283 }
michael@0 284 }
michael@0 285
michael@0 286 RValueAllocation
michael@0 287 RValueAllocation::read(CompactBufferReader &reader)
michael@0 288 {
michael@0 289 uint8_t mode = reader.readByte();
michael@0 290 const Layout &layout = layoutFromMode(Mode(mode));
michael@0 291 Payload arg1, arg2;
michael@0 292
michael@0 293 readPayload(reader, layout.type1, &mode, &arg1);
michael@0 294 readPayload(reader, layout.type2, &mode, &arg2);
michael@0 295 return RValueAllocation(Mode(mode), arg1, arg2);
michael@0 296 }
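// A minimal usage sketch (hypothetical caller, not taken from this file):
// an allocation written with write() below should round-trip through read(),
// since both sides are driven by the same Layout.
//
//   CompactBufferWriter w;
//   alloc.write(w);                                   // mode, payloads, padding
//   CompactBufferReader r(w.buffer(), w.buffer() + w.length());
//   RValueAllocation copy = RValueAllocation::read(r);
//   // copy describes the same allocation as alloc (compare payload by
//   // payload with equalPayloads() below).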
michael@0 297
michael@0 298 void
michael@0 299 RValueAllocation::writePayload(CompactBufferWriter &writer, PayloadType type,
michael@0 300 Payload p)
michael@0 301 {
michael@0 302 switch (type) {
michael@0 303 case PAYLOAD_NONE:
michael@0 304 break;
michael@0 305 case PAYLOAD_INDEX:
michael@0 306 writer.writeUnsigned(p.index);
michael@0 307 break;
michael@0 308 case PAYLOAD_STACK_OFFSET:
michael@0 309 writer.writeSigned(p.stackOffset);
michael@0 310 break;
michael@0 311 case PAYLOAD_GPR:
michael@0 312 static_assert(Registers::Total <= 0x100,
michael@0 313 "Not enough bytes to encode all registers.");
michael@0 314 writer.writeByte(p.gpr.code());
michael@0 315 break;
michael@0 316 case PAYLOAD_FPU:
michael@0 317 static_assert(FloatRegisters::Total <= 0x100,
michael@0 318 "Not enough bytes to encode all float registers.");
michael@0 319 writer.writeByte(p.fpu.code());
michael@0 320 break;
michael@0 321 case PAYLOAD_PACKED_TAG: {
michael@0 322 // This code assumes that the PACKED_TAG payload immediately follows the
michael@0 323 // writeByte of the mode.
michael@0 324 MOZ_ASSERT(writer.length());
michael@0 325 uint8_t *mode = writer.buffer() + (writer.length() - 1);
michael@0 326 MOZ_ASSERT((*mode & 0x07) == 0 && (p.type & ~0x07) == 0);
michael@0 327 *mode = *mode | p.type;
michael@0 328 break;
michael@0 329 }
michael@0 330 }
michael@0 331 }
michael@0 332
michael@0 333 void
michael@0 334 RValueAllocation::writePadding(CompactBufferWriter &writer)
michael@0 335 {
michael@0 336 // Write 0x7f in all padding bytes.
michael@0 337 while (writer.length() % ALLOCATION_TABLE_ALIGNMENT)
michael@0 338 writer.writeByte(0x7f);
michael@0 339 }
michael@0 340
michael@0 341 void
michael@0 342 RValueAllocation::write(CompactBufferWriter &writer) const
michael@0 343 {
michael@0 344 const Layout &layout = layoutFromMode(mode());
michael@0 345 MOZ_ASSERT(layout.type2 != PAYLOAD_PACKED_TAG);
michael@0 346 MOZ_ASSERT(writer.length() % ALLOCATION_TABLE_ALIGNMENT == 0);
michael@0 347
michael@0 348 writer.writeByte(mode_);
michael@0 349 writePayload(writer, layout.type1, arg1_);
michael@0 350 writePayload(writer, layout.type2, arg2_);
michael@0 351 writePadding(writer);
michael@0 352 }
michael@0 353
michael@0 354 HashNumber
michael@0 355 RValueAllocation::hash() const {
michael@0 356 CompactBufferWriter writer;
michael@0 357 write(writer);
michael@0 358
michael@0 359 // We should never OOM because the compact buffer writer has 32 inlined
michael@0 360 // bytes, and in the worst-case scenario we only encode 12 bytes
michael@0 361 // (12 == mode + signed + signed + pad).
michael@0 362 MOZ_ASSERT(!writer.oom());
michael@0 363 MOZ_ASSERT(writer.length() <= 12);
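// (Worst case per writePayload: 1 mode byte, plus two variable-width signed
// stack offsets of up to 5 bytes each, assuming the usual 32-bit
// variable-width encoding, plus 1 byte of padding == 12.)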
michael@0 364
michael@0 365 HashNumber res = 0;
michael@0 366 for (size_t i = 0; i < writer.length(); i++) {
michael@0 367 res = ((res << 8) | (res >> (sizeof(res) - 1)));
michael@0 368 res ^= writer.buffer()[i];
michael@0 369 }
michael@0 370 return res;
michael@0 371 }
michael@0 372
michael@0 373 static const char *
michael@0 374 ValTypeToString(JSValueType type)
michael@0 375 {
michael@0 376 switch (type) {
michael@0 377 case JSVAL_TYPE_INT32:
michael@0 378 return "int32_t";
michael@0 379 case JSVAL_TYPE_DOUBLE:
michael@0 380 return "double";
michael@0 381 case JSVAL_TYPE_STRING:
michael@0 382 return "string";
michael@0 383 case JSVAL_TYPE_BOOLEAN:
michael@0 384 return "boolean";
michael@0 385 case JSVAL_TYPE_OBJECT:
michael@0 386 return "object";
michael@0 387 case JSVAL_TYPE_MAGIC:
michael@0 388 return "magic";
michael@0 389 default:
michael@0 390 MOZ_ASSUME_UNREACHABLE("no payload");
michael@0 391 }
michael@0 392 }
michael@0 393
michael@0 394 void
michael@0 395 RValueAllocation::dumpPayload(FILE *fp, PayloadType type, Payload p)
michael@0 396 {
michael@0 397 switch (type) {
michael@0 398 case PAYLOAD_NONE:
michael@0 399 break;
michael@0 400 case PAYLOAD_INDEX:
michael@0 401 fprintf(fp, "index %u", p.index);
michael@0 402 break;
michael@0 403 case PAYLOAD_STACK_OFFSET:
michael@0 404 fprintf(fp, "stack %d", p.stackOffset);
michael@0 405 break;
michael@0 406 case PAYLOAD_GPR:
michael@0 407 fprintf(fp, "reg %s", p.gpr.name());
michael@0 408 break;
michael@0 409 case PAYLOAD_FPU:
michael@0 410 fprintf(fp, "reg %s", p.fpu.name());
michael@0 411 break;
michael@0 412 case PAYLOAD_PACKED_TAG:
michael@0 413 fprintf(fp, "%s", ValTypeToString(p.type));
michael@0 414 break;
michael@0 415 }
michael@0 416 }
michael@0 417
michael@0 418 void
michael@0 419 RValueAllocation::dump(FILE *fp) const
michael@0 420 {
michael@0 421 const Layout &layout = layoutFromMode(mode());
michael@0 422 fprintf(fp, "%s", layout.name);
michael@0 423
michael@0 424 if (layout.type1 != PAYLOAD_NONE)
michael@0 425 fprintf(fp, " (");
michael@0 426 dumpPayload(fp, layout.type1, arg1_);
michael@0 427 if (layout.type2 != PAYLOAD_NONE)
michael@0 428 fprintf(fp, ", ");
michael@0 429 dumpPayload(fp, layout.type2, arg2_);
michael@0 430 if (layout.type1 != PAYLOAD_NONE)
michael@0 431 fprintf(fp, ")");
michael@0 432 }
michael@0 433
michael@0 434 bool
michael@0 435 RValueAllocation::equalPayloads(PayloadType type, Payload lhs, Payload rhs)
michael@0 436 {
michael@0 437 switch (type) {
michael@0 438 case PAYLOAD_NONE:
michael@0 439 return true;
michael@0 440 case PAYLOAD_INDEX:
michael@0 441 return lhs.index == rhs.index;
michael@0 442 case PAYLOAD_STACK_OFFSET:
michael@0 443 return lhs.stackOffset == rhs.stackOffset;
michael@0 444 case PAYLOAD_GPR:
michael@0 445 return lhs.gpr == rhs.gpr;
michael@0 446 case PAYLOAD_FPU:
michael@0 447 return lhs.fpu == rhs.fpu;
michael@0 448 case PAYLOAD_PACKED_TAG:
michael@0 449 return lhs.type == rhs.type;
michael@0 450 }
michael@0 451
michael@0 452 return false;
michael@0 453 }
michael@0 454
michael@0 455 SnapshotReader::SnapshotReader(const uint8_t *snapshots, uint32_t offset,
michael@0 456 uint32_t RVATableSize, uint32_t listSize)
michael@0 457 : reader_(snapshots + offset, snapshots + listSize),
michael@0 458 allocReader_(snapshots + listSize, snapshots + listSize + RVATableSize),
michael@0 459 allocTable_(snapshots + listSize),
michael@0 460 allocRead_(0)
michael@0 461 {
michael@0 462 if (!snapshots)
michael@0 463 return;
michael@0 464 IonSpew(IonSpew_Snapshots, "Creating snapshot reader");
michael@0 465 readSnapshotHeader();
michael@0 466 }
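// Buffer layout implied by the constructor above: the snapshot list occupies
// bytes [0, listSize) of the snapshots buffer (reader_ starts at `offset`
// within it), and the RValueAllocation table occupies the following
// RVATableSize bytes, read by allocReader_ with allocTable_ as its base.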
michael@0 467
michael@0 468 #define COMPUTE_SHIFT_AFTER_(name) (name ## _BITS + name ##_SHIFT)
michael@0 469 #define COMPUTE_MASK_(name) (((uint32_t(1) << name ## _BITS) - 1) << name ## _SHIFT)
michael@0 470
michael@0 471 // Details of snapshot header packing.
michael@0 472 static const uint32_t SNAPSHOT_BAILOUTKIND_SHIFT = 0;
michael@0 473 static const uint32_t SNAPSHOT_BAILOUTKIND_BITS = 3;
michael@0 474 static const uint32_t SNAPSHOT_BAILOUTKIND_MASK = COMPUTE_MASK_(SNAPSHOT_BAILOUTKIND);
michael@0 475
michael@0 476 static const uint32_t SNAPSHOT_ROFFSET_SHIFT = COMPUTE_SHIFT_AFTER_(SNAPSHOT_BAILOUTKIND);
michael@0 477 static const uint32_t SNAPSHOT_ROFFSET_BITS = 32 - SNAPSHOT_ROFFSET_SHIFT;
michael@0 478 static const uint32_t SNAPSHOT_ROFFSET_MASK = COMPUTE_MASK_(SNAPSHOT_ROFFSET);
michael@0 479
michael@0 480 // Details of recover header packing.
michael@0 481 static const uint32_t RECOVER_RESUMEAFTER_SHIFT = 0;
michael@0 482 static const uint32_t RECOVER_RESUMEAFTER_BITS = 1;
michael@0 483 static const uint32_t RECOVER_RESUMEAFTER_MASK = COMPUTE_MASK_(RECOVER_RESUMEAFTER);
michael@0 484
michael@0 485 static const uint32_t RECOVER_RINSCOUNT_SHIFT = COMPUTE_SHIFT_AFTER_(RECOVER_RESUMEAFTER);
michael@0 486 static const uint32_t RECOVER_RINSCOUNT_BITS = 32 - RECOVER_RINSCOUNT_SHIFT;
michael@0 487 static const uint32_t RECOVER_RINSCOUNT_MASK = COMPUTE_MASK_(RECOVER_RINSCOUNT);
michael@0 488
michael@0 489 #undef COMPUTE_MASK_
michael@0 490 #undef COMPUTE_SHIFT_AFTER_
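// A worked example of the packing above, with arbitrarily chosen values: for
// a bailout kind of 2 and a recover offset of 40, startSnapshot() below
// encodes
//
//   (2 << SNAPSHOT_BAILOUTKIND_SHIFT) | (40 << SNAPSHOT_ROFFSET_SHIFT) == 0x142
//
// (with SNAPSHOT_BAILOUTKIND_BITS == 3, so SNAPSHOT_ROFFSET_SHIFT == 3), and
// readSnapshotHeader() recovers both fields with the corresponding masks and
// shifts.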
michael@0 491
michael@0 492 void
michael@0 493 SnapshotReader::readSnapshotHeader()
michael@0 494 {
michael@0 495 uint32_t bits = reader_.readUnsigned();
michael@0 496
michael@0 497 bailoutKind_ = BailoutKind((bits & SNAPSHOT_BAILOUTKIND_MASK) >> SNAPSHOT_BAILOUTKIND_SHIFT);
michael@0 498 recoverOffset_ = (bits & SNAPSHOT_ROFFSET_MASK) >> SNAPSHOT_ROFFSET_SHIFT;
michael@0 499
michael@0 500 IonSpew(IonSpew_Snapshots, "Read snapshot header with bailout kind %u",
michael@0 501 bailoutKind_);
michael@0 502
michael@0 503 #ifdef TRACK_SNAPSHOTS
michael@0 504 readTrackSnapshot();
michael@0 505 #endif
michael@0 506 }
michael@0 507
michael@0 508 #ifdef TRACK_SNAPSHOTS
michael@0 509 void
michael@0 510 SnapshotReader::readTrackSnapshot()
michael@0 511 {
michael@0 512 pcOpcode_ = reader_.readUnsigned();
michael@0 513 mirOpcode_ = reader_.readUnsigned();
michael@0 514 mirId_ = reader_.readUnsigned();
michael@0 515 lirOpcode_ = reader_.readUnsigned();
michael@0 516 lirId_ = reader_.readUnsigned();
michael@0 517 }
michael@0 518
michael@0 519 void
michael@0 520 SnapshotReader::spewBailingFrom() const
michael@0 521 {
michael@0 522 if (IonSpewEnabled(IonSpew_Bailouts)) {
michael@0 523 IonSpewHeader(IonSpew_Bailouts);
michael@0 524 fprintf(IonSpewFile, " bailing from bytecode: %s, MIR: ", js_CodeName[pcOpcode_]);
michael@0 525 MDefinition::PrintOpcodeName(IonSpewFile, MDefinition::Opcode(mirOpcode_));
michael@0 526 fprintf(IonSpewFile, " [%u], LIR: ", mirId_);
michael@0 527 LInstruction::printName(IonSpewFile, LInstruction::Opcode(lirOpcode_));
michael@0 528 fprintf(IonSpewFile, " [%u]", lirId_);
michael@0 529 fprintf(IonSpewFile, "\n");
michael@0 530 }
michael@0 531 }
michael@0 532 #endif
michael@0 533
michael@0 534 uint32_t
michael@0 535 SnapshotReader::readAllocationIndex()
michael@0 536 {
michael@0 537 allocRead_++;
michael@0 538 return reader_.readUnsigned();
michael@0 539 }
michael@0 540
michael@0 541 RValueAllocation
michael@0 542 SnapshotReader::readAllocation()
michael@0 543 {
michael@0 544 IonSpew(IonSpew_Snapshots, "Reading slot %u", allocRead_);
michael@0 545 uint32_t offset = readAllocationIndex() * ALLOCATION_TABLE_ALIGNMENT;
michael@0 546 allocReader_.seek(allocTable_, offset);
michael@0 547 return RValueAllocation::read(allocReader_);
michael@0 548 }
michael@0 549
michael@0 550 bool
michael@0 551 SnapshotWriter::init()
michael@0 552 {
michael@0 553 // Based on the measurements made in Bug 962555 comment 20, this should be
michael@0 554 // enough to prevent the reallocation of the hash table for at least half of
michael@0 555 // the compilations.
michael@0 556 return allocMap_.init(32);
michael@0 557 }
michael@0 558
michael@0 559 RecoverReader::RecoverReader(SnapshotReader &snapshot, const uint8_t *recovers, uint32_t size)
michael@0 560 : reader_(nullptr, nullptr),
michael@0 561 numInstructions_(0),
michael@0 562 numInstructionsRead_(0)
michael@0 563 {
michael@0 564 if (!recovers)
michael@0 565 return;
michael@0 566 reader_ = CompactBufferReader(recovers + snapshot.recoverOffset(), recovers + size);
michael@0 567 readRecoverHeader();
michael@0 568 readInstruction();
michael@0 569 }
michael@0 570
michael@0 571 void
michael@0 572 RecoverReader::readRecoverHeader()
michael@0 573 {
michael@0 574 uint32_t bits = reader_.readUnsigned();
michael@0 575
michael@0 576 numInstructions_ = (bits & RECOVER_RINSCOUNT_MASK) >> RECOVER_RINSCOUNT_SHIFT;
michael@0 577 resumeAfter_ = (bits & RECOVER_RESUMEAFTER_MASK) >> RECOVER_RESUMEAFTER_SHIFT;
michael@0 578 MOZ_ASSERT(numInstructions_);
michael@0 579
michael@0 580 IonSpew(IonSpew_Snapshots, "Read recover header with instructionCount %u (ra: %d)",
michael@0 581 numInstructions_, resumeAfter_);
michael@0 582 }
michael@0 583
michael@0 584 void
michael@0 585 RecoverReader::readInstruction()
michael@0 586 {
michael@0 587 MOZ_ASSERT(moreInstructions());
michael@0 588 RInstruction::readRecoverData(reader_, &rawData_);
michael@0 589 numInstructionsRead_++;
michael@0 590 }
michael@0 591
michael@0 592 SnapshotOffset
michael@0 593 SnapshotWriter::startSnapshot(RecoverOffset recoverOffset, BailoutKind kind)
michael@0 594 {
michael@0 595 lastStart_ = writer_.length();
michael@0 596 allocWritten_ = 0;
michael@0 597
michael@0 598 IonSpew(IonSpew_Snapshots, "starting snapshot with recover offset %u, bailout kind %u",
michael@0 599 recoverOffset, kind);
michael@0 600
michael@0 601 JS_ASSERT(uint32_t(kind) < (1 << SNAPSHOT_BAILOUTKIND_BITS));
michael@0 602 JS_ASSERT(recoverOffset < (1 << SNAPSHOT_ROFFSET_BITS));
michael@0 603 uint32_t bits =
michael@0 604 (uint32_t(kind) << SNAPSHOT_BAILOUTKIND_SHIFT) |
michael@0 605 (recoverOffset << SNAPSHOT_ROFFSET_SHIFT);
michael@0 606
michael@0 607 writer_.writeUnsigned(bits);
michael@0 608 return lastStart_;
michael@0 609 }
michael@0 610
michael@0 611 #ifdef TRACK_SNAPSHOTS
michael@0 612 void
michael@0 613 SnapshotWriter::trackSnapshot(uint32_t pcOpcode, uint32_t mirOpcode, uint32_t mirId,
michael@0 614 uint32_t lirOpcode, uint32_t lirId)
michael@0 615 {
michael@0 616 writer_.writeUnsigned(pcOpcode);
michael@0 617 writer_.writeUnsigned(mirOpcode);
michael@0 618 writer_.writeUnsigned(mirId);
michael@0 619 writer_.writeUnsigned(lirOpcode);
michael@0 620 writer_.writeUnsigned(lirId);
michael@0 621 }
michael@0 622 #endif
michael@0 623
michael@0 624 bool
michael@0 625 SnapshotWriter::add(const RValueAllocation &alloc)
michael@0 626 {
michael@0 627 MOZ_ASSERT(allocMap_.initialized());
michael@0 628
michael@0 629 uint32_t offset;
michael@0 630 RValueAllocMap::AddPtr p = allocMap_.lookupForAdd(alloc);
michael@0 631 if (!p) {
michael@0 632 offset = allocWriter_.length();
michael@0 633 alloc.write(allocWriter_);
michael@0 634 if (!allocMap_.add(p, alloc, offset))
michael@0 635 return false;
michael@0 636 } else {
michael@0 637 offset = p->value();
michael@0 638 }
michael@0 639
michael@0 640 if (IonSpewEnabled(IonSpew_Snapshots)) {
michael@0 641 IonSpewHeader(IonSpew_Snapshots);
michael@0 642 fprintf(IonSpewFile, " slot %u (%u): ", allocWritten_, offset);
michael@0 643 alloc.dump(IonSpewFile);
michael@0 644 fprintf(IonSpewFile, "\n");
michael@0 645 }
michael@0 646
michael@0 647 allocWritten_++;
michael@0 648 writer_.writeUnsigned(offset / ALLOCATION_TABLE_ALIGNMENT);
michael@0 649 return true;
michael@0 650 }
michael@0 651
michael@0 652 void
michael@0 653 SnapshotWriter::endSnapshot()
michael@0 654 {
michael@0 655 // Place a sentinel for asserting on the other end.
michael@0 656 #ifdef DEBUG
michael@0 657 writer_.writeSigned(-1);
michael@0 658 #endif
michael@0 659
michael@0 660 IonSpew(IonSpew_Snapshots, "ending snapshot total size: %u bytes (start %u)",
michael@0 661 uint32_t(writer_.length() - lastStart_), lastStart_);
michael@0 662 }
michael@0 663
michael@0 664 RecoverOffset
michael@0 665 RecoverWriter::startRecover(uint32_t frameCount, bool resumeAfter)
michael@0 666 {
michael@0 667 MOZ_ASSERT(frameCount);
michael@0 668 nframes_ = frameCount;
michael@0 669 framesWritten_ = 0;
michael@0 670
michael@0 671 IonSpew(IonSpew_Snapshots, "starting recover with frameCount %u",
michael@0 672 frameCount);
michael@0 673
michael@0 674 MOZ_ASSERT(!(uint32_t(resumeAfter) &~ RECOVER_RESUMEAFTER_MASK));
michael@0 675 MOZ_ASSERT(frameCount < (uint32_t(1) << RECOVER_RINSCOUNT_BITS));
michael@0 676 uint32_t bits =
michael@0 677 (uint32_t(resumeAfter) << RECOVER_RESUMEAFTER_SHIFT) |
michael@0 678 (frameCount << RECOVER_RINSCOUNT_SHIFT);
michael@0 679
michael@0 680 RecoverOffset recoverOffset = writer_.length();
michael@0 681 writer_.writeUnsigned(bits);
michael@0 682 return recoverOffset;
michael@0 683 }
michael@0 684
michael@0 685 bool
michael@0 686 RecoverWriter::writeFrame(const MResumePoint *rp)
michael@0 687 {
michael@0 688 if (!rp->writeRecoverData(writer_))
michael@0 689 return false;
michael@0 690 framesWritten_++;
michael@0 691 return true;
michael@0 692 }
michael@0 693
michael@0 694 void
michael@0 695 RecoverWriter::endRecover()
michael@0 696 {
michael@0 697 JS_ASSERT(nframes_ == framesWritten_);
michael@0 698 }
