1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/js/src/jsanalyze.cpp Wed Dec 31 06:09:35 2014 +0100
1.3 @@ -0,0 +1,2399 @@
1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
1.9 +
1.10 +#include "mozilla/DebugOnly.h"
1.11 +#include "mozilla/MathAlgorithms.h"
1.12 +#include "mozilla/PodOperations.h"
1.13 +
1.14 +#include "jscntxt.h"
1.15 +#include "jscompartment.h"
1.16 +
1.17 +#include "vm/Opcodes.h"
1.18 +
1.19 +#include "jsinferinlines.h"
1.20 +#include "jsobjinlines.h"
1.21 +#include "jsopcodeinlines.h"
1.22 +
1.23 +using mozilla::DebugOnly;
1.24 +using mozilla::PodCopy;
1.25 +using mozilla::PodZero;
1.26 +using mozilla::FloorLog2;
1.27 +
1.28 +namespace js {
1.29 +namespace analyze {
1.30 +class LoopAnalysis;
1.31 +} // namespace analyze
1.32 +} // namespace js
1.33 +
1.34 +namespace mozilla {
1.35 +
1.36 +template <> struct IsPod<js::analyze::LifetimeVariable> : TrueType {};
1.37 +template <> struct IsPod<js::analyze::LoopAnalysis> : TrueType {};
1.38 +template <> struct IsPod<js::analyze::SlotValue> : TrueType {};
1.39 +template <> struct IsPod<js::analyze::SSAValue> : TrueType {};
1.40 +template <> struct IsPod<js::analyze::SSAUseChain> : TrueType {};
1.41 +
1.42 +} /* namespace mozilla */
1.43 +
1.44 +namespace js {
1.45 +namespace analyze {
1.46 +
1.47 +/*
1.48 + * There are three analyses we can perform on a JSScript, outlined below.
1.49 + * The results of all three are stored in ScriptAnalysis, but the analyses
1.50 + * themselves can be performed separately. Along with type inference results,
1.51 + * per-script analysis results are tied to the per-compartment analysis pool
1.52 + * and are freed on every garbage collection.
1.53 + *
1.54 + * - Basic bytecode analysis. For each bytecode, determine the stack depth at
1.55 + *   that point and control flow information needed for compilation. Also does
1.56 + *   a defined-variables analysis to look for local variables which have uses
1.57 + *   before definitions.
1.58 + *
1.59 + * - Lifetime analysis. Makes a backwards pass over the script to approximate
1.60 + *   the regions where each variable is live, avoiding a full fixpointing
1.61 + *   live-variables pass. This is based on the algorithm described in:
1.62 + *
1.63 + *   "Quality and Speed in Linear-scan Register Allocation"
1.64 + *   Traub et al.
1.65 + *   PLDI, 1998
1.66 + *
1.67 + * - SSA analysis of the script's variables and stack values. For each stack
1.68 + *   value popped and non-escaping local variable or argument read, determines
1.69 + *   which push(es) or write(s) produced that value.
1.70 + *
1.71 + * Intermediate type inference results are additionally stored here. The above
1.72 + * analyses are independent from type inference.
1.73 + */
1.74 +
1.75 +/* Information about a bytecode instruction. */
1.76 +class Bytecode
1.77 +{
1.78 +    friend class ScriptAnalysis;
1.79 +
1.80 +  public:
1.81 +    Bytecode() { mozilla::PodZero(this); }
1.82 +
1.83 +    /* --------- Bytecode analysis --------- */
1.84 +
1.85 +    /* Whether there are any incoming jumps to this instruction. */
1.86 +    bool jumpTarget : 1;
1.87 +
1.88 +    /* Whether there is fallthrough to this instruction from a non-branching instruction.
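Distinct from the jumpFallthrough flag below, which marks the point a conditional jump falls through to.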
*/ 1.89 + bool fallthrough : 1; 1.90 + 1.91 + /* Whether this instruction is the fall through point of a conditional jump. */ 1.92 + bool jumpFallthrough : 1; 1.93 + 1.94 + /* 1.95 + * Whether this instruction must always execute, unless the script throws 1.96 + * an exception which it does not later catch. 1.97 + */ 1.98 + bool unconditional : 1; 1.99 + 1.100 + /* Whether this instruction has been analyzed to get its output defines and stack. */ 1.101 + bool analyzed : 1; 1.102 + 1.103 + /* Whether this is a catch/finally entry point. */ 1.104 + bool exceptionEntry : 1; 1.105 + 1.106 + /* Stack depth before this opcode. */ 1.107 + uint32_t stackDepth; 1.108 + 1.109 + private: 1.110 + 1.111 + /* If this is a JSOP_LOOPHEAD or JSOP_LOOPENTRY, information about the loop. */ 1.112 + LoopAnalysis *loop; 1.113 + 1.114 + /* --------- SSA analysis --------- */ 1.115 + 1.116 + /* Generated location of each value popped by this bytecode. */ 1.117 + SSAValue *poppedValues; 1.118 + 1.119 + /* Points where values pushed or written by this bytecode are popped. */ 1.120 + SSAUseChain **pushedUses; 1.121 + 1.122 + union { 1.123 + /* 1.124 + * If this is a join point (implies jumpTarget), any slots at this 1.125 + * point which can have a different values than at the immediate 1.126 + * predecessor in the bytecode. Array is terminated by an entry with 1.127 + * a zero slot. 1.128 + */ 1.129 + SlotValue *newValues; 1.130 + 1.131 + /* 1.132 + * Vector used during SSA analysis to store values in need of merging 1.133 + * at this point. If this has incoming forward jumps and we have not 1.134 + * yet reached this point, stores values for entries on the stack and 1.135 + * for variables which have changed since the branch. If this is a loop 1.136 + * head and we haven't reached the back edge yet, stores loop phi nodes 1.137 + * for variables and entries live at the head of the loop. 1.138 + */ 1.139 + Vector<SlotValue> *pendingValues; 1.140 + }; 1.141 +}; 1.142 + 1.143 +/* 1.144 + * Information about the lifetime of a local or argument. These form a linked 1.145 + * list describing successive intervals in the program where the variable's 1.146 + * value may be live. At points in the script not in one of these segments 1.147 + * (points in a 'lifetime hole'), the variable is dead and registers containing 1.148 + * its type/payload can be discarded without needing to be synced. 1.149 + */ 1.150 +struct Lifetime 1.151 +{ 1.152 + /* 1.153 + * Start and end offsets of this lifetime. The variable is live at the 1.154 + * beginning of every bytecode in this (inclusive) range. 1.155 + */ 1.156 + uint32_t start; 1.157 + uint32_t end; 1.158 + 1.159 + /* 1.160 + * In a loop body, endpoint to extend this lifetime with if the variable is 1.161 + * live in the next iteration. 1.162 + */ 1.163 + uint32_t savedEnd; 1.164 + 1.165 + /* 1.166 + * The start of this lifetime is a bytecode writing the variable. Each 1.167 + * write to a variable is associated with a lifetime. 1.168 + */ 1.169 + bool write; 1.170 + 1.171 + /* Next lifetime. The variable is dead from this->end to next->start. */ 1.172 + Lifetime *next; 1.173 + 1.174 + Lifetime(uint32_t offset, uint32_t savedEnd, Lifetime *next) 1.175 + : start(offset), end(offset), savedEnd(savedEnd), 1.176 + write(false), next(next) 1.177 + {} 1.178 +}; 1.179 + 1.180 +/* Basic information for a loop. */ 1.181 +class LoopAnalysis 1.182 +{ 1.183 + public: 1.184 + /* Any loop this one is nested in. */ 1.185 + LoopAnalysis *parent; 1.186 + 1.187 + /* Offset of the head of the loop. 
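This is the offset of the backedge's target, which analyzeLifetimes asserts is a JSOP_LOOPHEAD op.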
*/ 1.188 + uint32_t head; 1.189 + 1.190 + /* 1.191 + * Offset of the unique jump going to the head of the loop. The code 1.192 + * between the head and the backedge forms the loop body. 1.193 + */ 1.194 + uint32_t backedge; 1.195 +}; 1.196 + 1.197 +/* Current lifetime information for a variable. */ 1.198 +struct LifetimeVariable 1.199 +{ 1.200 + /* If the variable is currently live, the lifetime segment. */ 1.201 + Lifetime *lifetime; 1.202 + 1.203 + /* If the variable is currently dead, the next live segment. */ 1.204 + Lifetime *saved; 1.205 + 1.206 + /* Jump preceding the basic block which killed this variable. */ 1.207 + uint32_t savedEnd : 31; 1.208 + 1.209 + /* If the variable needs to be kept alive until lifetime->start. */ 1.210 + bool ensured : 1; 1.211 + 1.212 + /* Whether this variable is live at offset. */ 1.213 + Lifetime * live(uint32_t offset) const { 1.214 + if (lifetime && lifetime->end >= offset) 1.215 + return lifetime; 1.216 + Lifetime *segment = lifetime ? lifetime : saved; 1.217 + while (segment && segment->start <= offset) { 1.218 + if (segment->end >= offset) 1.219 + return segment; 1.220 + segment = segment->next; 1.221 + } 1.222 + return nullptr; 1.223 + } 1.224 + 1.225 + /* 1.226 + * Get the offset of the first write to the variable in an inclusive range, 1.227 + * UINT32_MAX if the variable is not written in the range. 1.228 + */ 1.229 + uint32_t firstWrite(uint32_t start, uint32_t end) const { 1.230 + Lifetime *segment = lifetime ? lifetime : saved; 1.231 + while (segment && segment->start <= end) { 1.232 + if (segment->start >= start && segment->write) 1.233 + return segment->start; 1.234 + segment = segment->next; 1.235 + } 1.236 + return UINT32_MAX; 1.237 + } 1.238 + uint32_t firstWrite(LoopAnalysis *loop) const { 1.239 + return firstWrite(loop->head, loop->backedge); 1.240 + } 1.241 + 1.242 + /* 1.243 + * If the variable is only written once in the body of a loop, offset of 1.244 + * that write. UINT32_MAX otherwise. 1.245 + */ 1.246 + uint32_t onlyWrite(LoopAnalysis *loop) const { 1.247 + uint32_t offset = UINT32_MAX; 1.248 + Lifetime *segment = lifetime ? lifetime : saved; 1.249 + while (segment && segment->start <= loop->backedge) { 1.250 + if (segment->start >= loop->head && segment->write) { 1.251 + if (offset != UINT32_MAX) 1.252 + return UINT32_MAX; 1.253 + offset = segment->start; 1.254 + } 1.255 + segment = segment->next; 1.256 + } 1.257 + return offset; 1.258 + } 1.259 + 1.260 +#ifdef DEBUG 1.261 + void print() const; 1.262 +#endif 1.263 +}; 1.264 + 1.265 +struct SSAPhiNode; 1.266 + 1.267 +/* 1.268 + * Representation of values on stack or in slots at each point in the script. 1.269 + * Values are independent from the bytecode position, and mean the same thing 1.270 + * everywhere in the script. SSA values are immutable, except for contents of 1.271 + * the values and types in an SSAPhiNode. 1.272 + */ 1.273 +class SSAValue 1.274 +{ 1.275 + friend class ScriptAnalysis; 1.276 + 1.277 + public: 1.278 + enum Kind { 1.279 + EMPTY = 0, /* Invalid entry. */ 1.280 + PUSHED = 1, /* Value pushed by some bytecode. */ 1.281 + VAR = 2, /* Initial or written value to some argument or local. */ 1.282 + PHI = 3 /* Selector for one of several values. */ 1.283 + }; 1.284 + 1.285 + Kind kind() const { 1.286 + JS_ASSERT(u.pushed.kind == u.var.kind && u.pushed.kind == u.phi.kind); 1.287 + 1.288 + /* Use a bitmask because MSVC wants to use -1 for PHI nodes. 
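Masking with 0x3 recovers the two-bit Kind value even when the signed bitfield reads PHI back as -1, since (-1 & 0x3) == 3.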
*/ 1.289 + return (Kind) (u.pushed.kind & 0x3); 1.290 + } 1.291 + 1.292 + bool operator==(const SSAValue &o) const { 1.293 + return !memcmp(this, &o, sizeof(SSAValue)); 1.294 + } 1.295 + 1.296 + /* Accessors for values pushed by a bytecode within this script. */ 1.297 + 1.298 + uint32_t pushedOffset() const { 1.299 + JS_ASSERT(kind() == PUSHED); 1.300 + return u.pushed.offset; 1.301 + } 1.302 + 1.303 + uint32_t pushedIndex() const { 1.304 + JS_ASSERT(kind() == PUSHED); 1.305 + return u.pushed.index; 1.306 + } 1.307 + 1.308 + /* Accessors for initial and written values of arguments and (undefined) locals. */ 1.309 + 1.310 + bool varInitial() const { 1.311 + JS_ASSERT(kind() == VAR); 1.312 + return u.var.initial; 1.313 + } 1.314 + 1.315 + uint32_t varSlot() const { 1.316 + JS_ASSERT(kind() == VAR); 1.317 + return u.var.slot; 1.318 + } 1.319 + 1.320 + uint32_t varOffset() const { 1.321 + JS_ASSERT(!varInitial()); 1.322 + return u.var.offset; 1.323 + } 1.324 + 1.325 + /* Accessors for phi nodes. */ 1.326 + 1.327 + uint32_t phiSlot() const; 1.328 + uint32_t phiLength() const; 1.329 + const SSAValue &phiValue(uint32_t i) const; 1.330 + 1.331 + /* Offset at which this phi node was created. */ 1.332 + uint32_t phiOffset() const { 1.333 + JS_ASSERT(kind() == PHI); 1.334 + return u.phi.offset; 1.335 + } 1.336 + 1.337 + SSAPhiNode *phiNode() const { 1.338 + JS_ASSERT(kind() == PHI); 1.339 + return u.phi.node; 1.340 + } 1.341 + 1.342 + /* Other accessors. */ 1.343 + 1.344 +#ifdef DEBUG 1.345 + void print() const; 1.346 +#endif 1.347 + 1.348 + void clear() { 1.349 + mozilla::PodZero(this); 1.350 + JS_ASSERT(kind() == EMPTY); 1.351 + } 1.352 + 1.353 + void initPushed(uint32_t offset, uint32_t index) { 1.354 + clear(); 1.355 + u.pushed.kind = PUSHED; 1.356 + u.pushed.offset = offset; 1.357 + u.pushed.index = index; 1.358 + } 1.359 + 1.360 + static SSAValue PushedValue(uint32_t offset, uint32_t index) { 1.361 + SSAValue v; 1.362 + v.initPushed(offset, index); 1.363 + return v; 1.364 + } 1.365 + 1.366 + void initInitial(uint32_t slot) { 1.367 + clear(); 1.368 + u.var.kind = VAR; 1.369 + u.var.initial = true; 1.370 + u.var.slot = slot; 1.371 + } 1.372 + 1.373 + void initWritten(uint32_t slot, uint32_t offset) { 1.374 + clear(); 1.375 + u.var.kind = VAR; 1.376 + u.var.initial = false; 1.377 + u.var.slot = slot; 1.378 + u.var.offset = offset; 1.379 + } 1.380 + 1.381 + static SSAValue WrittenVar(uint32_t slot, uint32_t offset) { 1.382 + SSAValue v; 1.383 + v.initWritten(slot, offset); 1.384 + return v; 1.385 + } 1.386 + 1.387 + void initPhi(uint32_t offset, SSAPhiNode *node) { 1.388 + clear(); 1.389 + u.phi.kind = PHI; 1.390 + u.phi.offset = offset; 1.391 + u.phi.node = node; 1.392 + } 1.393 + 1.394 + static SSAValue PhiValue(uint32_t offset, SSAPhiNode *node) { 1.395 + SSAValue v; 1.396 + v.initPhi(offset, node); 1.397 + return v; 1.398 + } 1.399 + 1.400 + private: 1.401 + union { 1.402 + struct { 1.403 + Kind kind : 2; 1.404 + uint32_t offset : 30; 1.405 + uint32_t index; 1.406 + } pushed; 1.407 + struct { 1.408 + Kind kind : 2; 1.409 + bool initial : 1; 1.410 + uint32_t slot : 29; 1.411 + uint32_t offset; 1.412 + } var; 1.413 + struct { 1.414 + Kind kind : 2; 1.415 + uint32_t offset : 30; 1.416 + SSAPhiNode *node; 1.417 + } phi; 1.418 + } u; 1.419 +}; 1.420 + 1.421 +/* 1.422 + * Mutable component of a phi node, with the possible values of the phi 1.423 + * and the possible types of the node as determined by type inference. 
1.424 + * When phi nodes are copied around, any updates to the original will 1.425 + * be seen by all copies made. 1.426 + */ 1.427 +struct SSAPhiNode 1.428 +{ 1.429 + uint32_t slot; 1.430 + uint32_t length; 1.431 + SSAValue *options; 1.432 + SSAUseChain *uses; 1.433 + SSAPhiNode() { mozilla::PodZero(this); } 1.434 +}; 1.435 + 1.436 +inline uint32_t 1.437 +SSAValue::phiSlot() const 1.438 +{ 1.439 + return u.phi.node->slot; 1.440 +} 1.441 + 1.442 +inline uint32_t 1.443 +SSAValue::phiLength() const 1.444 +{ 1.445 + JS_ASSERT(kind() == PHI); 1.446 + return u.phi.node->length; 1.447 +} 1.448 + 1.449 +inline const SSAValue & 1.450 +SSAValue::phiValue(uint32_t i) const 1.451 +{ 1.452 + JS_ASSERT(kind() == PHI && i < phiLength()); 1.453 + return u.phi.node->options[i]; 1.454 +} 1.455 + 1.456 +class SSAUseChain 1.457 +{ 1.458 + public: 1.459 + bool popped : 1; 1.460 + uint32_t offset : 31; 1.461 + /* FIXME: Assert that only the proper arm of this union is accessed. */ 1.462 + union { 1.463 + uint32_t which; 1.464 + SSAPhiNode *phi; 1.465 + } u; 1.466 + SSAUseChain *next; 1.467 + 1.468 + SSAUseChain() { mozilla::PodZero(this); } 1.469 +}; 1.470 + 1.471 +class SlotValue 1.472 +{ 1.473 + public: 1.474 + uint32_t slot; 1.475 + SSAValue value; 1.476 + SlotValue(uint32_t slot, const SSAValue &value) : slot(slot), value(value) {} 1.477 +}; 1.478 + 1.479 +///////////////////////////////////////////////////////////////////// 1.480 +// Bytecode 1.481 +///////////////////////////////////////////////////////////////////// 1.482 + 1.483 +#ifdef DEBUG 1.484 +void 1.485 +PrintBytecode(JSContext *cx, HandleScript script, jsbytecode *pc) 1.486 +{ 1.487 + fprintf(stderr, "#%u:", script->id()); 1.488 + Sprinter sprinter(cx); 1.489 + if (!sprinter.init()) 1.490 + return; 1.491 + js_Disassemble1(cx, script, pc, script->pcToOffset(pc), true, &sprinter); 1.492 + fprintf(stderr, "%s", sprinter.string()); 1.493 +} 1.494 +#endif 1.495 + 1.496 +/* 1.497 + * For opcodes which assign to a local variable or argument, track an extra def 1.498 + * during SSA analysis for the value's use chain and assigned type. 1.499 + */ 1.500 +static inline bool 1.501 +ExtendedDef(jsbytecode *pc) 1.502 +{ 1.503 + switch ((JSOp)*pc) { 1.504 + case JSOP_SETARG: 1.505 + case JSOP_SETLOCAL: 1.506 + return true; 1.507 + default: 1.508 + return false; 1.509 + } 1.510 +} 1.511 + 1.512 +/* 1.513 + * For opcodes which access local variables or arguments, we track an extra 1.514 + * use during SSA analysis for the value of the variable before/after the op. 1.515 + */ 1.516 +static inline bool 1.517 +ExtendedUse(jsbytecode *pc) 1.518 +{ 1.519 + if (ExtendedDef(pc)) 1.520 + return true; 1.521 + switch ((JSOp)*pc) { 1.522 + case JSOP_GETARG: 1.523 + case JSOP_GETLOCAL: 1.524 + return true; 1.525 + default: 1.526 + return false; 1.527 + } 1.528 +} 1.529 + 1.530 +static inline unsigned 1.531 +FollowBranch(JSContext *cx, JSScript *script, unsigned offset) 1.532 +{ 1.533 + /* 1.534 + * Get the target offset of a branch. For GOTO opcodes implementing 1.535 + * 'continue' statements, short circuit any artificial backwards jump 1.536 + * inserted by the emitter. 
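That is, if a backwards branch lands on another GOTO, return that GOTO's target rather than the GOTO itself.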
1.537 + */ 1.538 + jsbytecode *pc = script->offsetToPC(offset); 1.539 + unsigned targetOffset = offset + GET_JUMP_OFFSET(pc); 1.540 + if (targetOffset < offset) { 1.541 + jsbytecode *target = script->offsetToPC(targetOffset); 1.542 + JSOp nop = JSOp(*target); 1.543 + if (nop == JSOP_GOTO) 1.544 + return targetOffset + GET_JUMP_OFFSET(target); 1.545 + } 1.546 + return targetOffset; 1.547 +} 1.548 + 1.549 +static inline uint32_t StackSlot(JSScript *script, uint32_t index) { 1.550 + return TotalSlots(script) + index; 1.551 +} 1.552 + 1.553 +static inline uint32_t GetBytecodeSlot(JSScript *script, jsbytecode *pc) 1.554 +{ 1.555 + switch (JSOp(*pc)) { 1.556 + 1.557 + case JSOP_GETARG: 1.558 + case JSOP_SETARG: 1.559 + return ArgSlot(GET_ARGNO(pc)); 1.560 + 1.561 + case JSOP_GETLOCAL: 1.562 + case JSOP_SETLOCAL: 1.563 + return LocalSlot(script, GET_LOCALNO(pc)); 1.564 + 1.565 + case JSOP_THIS: 1.566 + return ThisSlot(); 1.567 + 1.568 + default: 1.569 + MOZ_ASSUME_UNREACHABLE("Bad slot opcode"); 1.570 + } 1.571 +} 1.572 + 1.573 +///////////////////////////////////////////////////////////////////// 1.574 +// Bytecode Analysis 1.575 +///////////////////////////////////////////////////////////////////// 1.576 + 1.577 +inline bool 1.578 +ScriptAnalysis::addJump(JSContext *cx, unsigned offset, 1.579 + unsigned *currentOffset, unsigned *forwardJump, unsigned *forwardLoop, 1.580 + unsigned stackDepth) 1.581 +{ 1.582 + JS_ASSERT(offset < script_->length()); 1.583 + 1.584 + Bytecode *&code = codeArray[offset]; 1.585 + if (!code) { 1.586 + code = cx->typeLifoAlloc().new_<Bytecode>(); 1.587 + if (!code) { 1.588 + js_ReportOutOfMemory(cx); 1.589 + return false; 1.590 + } 1.591 + code->stackDepth = stackDepth; 1.592 + } 1.593 + JS_ASSERT(code->stackDepth == stackDepth); 1.594 + 1.595 + code->jumpTarget = true; 1.596 + 1.597 + if (offset < *currentOffset) { 1.598 + if (!code->analyzed) { 1.599 + /* 1.600 + * Backedge in a while/for loop, whose body has not been analyzed 1.601 + * due to a lack of fallthrough at the loop head. Roll back the 1.602 + * offset to analyze the body. 1.603 + */ 1.604 + if (*forwardJump == 0) 1.605 + *forwardJump = *currentOffset; 1.606 + if (*forwardLoop == 0) 1.607 + *forwardLoop = *currentOffset; 1.608 + *currentOffset = offset; 1.609 + } 1.610 + } else if (offset > *forwardJump) { 1.611 + *forwardJump = offset; 1.612 + } 1.613 + 1.614 + return true; 1.615 +} 1.616 + 1.617 +inline bool 1.618 +ScriptAnalysis::jumpTarget(uint32_t offset) 1.619 +{ 1.620 + JS_ASSERT(offset < script_->length()); 1.621 + return codeArray[offset] && codeArray[offset]->jumpTarget; 1.622 +} 1.623 + 1.624 +inline bool 1.625 +ScriptAnalysis::jumpTarget(const jsbytecode *pc) 1.626 +{ 1.627 + return jumpTarget(script_->pcToOffset(pc)); 1.628 +} 1.629 + 1.630 +inline const SSAValue & 1.631 +ScriptAnalysis::poppedValue(uint32_t offset, uint32_t which) 1.632 +{ 1.633 + JS_ASSERT(which < GetUseCount(script_, offset) + 1.634 + (ExtendedUse(script_->offsetToPC(offset)) ? 
1 : 0)); 1.635 + return getCode(offset).poppedValues[which]; 1.636 +} 1.637 + 1.638 +inline const SSAValue & 1.639 +ScriptAnalysis::poppedValue(const jsbytecode *pc, uint32_t which) 1.640 +{ 1.641 + return poppedValue(script_->pcToOffset(pc), which); 1.642 +} 1.643 + 1.644 +inline const SlotValue * 1.645 +ScriptAnalysis::newValues(uint32_t offset) 1.646 +{ 1.647 + JS_ASSERT(offset < script_->length()); 1.648 + return getCode(offset).newValues; 1.649 +} 1.650 + 1.651 +inline const SlotValue * 1.652 +ScriptAnalysis::newValues(const jsbytecode *pc) 1.653 +{ 1.654 + return newValues(script_->pcToOffset(pc)); 1.655 +} 1.656 + 1.657 +inline bool 1.658 +ScriptAnalysis::trackUseChain(const SSAValue &v) 1.659 +{ 1.660 + JS_ASSERT_IF(v.kind() == SSAValue::VAR, trackSlot(v.varSlot())); 1.661 + return v.kind() != SSAValue::EMPTY && 1.662 + (v.kind() != SSAValue::VAR || !v.varInitial()); 1.663 +} 1.664 + 1.665 +/* 1.666 + * Get the use chain for an SSA value. 1.667 + */ 1.668 +inline SSAUseChain *& 1.669 +ScriptAnalysis::useChain(const SSAValue &v) 1.670 +{ 1.671 + JS_ASSERT(trackUseChain(v)); 1.672 + if (v.kind() == SSAValue::PUSHED) 1.673 + return getCode(v.pushedOffset()).pushedUses[v.pushedIndex()]; 1.674 + if (v.kind() == SSAValue::VAR) 1.675 + return getCode(v.varOffset()).pushedUses[GetDefCount(script_, v.varOffset())]; 1.676 + return v.phiNode()->uses; 1.677 +} 1.678 + 1.679 +/* For a JSOP_CALL* op, get the pc of the corresponding JSOP_CALL/NEW/etc. */ 1.680 +inline jsbytecode * 1.681 +ScriptAnalysis::getCallPC(jsbytecode *pc) 1.682 +{ 1.683 + SSAUseChain *uses = useChain(SSAValue::PushedValue(script_->pcToOffset(pc), 0)); 1.684 + JS_ASSERT(uses && uses->popped); 1.685 + JS_ASSERT(js_CodeSpec[script_->code()[uses->offset]].format & JOF_INVOKE); 1.686 + return script_->offsetToPC(uses->offset); 1.687 +} 1.688 + 1.689 +/* Accessors for local variable information. */ 1.690 + 1.691 +/* 1.692 + * Escaping slots include all slots that can be accessed in ways other than 1.693 + * through the corresponding LOCAL/ARG opcode. This includes all closed 1.694 + * slots in the script, all slots in scripts which use eval or are in debug 1.695 + * mode, and slots which are aliased by NAME or similar opcodes in the 1.696 + * containing script (which does not imply the variable is closed). 1.697 + */ 1.698 +inline bool 1.699 +ScriptAnalysis::slotEscapes(uint32_t slot) 1.700 +{ 1.701 + JS_ASSERT(script_->compartment()->activeAnalysis); 1.702 + if (slot >= numSlots) 1.703 + return true; 1.704 + return escapedSlots[slot]; 1.705 +} 1.706 + 1.707 +/* 1.708 + * Whether we distinguish different writes of this variable while doing 1.709 + * SSA analysis. Escaping locals can be written in other scripts, and the 1.710 + * presence of NAME opcodes which could alias local variables or arguments 1.711 + * keeps us from tracking variable values at each point. 
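Slots numbered 1000 or above are also never tracked.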
1.712 + */ 1.713 +inline bool 1.714 +ScriptAnalysis::trackSlot(uint32_t slot) 1.715 +{ 1.716 + return !slotEscapes(slot) && canTrackVars && slot < 1000; 1.717 +} 1.718 + 1.719 +inline const LifetimeVariable & 1.720 +ScriptAnalysis::liveness(uint32_t slot) 1.721 +{ 1.722 + JS_ASSERT(script_->compartment()->activeAnalysis); 1.723 + JS_ASSERT(!slotEscapes(slot)); 1.724 + return lifetimes[slot]; 1.725 +} 1.726 + 1.727 +bool 1.728 +ScriptAnalysis::analyzeBytecode(JSContext *cx) 1.729 +{ 1.730 + JS_ASSERT(cx->compartment()->activeAnalysis); 1.731 + JS_ASSERT(!codeArray); 1.732 + 1.733 + LifoAlloc &alloc = cx->typeLifoAlloc(); 1.734 + 1.735 + numSlots = TotalSlots(script_); 1.736 + 1.737 + unsigned length = script_->length(); 1.738 + codeArray = alloc.newArray<Bytecode*>(length); 1.739 + escapedSlots = alloc.newArray<bool>(numSlots); 1.740 + 1.741 + if (!codeArray || !escapedSlots) { 1.742 + js_ReportOutOfMemory(cx); 1.743 + return false; 1.744 + } 1.745 + 1.746 + PodZero(codeArray, length); 1.747 + 1.748 + /* 1.749 + * Populate arg and local slots which can escape and be accessed in ways 1.750 + * other than through ARG* and LOCAL* opcodes (though arguments can still 1.751 + * be indirectly read but not written through 'arguments' properties). 1.752 + * All escaping locals are treated as having possible use-before-defs. 1.753 + * Conservatively use 'argumentsHasVarBinding' instead of 'needsArgsObj' 1.754 + * (needsArgsObj requires SSA which requires escapedSlots). Lastly, the 1.755 + * debugger can access any local at any time. Even though debugger 1.756 + * reads/writes are monitored by the DebugScopeProxy, this monitoring 1.757 + * updates the flow-insensitive type sets, so we cannot use SSA. 1.758 + */ 1.759 + 1.760 + PodZero(escapedSlots, numSlots); 1.761 + 1.762 + bool allVarsAliased = script_->compartment()->debugMode(); 1.763 + bool allArgsAliased = allVarsAliased || script_->argumentsHasVarBinding(); 1.764 + 1.765 + RootedScript script(cx, script_); 1.766 + for (BindingIter bi(script); bi; bi++) { 1.767 + if (bi->kind() == Binding::ARGUMENT) 1.768 + escapedSlots[ArgSlot(bi.frameIndex())] = allArgsAliased || bi->aliased(); 1.769 + else 1.770 + escapedSlots[LocalSlot(script_, bi.frameIndex())] = allVarsAliased || bi->aliased(); 1.771 + } 1.772 + 1.773 + canTrackVars = true; 1.774 + 1.775 + /* 1.776 + * If we are in the middle of one or more jumps, the offset of the highest 1.777 + * target jumping over this bytecode. Includes implicit jumps from 1.778 + * try/catch/finally blocks. 1.779 + */ 1.780 + unsigned forwardJump = 0; 1.781 + 1.782 + /* If we are in the middle of a loop, the offset of the highest backedge. */ 1.783 + unsigned forwardLoop = 0; 1.784 + 1.785 + /* 1.786 + * If we are in the middle of a try block, the offset of the highest 1.787 + * catch/finally/enditer. 1.788 + */ 1.789 + unsigned forwardCatch = 0; 1.790 + 1.791 + /* Fill in stack depth and definitions at initial bytecode. */ 1.792 + Bytecode *startcode = alloc.new_<Bytecode>(); 1.793 + if (!startcode) { 1.794 + js_ReportOutOfMemory(cx); 1.795 + return false; 1.796 + } 1.797 + 1.798 + startcode->stackDepth = 0; 1.799 + codeArray[0] = startcode; 1.800 + 1.801 + unsigned offset, nextOffset = 0; 1.802 + while (nextOffset < length) { 1.803 + offset = nextOffset; 1.804 + 1.805 + JS_ASSERT(forwardCatch <= forwardJump); 1.806 + 1.807 + /* Check if the current forward jump/try-block has finished. 
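(i.e. whether its recorded target is the offset we are about to analyze).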
*/ 1.808 + if (forwardJump && forwardJump == offset) 1.809 + forwardJump = 0; 1.810 + if (forwardCatch && forwardCatch == offset) 1.811 + forwardCatch = 0; 1.812 + 1.813 + Bytecode *code = maybeCode(offset); 1.814 + jsbytecode *pc = script_->offsetToPC(offset); 1.815 + 1.816 + JSOp op = (JSOp)*pc; 1.817 + JS_ASSERT(op < JSOP_LIMIT); 1.818 + 1.819 + /* Immediate successor of this bytecode. */ 1.820 + unsigned successorOffset = offset + GetBytecodeLength(pc); 1.821 + 1.822 + /* 1.823 + * Next bytecode to analyze. This is either the successor, or is an 1.824 + * earlier bytecode if this bytecode has a loop backedge. 1.825 + */ 1.826 + nextOffset = successorOffset; 1.827 + 1.828 + if (!code) { 1.829 + /* Haven't found a path by which this bytecode is reachable. */ 1.830 + continue; 1.831 + } 1.832 + 1.833 + /* 1.834 + * Update info about bytecodes inside loops, which may have been 1.835 + * analyzed before the backedge was seen. 1.836 + */ 1.837 + if (forwardLoop) { 1.838 + if (forwardLoop <= offset) 1.839 + forwardLoop = 0; 1.840 + } 1.841 + 1.842 + if (code->analyzed) { 1.843 + /* No need to reanalyze, see Bytecode::mergeDefines. */ 1.844 + continue; 1.845 + } 1.846 + 1.847 + code->analyzed = true; 1.848 + 1.849 + if (script_->hasBreakpointsAt(pc)) 1.850 + canTrackVars = false; 1.851 + 1.852 + unsigned stackDepth = code->stackDepth; 1.853 + 1.854 + if (!forwardJump) 1.855 + code->unconditional = true; 1.856 + 1.857 + unsigned nuses = GetUseCount(script_, offset); 1.858 + unsigned ndefs = GetDefCount(script_, offset); 1.859 + 1.860 + JS_ASSERT(stackDepth >= nuses); 1.861 + stackDepth -= nuses; 1.862 + stackDepth += ndefs; 1.863 + 1.864 + switch (op) { 1.865 + 1.866 + case JSOP_DEFFUN: 1.867 + case JSOP_DEFVAR: 1.868 + case JSOP_DEFCONST: 1.869 + case JSOP_SETCONST: 1.870 + canTrackVars = false; 1.871 + break; 1.872 + 1.873 + case JSOP_EVAL: 1.874 + case JSOP_SPREADEVAL: 1.875 + case JSOP_ENTERWITH: 1.876 + canTrackVars = false; 1.877 + break; 1.878 + 1.879 + case JSOP_TABLESWITCH: { 1.880 + unsigned defaultOffset = offset + GET_JUMP_OFFSET(pc); 1.881 + jsbytecode *pc2 = pc + JUMP_OFFSET_LEN; 1.882 + int32_t low = GET_JUMP_OFFSET(pc2); 1.883 + pc2 += JUMP_OFFSET_LEN; 1.884 + int32_t high = GET_JUMP_OFFSET(pc2); 1.885 + pc2 += JUMP_OFFSET_LEN; 1.886 + 1.887 + if (!addJump(cx, defaultOffset, &nextOffset, &forwardJump, &forwardLoop, stackDepth)) 1.888 + return false; 1.889 + 1.890 + for (int32_t i = low; i <= high; i++) { 1.891 + unsigned targetOffset = offset + GET_JUMP_OFFSET(pc2); 1.892 + if (targetOffset != offset) { 1.893 + if (!addJump(cx, targetOffset, &nextOffset, &forwardJump, &forwardLoop, stackDepth)) 1.894 + return false; 1.895 + } 1.896 + pc2 += JUMP_OFFSET_LEN; 1.897 + } 1.898 + break; 1.899 + } 1.900 + 1.901 + case JSOP_TRY: { 1.902 + /* 1.903 + * Everything between a try and corresponding catch or finally is conditional. 1.904 + * Note that there is no problem with code which is skipped by a thrown 1.905 + * exception but is not caught by a later handler in the same function: 1.906 + * no more code will execute, and it does not matter what is defined. 1.907 + */ 1.908 + JSTryNote *tn = script_->trynotes()->vector; 1.909 + JSTryNote *tnlimit = tn + script_->trynotes()->length; 1.910 + for (; tn < tnlimit; tn++) { 1.911 + unsigned startOffset = script_->mainOffset() + tn->start; 1.912 + if (startOffset == offset + 1) { 1.913 + unsigned catchOffset = startOffset + tn->length; 1.914 + 1.915 + /* This will overestimate try block code, for multiple catch/finally. 
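forwardCatch only records the furthest handler offset rather than tracking each try note separately.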
*/ 1.916 + if (catchOffset > forwardCatch) 1.917 + forwardCatch = catchOffset; 1.918 + 1.919 + if (tn->kind != JSTRY_ITER && tn->kind != JSTRY_LOOP) { 1.920 + if (!addJump(cx, catchOffset, &nextOffset, &forwardJump, &forwardLoop, stackDepth)) 1.921 + return false; 1.922 + getCode(catchOffset).exceptionEntry = true; 1.923 + } 1.924 + } 1.925 + } 1.926 + break; 1.927 + } 1.928 + 1.929 + case JSOP_GETLOCAL: 1.930 + case JSOP_SETLOCAL: 1.931 + JS_ASSERT(GET_LOCALNO(pc) < script_->nfixed()); 1.932 + break; 1.933 + 1.934 + default: 1.935 + break; 1.936 + } 1.937 + 1.938 + bool jump = IsJumpOpcode(op); 1.939 + 1.940 + /* Check basic jump opcodes, which may or may not have a fallthrough. */ 1.941 + if (jump) { 1.942 + /* Case instructions do not push the lvalue back when branching. */ 1.943 + unsigned newStackDepth = stackDepth; 1.944 + if (op == JSOP_CASE) 1.945 + newStackDepth--; 1.946 + 1.947 + unsigned targetOffset = offset + GET_JUMP_OFFSET(pc); 1.948 + if (!addJump(cx, targetOffset, &nextOffset, &forwardJump, &forwardLoop, newStackDepth)) 1.949 + return false; 1.950 + } 1.951 + 1.952 + /* Handle any fallthrough from this opcode. */ 1.953 + if (BytecodeFallsThrough(op)) { 1.954 + JS_ASSERT(successorOffset < script_->length()); 1.955 + 1.956 + Bytecode *&nextcode = codeArray[successorOffset]; 1.957 + 1.958 + if (!nextcode) { 1.959 + nextcode = alloc.new_<Bytecode>(); 1.960 + if (!nextcode) { 1.961 + js_ReportOutOfMemory(cx); 1.962 + return false; 1.963 + } 1.964 + nextcode->stackDepth = stackDepth; 1.965 + } 1.966 + JS_ASSERT(nextcode->stackDepth == stackDepth); 1.967 + 1.968 + if (jump) 1.969 + nextcode->jumpFallthrough = true; 1.970 + 1.971 + /* Treat the fallthrough of a branch instruction as a jump target. */ 1.972 + if (jump) 1.973 + nextcode->jumpTarget = true; 1.974 + else 1.975 + nextcode->fallthrough = true; 1.976 + } 1.977 + } 1.978 + 1.979 + JS_ASSERT(forwardJump == 0 && forwardLoop == 0 && forwardCatch == 0); 1.980 + 1.981 + /* 1.982 + * Always ensure that a script's arguments usage has been analyzed before 1.983 + * entering the script. This allows the functionPrologue to ensure that 1.984 + * arguments are always created eagerly which simplifies interp logic. 1.985 + */ 1.986 + if (!script_->analyzedArgsUsage()) { 1.987 + if (!analyzeLifetimes(cx)) 1.988 + return false; 1.989 + if (!analyzeSSA(cx)) 1.990 + return false; 1.991 + 1.992 + /* 1.993 + * Now that we have full SSA information for the script, analyze whether 1.994 + * we can avoid creating the arguments object. 1.995 + */ 1.996 + script_->setNeedsArgsObj(needsArgsObj(cx)); 1.997 + } 1.998 + 1.999 + return true; 1.1000 +} 1.1001 + 1.1002 +///////////////////////////////////////////////////////////////////// 1.1003 +// Lifetime Analysis 1.1004 +///////////////////////////////////////////////////////////////////// 1.1005 + 1.1006 +bool 1.1007 +ScriptAnalysis::analyzeLifetimes(JSContext *cx) 1.1008 +{ 1.1009 + JS_ASSERT(cx->compartment()->activeAnalysis); 1.1010 + JS_ASSERT(codeArray); 1.1011 + JS_ASSERT(!lifetimes); 1.1012 + 1.1013 + LifoAlloc &alloc = cx->typeLifoAlloc(); 1.1014 + 1.1015 + lifetimes = alloc.newArray<LifetimeVariable>(numSlots); 1.1016 + if (!lifetimes) { 1.1017 + js_ReportOutOfMemory(cx); 1.1018 + return false; 1.1019 + } 1.1020 + PodZero(lifetimes, numSlots); 1.1021 + 1.1022 + /* 1.1023 + * Variables which are currently dead. On forward branches to locations 1.1024 + * where these are live, they need to be marked as live. 
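(The pass runs backwards, so a variable becomes dead here once we reach the write that begins its next live segment.)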
1.1025 + */ 1.1026 + LifetimeVariable **saved = cx->pod_calloc<LifetimeVariable*>(numSlots); 1.1027 + if (!saved) { 1.1028 + js_ReportOutOfMemory(cx); 1.1029 + return false; 1.1030 + } 1.1031 + unsigned savedCount = 0; 1.1032 + 1.1033 + LoopAnalysis *loop = nullptr; 1.1034 + 1.1035 + uint32_t offset = script_->length() - 1; 1.1036 + while (offset < script_->length()) { 1.1037 + Bytecode *code = maybeCode(offset); 1.1038 + if (!code) { 1.1039 + offset--; 1.1040 + continue; 1.1041 + } 1.1042 + 1.1043 + jsbytecode *pc = script_->offsetToPC(offset); 1.1044 + 1.1045 + JSOp op = (JSOp) *pc; 1.1046 + 1.1047 + if (op == JSOP_LOOPHEAD && code->loop) { 1.1048 + /* 1.1049 + * This is the head of a loop, we need to go and make sure that any 1.1050 + * variables live at the head are live at the backedge and points prior. 1.1051 + * For each such variable, look for the last lifetime segment in the body 1.1052 + * and extend it to the end of the loop. 1.1053 + */ 1.1054 + JS_ASSERT(loop == code->loop); 1.1055 + unsigned backedge = code->loop->backedge; 1.1056 + for (unsigned i = 0; i < numSlots; i++) { 1.1057 + if (lifetimes[i].lifetime) { 1.1058 + if (!extendVariable(cx, lifetimes[i], offset, backedge)) 1.1059 + return false; 1.1060 + } 1.1061 + } 1.1062 + 1.1063 + loop = loop->parent; 1.1064 + JS_ASSERT_IF(loop, loop->head < offset); 1.1065 + } 1.1066 + 1.1067 + if (code->exceptionEntry) { 1.1068 + DebugOnly<bool> found = false; 1.1069 + JSTryNote *tn = script_->trynotes()->vector; 1.1070 + JSTryNote *tnlimit = tn + script_->trynotes()->length; 1.1071 + for (; tn < tnlimit; tn++) { 1.1072 + unsigned startOffset = script_->mainOffset() + tn->start; 1.1073 + if (startOffset + tn->length == offset) { 1.1074 + /* 1.1075 + * Extend all live variables at exception entry to the start of 1.1076 + * the try block. 1.1077 + */ 1.1078 + for (unsigned i = 0; i < numSlots; i++) { 1.1079 + if (lifetimes[i].lifetime) 1.1080 + ensureVariable(lifetimes[i], startOffset - 1); 1.1081 + } 1.1082 + 1.1083 + found = true; 1.1084 + break; 1.1085 + } 1.1086 + } 1.1087 + JS_ASSERT(found); 1.1088 + } 1.1089 + 1.1090 + switch (op) { 1.1091 + case JSOP_GETARG: 1.1092 + case JSOP_GETLOCAL: 1.1093 + case JSOP_THIS: { 1.1094 + uint32_t slot = GetBytecodeSlot(script_, pc); 1.1095 + if (!slotEscapes(slot)) { 1.1096 + if (!addVariable(cx, lifetimes[slot], offset, saved, savedCount)) 1.1097 + return false; 1.1098 + } 1.1099 + break; 1.1100 + } 1.1101 + 1.1102 + case JSOP_SETARG: 1.1103 + case JSOP_SETLOCAL: { 1.1104 + uint32_t slot = GetBytecodeSlot(script_, pc); 1.1105 + if (!slotEscapes(slot)) { 1.1106 + if (!killVariable(cx, lifetimes[slot], offset, saved, savedCount)) 1.1107 + return false; 1.1108 + } 1.1109 + break; 1.1110 + } 1.1111 + 1.1112 + case JSOP_TABLESWITCH: 1.1113 + /* Restore all saved variables. :FIXME: maybe do this precisely. 
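For now every saved variable is conservatively marked live across the switch instead of inspecting the individual case targets.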
*/ 1.1114 + for (unsigned i = 0; i < savedCount; i++) { 1.1115 + LifetimeVariable &var = *saved[i]; 1.1116 + uint32_t savedEnd = var.savedEnd; 1.1117 + var.lifetime = alloc.new_<Lifetime>(offset, savedEnd, var.saved); 1.1118 + if (!var.lifetime) { 1.1119 + js_free(saved); 1.1120 + js_ReportOutOfMemory(cx); 1.1121 + return false; 1.1122 + } 1.1123 + var.saved = nullptr; 1.1124 + saved[i--] = saved[--savedCount]; 1.1125 + } 1.1126 + savedCount = 0; 1.1127 + break; 1.1128 + 1.1129 + case JSOP_TRY: 1.1130 + for (unsigned i = 0; i < numSlots; i++) { 1.1131 + LifetimeVariable &var = lifetimes[i]; 1.1132 + if (var.ensured) { 1.1133 + JS_ASSERT(var.lifetime); 1.1134 + if (var.lifetime->start == offset) 1.1135 + var.ensured = false; 1.1136 + } 1.1137 + } 1.1138 + break; 1.1139 + 1.1140 + case JSOP_LOOPENTRY: 1.1141 + getCode(offset).loop = loop; 1.1142 + break; 1.1143 + 1.1144 + default:; 1.1145 + } 1.1146 + 1.1147 + if (IsJumpOpcode(op)) { 1.1148 + /* 1.1149 + * Forward jumps need to pull in all variables which are live at 1.1150 + * their target offset --- the variables live before the jump are 1.1151 + * the union of those live at the fallthrough and at the target. 1.1152 + */ 1.1153 + uint32_t targetOffset = FollowBranch(cx, script_, offset); 1.1154 + 1.1155 + if (targetOffset < offset) { 1.1156 + /* This is a loop back edge, no lifetime to pull in yet. */ 1.1157 + 1.1158 +#ifdef DEBUG 1.1159 + JSOp nop = JSOp(script_->code()[targetOffset]); 1.1160 + JS_ASSERT(nop == JSOP_LOOPHEAD); 1.1161 +#endif 1.1162 + 1.1163 + LoopAnalysis *nloop = alloc.new_<LoopAnalysis>(); 1.1164 + if (!nloop) { 1.1165 + js_free(saved); 1.1166 + js_ReportOutOfMemory(cx); 1.1167 + return false; 1.1168 + } 1.1169 + PodZero(nloop); 1.1170 + 1.1171 + nloop->parent = loop; 1.1172 + loop = nloop; 1.1173 + 1.1174 + getCode(targetOffset).loop = loop; 1.1175 + loop->head = targetOffset; 1.1176 + loop->backedge = offset; 1.1177 + 1.1178 + /* 1.1179 + * Find the entry jump, which will be a GOTO for 'for' or 1.1180 + * 'while' loops or a fallthrough for 'do while' loops. 1.1181 + */ 1.1182 + uint32_t entry = targetOffset; 1.1183 + if (entry) { 1.1184 + do { 1.1185 + entry--; 1.1186 + } while (!maybeCode(entry)); 1.1187 + 1.1188 + jsbytecode *entrypc = script_->offsetToPC(entry); 1.1189 + 1.1190 + if (JSOp(*entrypc) == JSOP_GOTO) 1.1191 + entry += GET_JUMP_OFFSET(entrypc); 1.1192 + else 1.1193 + entry = targetOffset; 1.1194 + } else { 1.1195 + /* Do-while loop at the start of the script. */ 1.1196 + entry = targetOffset; 1.1197 + } 1.1198 + JS_ASSERT(script_->code()[entry] == JSOP_LOOPHEAD || 1.1199 + script_->code()[entry] == JSOP_LOOPENTRY); 1.1200 + } else { 1.1201 + for (unsigned i = 0; i < savedCount; i++) { 1.1202 + LifetimeVariable &var = *saved[i]; 1.1203 + JS_ASSERT(!var.lifetime && var.saved); 1.1204 + if (var.live(targetOffset)) { 1.1205 + /* 1.1206 + * Jumping to a place where this variable is live. Make a new 1.1207 + * lifetime segment for the variable. 
1.1208 + */ 1.1209 + uint32_t savedEnd = var.savedEnd; 1.1210 + var.lifetime = alloc.new_<Lifetime>(offset, savedEnd, var.saved); 1.1211 + if (!var.lifetime) { 1.1212 + js_free(saved); 1.1213 + js_ReportOutOfMemory(cx); 1.1214 + return false; 1.1215 + } 1.1216 + var.saved = nullptr; 1.1217 + saved[i--] = saved[--savedCount]; 1.1218 + } else if (loop && !var.savedEnd) { 1.1219 + /* 1.1220 + * This jump precedes the basic block which killed the variable, 1.1221 + * remember it and use it for the end of the next lifetime 1.1222 + * segment should the variable become live again. This is needed 1.1223 + * for loops, as if we wrap liveness around the loop the isLive 1.1224 + * test below may have given the wrong answer. 1.1225 + */ 1.1226 + var.savedEnd = offset; 1.1227 + } 1.1228 + } 1.1229 + } 1.1230 + } 1.1231 + 1.1232 + offset--; 1.1233 + } 1.1234 + 1.1235 + js_free(saved); 1.1236 + 1.1237 + return true; 1.1238 +} 1.1239 + 1.1240 +#ifdef DEBUG 1.1241 +void 1.1242 +LifetimeVariable::print() const 1.1243 +{ 1.1244 + Lifetime *segment = lifetime ? lifetime : saved; 1.1245 + while (segment) { 1.1246 + printf(" (%u,%u)", segment->start, segment->end); 1.1247 + segment = segment->next; 1.1248 + } 1.1249 + printf("\n"); 1.1250 +} 1.1251 +#endif /* DEBUG */ 1.1252 + 1.1253 +inline bool 1.1254 +ScriptAnalysis::addVariable(JSContext *cx, LifetimeVariable &var, unsigned offset, 1.1255 + LifetimeVariable **&saved, unsigned &savedCount) 1.1256 +{ 1.1257 + if (var.lifetime) { 1.1258 + if (var.ensured) 1.1259 + return true; 1.1260 + 1.1261 + JS_ASSERT(offset < var.lifetime->start); 1.1262 + var.lifetime->start = offset; 1.1263 + } else { 1.1264 + if (var.saved) { 1.1265 + /* Remove from the list of saved entries. */ 1.1266 + for (unsigned i = 0; i < savedCount; i++) { 1.1267 + if (saved[i] == &var) { 1.1268 + JS_ASSERT(savedCount); 1.1269 + saved[i--] = saved[--savedCount]; 1.1270 + break; 1.1271 + } 1.1272 + } 1.1273 + } 1.1274 + uint32_t savedEnd = var.savedEnd; 1.1275 + var.lifetime = cx->typeLifoAlloc().new_<Lifetime>(offset, savedEnd, var.saved); 1.1276 + if (!var.lifetime) { 1.1277 + js_ReportOutOfMemory(cx); 1.1278 + return false; 1.1279 + } 1.1280 + var.saved = nullptr; 1.1281 + } 1.1282 + 1.1283 + return true; 1.1284 +} 1.1285 + 1.1286 +inline bool 1.1287 +ScriptAnalysis::killVariable(JSContext *cx, LifetimeVariable &var, unsigned offset, 1.1288 + LifetimeVariable **&saved, unsigned &savedCount) 1.1289 +{ 1.1290 + if (!var.lifetime) { 1.1291 + /* Make a point lifetime indicating the write. */ 1.1292 + uint32_t savedEnd = var.savedEnd; 1.1293 + Lifetime *lifetime = cx->typeLifoAlloc().new_<Lifetime>(offset, savedEnd, var.saved); 1.1294 + if (!lifetime) { 1.1295 + js_ReportOutOfMemory(cx); 1.1296 + return false; 1.1297 + } 1.1298 + if (!var.saved) 1.1299 + saved[savedCount++] = &var; 1.1300 + var.saved = lifetime; 1.1301 + var.saved->write = true; 1.1302 + var.savedEnd = 0; 1.1303 + return true; 1.1304 + } 1.1305 + 1.1306 + JS_ASSERT_IF(!var.ensured, offset < var.lifetime->start); 1.1307 + unsigned start = var.lifetime->start; 1.1308 + 1.1309 + /* 1.1310 + * The variable is considered to be live at the bytecode which kills it 1.1311 + * (just not at earlier bytecodes). This behavior is needed by downstream 1.1312 + * register allocation (see FrameState::bestEvictReg). 1.1313 + */ 1.1314 + var.lifetime->start = offset; 1.1315 + var.lifetime->write = true; 1.1316 + 1.1317 + if (var.ensured) { 1.1318 + /* 1.1319 + * The variable is live even before the write, due to an enclosing try 1.1320 + * block. 
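(The ensured flag was set by ensureVariable when an exception entry extended this lifetime over the try body.)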
We need to split the lifetime to indicate there was a write. 1.1321 + * We set the new interval's savedEnd to 0, since it will always be 1.1322 + * adjacent to the old interval, so it never needs to be extended. 1.1323 + */ 1.1324 + var.lifetime = cx->typeLifoAlloc().new_<Lifetime>(start, 0, var.lifetime); 1.1325 + if (!var.lifetime) { 1.1326 + js_ReportOutOfMemory(cx); 1.1327 + return false; 1.1328 + } 1.1329 + var.lifetime->end = offset; 1.1330 + } else { 1.1331 + var.saved = var.lifetime; 1.1332 + var.savedEnd = 0; 1.1333 + var.lifetime = nullptr; 1.1334 + 1.1335 + saved[savedCount++] = &var; 1.1336 + } 1.1337 + 1.1338 + return true; 1.1339 +} 1.1340 + 1.1341 +inline bool 1.1342 +ScriptAnalysis::extendVariable(JSContext *cx, LifetimeVariable &var, 1.1343 + unsigned start, unsigned end) 1.1344 +{ 1.1345 + JS_ASSERT(var.lifetime); 1.1346 + if (var.ensured) { 1.1347 + /* 1.1348 + * If we are still ensured to be live, the try block must scope over 1.1349 + * the loop, in which case the variable is already guaranteed to be 1.1350 + * live for the entire loop. 1.1351 + */ 1.1352 + JS_ASSERT(var.lifetime->start < start); 1.1353 + return true; 1.1354 + } 1.1355 + 1.1356 + var.lifetime->start = start; 1.1357 + 1.1358 + /* 1.1359 + * Consider this code: 1.1360 + * 1.1361 + * while (...) { (#1) 1.1362 + * use x; (#2) 1.1363 + * ... 1.1364 + * x = ...; (#3) 1.1365 + * ... 1.1366 + * } (#4) 1.1367 + * 1.1368 + * Just before analyzing the while statement, there would be a live range 1.1369 + * from #1..#2 and a "point range" at #3. The job of extendVariable is to 1.1370 + * create a new live range from #3..#4. 1.1371 + * 1.1372 + * However, more extensions may be required if the definition of x is 1.1373 + * conditional. Consider the following. 1.1374 + * 1.1375 + * while (...) { (#1) 1.1376 + * use x; (#2) 1.1377 + * ... 1.1378 + * if (...) (#5) 1.1379 + * x = ...; (#3) 1.1380 + * ... 1.1381 + * } (#4) 1.1382 + * 1.1383 + * Assume that x is not used after the loop. Then, before extendVariable is 1.1384 + * run, the live ranges would be the same as before (#1..#2 and #3..#3). We 1.1385 + * still need to create a range from #3..#4. But, since the assignment at #3 1.1386 + * may never run, we also need to create a range from #2..#3. This is done 1.1387 + * as follows. 1.1388 + * 1.1389 + * Each time we create a Lifetime, we store the start of the most recently 1.1390 + * seen sequence of conditional code in the Lifetime's savedEnd field. So, 1.1391 + * when creating the Lifetime at #2, we set the Lifetime's savedEnd to 1.1392 + * #5. (The start of the most recent conditional is cached in each 1.1393 + * variable's savedEnd field.) Consequently, extendVariable is able to 1.1394 + * create a new interval from #2..#5 using the savedEnd field of the 1.1395 + * existing #1..#2 interval. 1.1396 + */ 1.1397 + 1.1398 + Lifetime *segment = var.lifetime; 1.1399 + while (segment && segment->start < end) { 1.1400 + uint32_t savedEnd = segment->savedEnd; 1.1401 + if (!segment->next || segment->next->start >= end) { 1.1402 + /* 1.1403 + * savedEnd is only set for variables killed in the middle of the 1.1404 + * loop. Make a tail segment connecting the last use with the 1.1405 + * back edge. 1.1406 + */ 1.1407 + if (segment->end >= end) { 1.1408 + /* Variable known to be live after the loop finishes. 
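Its segment already covers the backedge, so no tail segment is needed.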
*/ 1.1409 + break; 1.1410 + } 1.1411 + savedEnd = end; 1.1412 + } 1.1413 + JS_ASSERT(savedEnd <= end); 1.1414 + if (savedEnd > segment->end) { 1.1415 + Lifetime *tail = cx->typeLifoAlloc().new_<Lifetime>(savedEnd, 0, segment->next); 1.1416 + if (!tail) { 1.1417 + js_ReportOutOfMemory(cx); 1.1418 + return false; 1.1419 + } 1.1420 + tail->start = segment->end; 1.1421 + 1.1422 + /* 1.1423 + * Clear the segment's saved end, but preserve in the tail if this 1.1424 + * is the last segment in the loop and the variable is killed in an 1.1425 + * outer loop before the backedge. 1.1426 + */ 1.1427 + if (segment->savedEnd > end) { 1.1428 + JS_ASSERT(savedEnd == end); 1.1429 + tail->savedEnd = segment->savedEnd; 1.1430 + } 1.1431 + segment->savedEnd = 0; 1.1432 + 1.1433 + segment->next = tail; 1.1434 + segment = tail->next; 1.1435 + } else { 1.1436 + JS_ASSERT(segment->savedEnd == 0); 1.1437 + segment = segment->next; 1.1438 + } 1.1439 + } 1.1440 + return true; 1.1441 +} 1.1442 + 1.1443 +inline void 1.1444 +ScriptAnalysis::ensureVariable(LifetimeVariable &var, unsigned until) 1.1445 +{ 1.1446 + JS_ASSERT(var.lifetime); 1.1447 + 1.1448 + /* 1.1449 + * If we are already ensured, the current range we are trying to ensure 1.1450 + * should already be included. 1.1451 + */ 1.1452 + if (var.ensured) { 1.1453 + JS_ASSERT(var.lifetime->start <= until); 1.1454 + return; 1.1455 + } 1.1456 + 1.1457 + JS_ASSERT(until < var.lifetime->start); 1.1458 + var.lifetime->start = until; 1.1459 + var.ensured = true; 1.1460 +} 1.1461 + 1.1462 +///////////////////////////////////////////////////////////////////// 1.1463 +// SSA Analysis 1.1464 +///////////////////////////////////////////////////////////////////// 1.1465 + 1.1466 +/* Current value for a variable or stack value, as tracked during SSA. */ 1.1467 +struct SSAValueInfo 1.1468 +{ 1.1469 + SSAValue v; 1.1470 + 1.1471 + /* 1.1472 + * Sizes of branchTargets the last time this slot was written. Branches less 1.1473 + * than this threshold do not need to be inspected if the slot is written 1.1474 + * again, as they will already reflect the slot's value at the branch. 1.1475 + */ 1.1476 + int32_t branchSize; 1.1477 +}; 1.1478 + 1.1479 +bool 1.1480 +ScriptAnalysis::analyzeSSA(JSContext *cx) 1.1481 +{ 1.1482 + JS_ASSERT(cx->compartment()->activeAnalysis); 1.1483 + JS_ASSERT(codeArray); 1.1484 + JS_ASSERT(lifetimes); 1.1485 + 1.1486 + LifoAlloc &alloc = cx->typeLifoAlloc(); 1.1487 + unsigned maxDepth = script_->nslots() - script_->nfixed(); 1.1488 + 1.1489 + /* 1.1490 + * Current value of each variable and stack value. Empty for missing or 1.1491 + * untracked entries, i.e. escaping locals and arguments. 1.1492 + */ 1.1493 + SSAValueInfo *values = cx->pod_calloc<SSAValueInfo>(numSlots + maxDepth); 1.1494 + if (!values) { 1.1495 + js_ReportOutOfMemory(cx); 1.1496 + return false; 1.1497 + } 1.1498 + struct FreeSSAValues { 1.1499 + SSAValueInfo *values; 1.1500 + FreeSSAValues(SSAValueInfo *values) : values(values) {} 1.1501 + ~FreeSSAValues() { js_free(values); } 1.1502 + } free(values); 1.1503 + 1.1504 + SSAValueInfo *stack = values + numSlots; 1.1505 + uint32_t stackDepth = 0; 1.1506 + 1.1507 + for (uint32_t slot = ArgSlot(0); slot < numSlots; slot++) { 1.1508 + if (trackSlot(slot)) 1.1509 + values[slot].v.initInitial(slot); 1.1510 + } 1.1511 + 1.1512 + /* 1.1513 + * All target offsets for forward jumps we have seen (including ones whose 1.1514 + * target we have advanced past). 
We lazily add pending entries at these 1.1515 + * targets for the original value of variables modified before the branch 1.1516 + * rejoins. 1.1517 + */ 1.1518 + Vector<uint32_t> branchTargets(cx); 1.1519 + 1.1520 + /* 1.1521 + * Subset of branchTargets which are exception handlers at future offsets. 1.1522 + * Any new value of a variable modified before the target is reached is a 1.1523 + * potential value at that target, along with the lazy original value. 1.1524 + */ 1.1525 + Vector<uint32_t> exceptionTargets(cx); 1.1526 + 1.1527 + uint32_t offset = 0; 1.1528 + while (offset < script_->length()) { 1.1529 + jsbytecode *pc = script_->offsetToPC(offset); 1.1530 + JSOp op = (JSOp)*pc; 1.1531 + 1.1532 + uint32_t successorOffset = offset + GetBytecodeLength(pc); 1.1533 + 1.1534 + Bytecode *code = maybeCode(pc); 1.1535 + if (!code) { 1.1536 + offset = successorOffset; 1.1537 + continue; 1.1538 + } 1.1539 + 1.1540 + if (code->exceptionEntry) { 1.1541 + /* Remove from exception targets list, which reflects only future targets. */ 1.1542 + for (size_t i = 0; i < exceptionTargets.length(); i++) { 1.1543 + if (exceptionTargets[i] == offset) { 1.1544 + exceptionTargets[i] = exceptionTargets.back(); 1.1545 + exceptionTargets.popBack(); 1.1546 + break; 1.1547 + } 1.1548 + } 1.1549 + } 1.1550 + 1.1551 + if (code->stackDepth > stackDepth) 1.1552 + PodZero(stack + stackDepth, code->stackDepth - stackDepth); 1.1553 + stackDepth = code->stackDepth; 1.1554 + 1.1555 + if (op == JSOP_LOOPHEAD && code->loop) { 1.1556 + /* 1.1557 + * Make sure there is a pending value array for phi nodes at the 1.1558 + * loop head. We won't be able to clear these until we reach the 1.1559 + * loop's back edge. 1.1560 + * 1.1561 + * We need phi nodes for all variables which might be modified 1.1562 + * during the loop. This ensures that in the loop body we have 1.1563 + * already updated state to reflect possible changes that happen 1.1564 + * before the back edge, and don't need to go back and fix things 1.1565 + * up when we *do* get to the back edge. This could be made lazier. 1.1566 + * 1.1567 + * We don't make phi nodes for values on the stack at the head of 1.1568 + * the loop. These may be popped during the loop (i.e. for ITER 1.1569 + * loops), but in such cases the original value is pushed back. 1.1570 + */ 1.1571 + Vector<SlotValue> *&pending = code->pendingValues; 1.1572 + if (!pending) { 1.1573 + pending = cx->new_< Vector<SlotValue> >(cx); 1.1574 + if (!pending) { 1.1575 + js_ReportOutOfMemory(cx); 1.1576 + return false; 1.1577 + } 1.1578 + } 1.1579 + 1.1580 + /* 1.1581 + * Make phi nodes and update state for slots which are already in 1.1582 + * pending from previous branches to the loop head, and which are 1.1583 + * modified in the body of the loop. 
1.1584 + */ 1.1585 + for (unsigned i = 0; i < pending->length(); i++) { 1.1586 + SlotValue &v = (*pending)[i]; 1.1587 + if (v.slot < numSlots && liveness(v.slot).firstWrite(code->loop) != UINT32_MAX) { 1.1588 + if (v.value.kind() != SSAValue::PHI || v.value.phiOffset() != offset) { 1.1589 + JS_ASSERT(v.value.phiOffset() < offset); 1.1590 + SSAValue ov = v.value; 1.1591 + if (!makePhi(cx, v.slot, offset, &ov)) 1.1592 + return false; 1.1593 + if (!insertPhi(cx, ov, v.value)) 1.1594 + return false; 1.1595 + v.value = ov; 1.1596 + } 1.1597 + } 1.1598 + if (code->fallthrough || code->jumpFallthrough) { 1.1599 + if (!mergeValue(cx, offset, values[v.slot].v, &v)) 1.1600 + return false; 1.1601 + } 1.1602 + if (!mergeBranchTarget(cx, values[v.slot], v.slot, branchTargets, offset - 1)) 1.1603 + return false; 1.1604 + values[v.slot].v = v.value; 1.1605 + } 1.1606 + 1.1607 + /* 1.1608 + * Make phi nodes for all other slots which might be modified 1.1609 + * during the loop. This ensures that in the loop body we have 1.1610 + * already updated state to reflect possible changes that happen 1.1611 + * before the back edge, and don't need to go back and fix things 1.1612 + * up when we *do* get to the back edge. This could be made lazier. 1.1613 + */ 1.1614 + for (uint32_t slot = ArgSlot(0); slot < numSlots + stackDepth; slot++) { 1.1615 + if (slot >= numSlots || !trackSlot(slot)) 1.1616 + continue; 1.1617 + if (liveness(slot).firstWrite(code->loop) == UINT32_MAX) 1.1618 + continue; 1.1619 + if (values[slot].v.kind() == SSAValue::PHI && values[slot].v.phiOffset() == offset) { 1.1620 + /* There is already a pending entry for this slot. */ 1.1621 + continue; 1.1622 + } 1.1623 + SSAValue ov; 1.1624 + if (!makePhi(cx, slot, offset, &ov)) 1.1625 + return false; 1.1626 + if (code->fallthrough || code->jumpFallthrough) { 1.1627 + if (!insertPhi(cx, ov, values[slot].v)) 1.1628 + return false; 1.1629 + } 1.1630 + if (!mergeBranchTarget(cx, values[slot], slot, branchTargets, offset - 1)) 1.1631 + return false; 1.1632 + values[slot].v = ov; 1.1633 + if (!pending->append(SlotValue(slot, ov))) { 1.1634 + js_ReportOutOfMemory(cx); 1.1635 + return false; 1.1636 + } 1.1637 + } 1.1638 + } else if (code->pendingValues) { 1.1639 + /* 1.1640 + * New values at this point from a previous jump to this bytecode. 1.1641 + * If there is fallthrough from the previous instruction, merge 1.1642 + * with the current state and create phi nodes where necessary, 1.1643 + * otherwise replace current values with the new values. 1.1644 + * 1.1645 + * Catch blocks are artifically treated as having fallthrough, so 1.1646 + * that values written inside the block but not subsequently 1.1647 + * overwritten are picked up. 
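This is the 'exception' case below, which only merges slots that already have a tracked value at the handler.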
1.1648 + */ 1.1649 + bool exception = getCode(offset).exceptionEntry; 1.1650 + Vector<SlotValue> *pending = code->pendingValues; 1.1651 + for (unsigned i = 0; i < pending->length(); i++) { 1.1652 + SlotValue &v = (*pending)[i]; 1.1653 + if (code->fallthrough || code->jumpFallthrough || 1.1654 + (exception && values[v.slot].v.kind() != SSAValue::EMPTY)) { 1.1655 + if (!mergeValue(cx, offset, values[v.slot].v, &v)) 1.1656 + return false; 1.1657 + } 1.1658 + if (!mergeBranchTarget(cx, values[v.slot], v.slot, branchTargets, offset)) 1.1659 + return false; 1.1660 + values[v.slot].v = v.value; 1.1661 + } 1.1662 + if (!freezeNewValues(cx, offset)) 1.1663 + return false; 1.1664 + } 1.1665 + 1.1666 + unsigned nuses = GetUseCount(script_, offset); 1.1667 + unsigned ndefs = GetDefCount(script_, offset); 1.1668 + JS_ASSERT(stackDepth >= nuses); 1.1669 + 1.1670 + unsigned xuses = ExtendedUse(pc) ? nuses + 1 : nuses; 1.1671 + 1.1672 + if (xuses) { 1.1673 + code->poppedValues = alloc.newArray<SSAValue>(xuses); 1.1674 + if (!code->poppedValues) { 1.1675 + js_ReportOutOfMemory(cx); 1.1676 + return false; 1.1677 + } 1.1678 + for (unsigned i = 0; i < nuses; i++) { 1.1679 + SSAValue &v = stack[stackDepth - 1 - i].v; 1.1680 + code->poppedValues[i] = v; 1.1681 + v.clear(); 1.1682 + } 1.1683 + if (xuses > nuses) { 1.1684 + /* 1.1685 + * For SETLOCAL, etc. opcodes, add an extra popped value 1.1686 + * holding the value of the local before the op. 1.1687 + */ 1.1688 + uint32_t slot = GetBytecodeSlot(script_, pc); 1.1689 + if (trackSlot(slot)) 1.1690 + code->poppedValues[nuses] = values[slot].v; 1.1691 + else 1.1692 + code->poppedValues[nuses].clear(); 1.1693 + } 1.1694 + 1.1695 + if (xuses) { 1.1696 + SSAUseChain *useChains = alloc.newArray<SSAUseChain>(xuses); 1.1697 + if (!useChains) { 1.1698 + js_ReportOutOfMemory(cx); 1.1699 + return false; 1.1700 + } 1.1701 + PodZero(useChains, xuses); 1.1702 + for (unsigned i = 0; i < xuses; i++) { 1.1703 + const SSAValue &v = code->poppedValues[i]; 1.1704 + if (trackUseChain(v)) { 1.1705 + SSAUseChain *&uses = useChain(v); 1.1706 + useChains[i].popped = true; 1.1707 + useChains[i].offset = offset; 1.1708 + useChains[i].u.which = i; 1.1709 + useChains[i].next = uses; 1.1710 + uses = &useChains[i]; 1.1711 + } 1.1712 + } 1.1713 + } 1.1714 + } 1.1715 + 1.1716 + stackDepth -= nuses; 1.1717 + 1.1718 + for (unsigned i = 0; i < ndefs; i++) 1.1719 + stack[stackDepth + i].v.initPushed(offset, i); 1.1720 + 1.1721 + unsigned xdefs = ExtendedDef(pc) ? 
ndefs + 1 : ndefs; 1.1722 + if (xdefs) { 1.1723 + code->pushedUses = alloc.newArray<SSAUseChain *>(xdefs); 1.1724 + if (!code->pushedUses) { 1.1725 + js_ReportOutOfMemory(cx); 1.1726 + return false; 1.1727 + } 1.1728 + PodZero(code->pushedUses, xdefs); 1.1729 + } 1.1730 + 1.1731 + stackDepth += ndefs; 1.1732 + 1.1733 + if (op == JSOP_SETARG || op == JSOP_SETLOCAL) { 1.1734 + uint32_t slot = GetBytecodeSlot(script_, pc); 1.1735 + if (trackSlot(slot)) { 1.1736 + if (!mergeBranchTarget(cx, values[slot], slot, branchTargets, offset)) 1.1737 + return false; 1.1738 + if (!mergeExceptionTarget(cx, values[slot].v, slot, exceptionTargets)) 1.1739 + return false; 1.1740 + values[slot].v.initWritten(slot, offset); 1.1741 + } 1.1742 + } 1.1743 + 1.1744 + switch (op) { 1.1745 + case JSOP_GETARG: 1.1746 + case JSOP_GETLOCAL: { 1.1747 + uint32_t slot = GetBytecodeSlot(script_, pc); 1.1748 + if (trackSlot(slot)) { 1.1749 + /* 1.1750 + * Propagate the current value of the local to the pushed value, 1.1751 + * and remember it with an extended use on the opcode. 1.1752 + */ 1.1753 + stack[stackDepth - 1].v = code->poppedValues[0] = values[slot].v; 1.1754 + } 1.1755 + break; 1.1756 + } 1.1757 + 1.1758 + /* Short circuit ops which push back one of their operands. */ 1.1759 + 1.1760 + case JSOP_MOREITER: 1.1761 + stack[stackDepth - 2].v = code->poppedValues[0]; 1.1762 + break; 1.1763 + 1.1764 + case JSOP_MUTATEPROTO: 1.1765 + case JSOP_INITPROP: 1.1766 + case JSOP_INITPROP_GETTER: 1.1767 + case JSOP_INITPROP_SETTER: 1.1768 + stack[stackDepth - 1].v = code->poppedValues[1]; 1.1769 + break; 1.1770 + 1.1771 + case JSOP_SPREAD: 1.1772 + case JSOP_INITELEM_INC: 1.1773 + stack[stackDepth - 2].v = code->poppedValues[2]; 1.1774 + break; 1.1775 + 1.1776 + case JSOP_INITELEM_ARRAY: 1.1777 + stack[stackDepth - 1].v = code->poppedValues[1]; 1.1778 + break; 1.1779 + 1.1780 + case JSOP_INITELEM: 1.1781 + case JSOP_INITELEM_GETTER: 1.1782 + case JSOP_INITELEM_SETTER: 1.1783 + stack[stackDepth - 1].v = code->poppedValues[2]; 1.1784 + break; 1.1785 + 1.1786 + case JSOP_DUP: 1.1787 + stack[stackDepth - 1].v = stack[stackDepth - 2].v = code->poppedValues[0]; 1.1788 + break; 1.1789 + 1.1790 + case JSOP_DUP2: 1.1791 + stack[stackDepth - 1].v = stack[stackDepth - 3].v = code->poppedValues[0]; 1.1792 + stack[stackDepth - 2].v = stack[stackDepth - 4].v = code->poppedValues[1]; 1.1793 + break; 1.1794 + 1.1795 + case JSOP_DUPAT: { 1.1796 + unsigned pickedDepth = GET_UINT24 (pc); 1.1797 + JS_ASSERT(pickedDepth < stackDepth - 1); 1.1798 + stack[stackDepth - 1].v = stack[stackDepth - 2 - pickedDepth].v; 1.1799 + break; 1.1800 + } 1.1801 + 1.1802 + case JSOP_SWAP: 1.1803 + /* Swap is like pick 1. */ 1.1804 + case JSOP_PICK: { 1.1805 + unsigned pickedDepth = (op == JSOP_SWAP ? 1 : pc[1]); 1.1806 + stack[stackDepth - 1].v = code->poppedValues[pickedDepth]; 1.1807 + for (unsigned i = 0; i < pickedDepth; i++) 1.1808 + stack[stackDepth - 2 - i].v = code->poppedValues[i]; 1.1809 + break; 1.1810 + } 1.1811 + 1.1812 + /* 1.1813 + * Switch and try blocks preserve the stack between the original op 1.1814 + * and all case statements or exception/finally handlers. 
1.1815 + */ 1.1816 + 1.1817 + case JSOP_TABLESWITCH: { 1.1818 + unsigned defaultOffset = offset + GET_JUMP_OFFSET(pc); 1.1819 + jsbytecode *pc2 = pc + JUMP_OFFSET_LEN; 1.1820 + int32_t low = GET_JUMP_OFFSET(pc2); 1.1821 + pc2 += JUMP_OFFSET_LEN; 1.1822 + int32_t high = GET_JUMP_OFFSET(pc2); 1.1823 + pc2 += JUMP_OFFSET_LEN; 1.1824 + 1.1825 + for (int32_t i = low; i <= high; i++) { 1.1826 + unsigned targetOffset = offset + GET_JUMP_OFFSET(pc2); 1.1827 + if (targetOffset != offset) { 1.1828 + if (!checkBranchTarget(cx, targetOffset, branchTargets, values, stackDepth)) 1.1829 + return false; 1.1830 + } 1.1831 + pc2 += JUMP_OFFSET_LEN; 1.1832 + } 1.1833 + 1.1834 + if (!checkBranchTarget(cx, defaultOffset, branchTargets, values, stackDepth)) 1.1835 + return false; 1.1836 + break; 1.1837 + } 1.1838 + 1.1839 + case JSOP_TRY: { 1.1840 + JSTryNote *tn = script_->trynotes()->vector; 1.1841 + JSTryNote *tnlimit = tn + script_->trynotes()->length; 1.1842 + for (; tn < tnlimit; tn++) { 1.1843 + unsigned startOffset = script_->mainOffset() + tn->start; 1.1844 + if (startOffset == offset + 1) { 1.1845 + unsigned catchOffset = startOffset + tn->length; 1.1846 + 1.1847 + if (tn->kind != JSTRY_ITER && tn->kind != JSTRY_LOOP) { 1.1848 + if (!checkBranchTarget(cx, catchOffset, branchTargets, values, stackDepth)) 1.1849 + return false; 1.1850 + if (!checkExceptionTarget(cx, catchOffset, exceptionTargets)) 1.1851 + return false; 1.1852 + } 1.1853 + } 1.1854 + } 1.1855 + break; 1.1856 + } 1.1857 + 1.1858 + case JSOP_THROW: 1.1859 + case JSOP_RETURN: 1.1860 + case JSOP_RETRVAL: 1.1861 + if (!mergeAllExceptionTargets(cx, values, exceptionTargets)) 1.1862 + return false; 1.1863 + break; 1.1864 + 1.1865 + default:; 1.1866 + } 1.1867 + 1.1868 + if (IsJumpOpcode(op)) { 1.1869 + unsigned targetOffset = FollowBranch(cx, script_, offset); 1.1870 + if (!checkBranchTarget(cx, targetOffset, branchTargets, values, stackDepth)) 1.1871 + return false; 1.1872 + 1.1873 + /* 1.1874 + * If this is a back edge, we're done with the loop and can freeze 1.1875 + * the phi values at the head now. 1.1876 + */ 1.1877 + if (targetOffset < offset) { 1.1878 + if (!freezeNewValues(cx, targetOffset)) 1.1879 + return false; 1.1880 + } 1.1881 + } 1.1882 + 1.1883 + offset = successorOffset; 1.1884 + } 1.1885 + 1.1886 + return true; 1.1887 +} 1.1888 + 1.1889 +/* Get a phi node's capacity for a given length. */ 1.1890 +static inline unsigned 1.1891 +PhiNodeCapacity(unsigned length) 1.1892 +{ 1.1893 + if (length <= 4) 1.1894 + return 4; 1.1895 + 1.1896 + return 1 << (FloorLog2(length - 1) + 1); 1.1897 +} 1.1898 + 1.1899 +bool 1.1900 +ScriptAnalysis::makePhi(JSContext *cx, uint32_t slot, uint32_t offset, SSAValue *pv) 1.1901 +{ 1.1902 + SSAPhiNode *node = cx->typeLifoAlloc().new_<SSAPhiNode>(); 1.1903 + SSAValue *options = cx->typeLifoAlloc().newArray<SSAValue>(PhiNodeCapacity(0)); 1.1904 + if (!node || !options) { 1.1905 + js_ReportOutOfMemory(cx); 1.1906 + return false; 1.1907 + } 1.1908 + node->slot = slot; 1.1909 + node->options = options; 1.1910 + pv->initPhi(offset, node); 1.1911 + return true; 1.1912 +} 1.1913 + 1.1914 +bool 1.1915 +ScriptAnalysis::insertPhi(JSContext *cx, SSAValue &phi, const SSAValue &v) 1.1916 +{ 1.1917 + JS_ASSERT(phi.kind() == SSAValue::PHI); 1.1918 + SSAPhiNode *node = phi.phiNode(); 1.1919 + 1.1920 + /* 1.1921 + * Filter dupes inserted into small nodes to keep things clean and avoid 1.1922 + * extra type constraints, but don't bother on large phi nodes to avoid 1.1923 + * quadratic behavior. 
1.1924 + */ 1.1925 + if (node->length <= 8) { 1.1926 + for (unsigned i = 0; i < node->length; i++) { 1.1927 + if (v == node->options[i]) 1.1928 + return true; 1.1929 + } 1.1930 + } 1.1931 + 1.1932 + if (trackUseChain(v)) { 1.1933 + SSAUseChain *&uses = useChain(v); 1.1934 + 1.1935 + SSAUseChain *use = cx->typeLifoAlloc().new_<SSAUseChain>(); 1.1936 + if (!use) { 1.1937 + js_ReportOutOfMemory(cx); 1.1938 + return false; 1.1939 + } 1.1940 + 1.1941 + use->popped = false; 1.1942 + use->offset = phi.phiOffset(); 1.1943 + use->u.phi = node; 1.1944 + use->next = uses; 1.1945 + uses = use; 1.1946 + } 1.1947 + 1.1948 + if (node->length < PhiNodeCapacity(node->length)) { 1.1949 + node->options[node->length++] = v; 1.1950 + return true; 1.1951 + } 1.1952 + 1.1953 + SSAValue *newOptions = 1.1954 + cx->typeLifoAlloc().newArray<SSAValue>(PhiNodeCapacity(node->length + 1)); 1.1955 + if (!newOptions) { 1.1956 + js_ReportOutOfMemory(cx); 1.1957 + return false; 1.1958 + } 1.1959 + 1.1960 + PodCopy(newOptions, node->options, node->length); 1.1961 + node->options = newOptions; 1.1962 + node->options[node->length++] = v; 1.1963 + 1.1964 + return true; 1.1965 +} 1.1966 + 1.1967 +inline bool 1.1968 +ScriptAnalysis::mergeValue(JSContext *cx, uint32_t offset, const SSAValue &v, SlotValue *pv) 1.1969 +{ 1.1970 + /* Make sure that v is accounted for in the pending value or phi value at pv. */ 1.1971 + JS_ASSERT(v.kind() != SSAValue::EMPTY && pv->value.kind() != SSAValue::EMPTY); 1.1972 + 1.1973 + if (v == pv->value) 1.1974 + return true; 1.1975 + 1.1976 + if (pv->value.kind() != SSAValue::PHI || pv->value.phiOffset() < offset) { 1.1977 + SSAValue ov = pv->value; 1.1978 + return makePhi(cx, pv->slot, offset, &pv->value) 1.1979 + && insertPhi(cx, pv->value, v) 1.1980 + && insertPhi(cx, pv->value, ov); 1.1981 + } 1.1982 + 1.1983 + JS_ASSERT(pv->value.phiOffset() == offset); 1.1984 + return insertPhi(cx, pv->value, v); 1.1985 +} 1.1986 + 1.1987 +bool 1.1988 +ScriptAnalysis::checkPendingValue(JSContext *cx, const SSAValue &v, uint32_t slot, 1.1989 + Vector<SlotValue> *pending) 1.1990 +{ 1.1991 + JS_ASSERT(v.kind() != SSAValue::EMPTY); 1.1992 + 1.1993 + for (unsigned i = 0; i < pending->length(); i++) { 1.1994 + if ((*pending)[i].slot == slot) 1.1995 + return true; 1.1996 + } 1.1997 + 1.1998 + if (!pending->append(SlotValue(slot, v))) { 1.1999 + js_ReportOutOfMemory(cx); 1.2000 + return false; 1.2001 + } 1.2002 + 1.2003 + return true; 1.2004 +} 1.2005 + 1.2006 +bool 1.2007 +ScriptAnalysis::checkBranchTarget(JSContext *cx, uint32_t targetOffset, 1.2008 + Vector<uint32_t> &branchTargets, 1.2009 + SSAValueInfo *values, uint32_t stackDepth) 1.2010 +{ 1.2011 + unsigned targetDepth = getCode(targetOffset).stackDepth; 1.2012 + JS_ASSERT(targetDepth <= stackDepth); 1.2013 + 1.2014 + /* 1.2015 + * If there is already an active branch to target, make sure its pending 1.2016 + * values reflect any changes made since the first branch. Otherwise, add a 1.2017 + * new pending branch and determine its pending values lazily. 
1.2018 + */ 1.2019 + Vector<SlotValue> *&pending = getCode(targetOffset).pendingValues; 1.2020 + if (pending) { 1.2021 + for (unsigned i = 0; i < pending->length(); i++) { 1.2022 + SlotValue &v = (*pending)[i]; 1.2023 + if (!mergeValue(cx, targetOffset, values[v.slot].v, &v)) 1.2024 + return false; 1.2025 + } 1.2026 + } else { 1.2027 + pending = cx->new_< Vector<SlotValue> >(cx); 1.2028 + if (!pending || !branchTargets.append(targetOffset)) { 1.2029 + js_ReportOutOfMemory(cx); 1.2030 + return false; 1.2031 + } 1.2032 + } 1.2033 + 1.2034 + /* 1.2035 + * Make sure there is a pending entry for each value on the stack. 1.2036 + * The number of stack entries at join points is usually zero, and 1.2037 + * we don't want to look at the active branches while popping and 1.2038 + * pushing values in each opcode. 1.2039 + */ 1.2040 + for (unsigned i = 0; i < targetDepth; i++) { 1.2041 + uint32_t slot = StackSlot(script_, i); 1.2042 + if (!checkPendingValue(cx, values[slot].v, slot, pending)) 1.2043 + return false; 1.2044 + } 1.2045 + 1.2046 + return true; 1.2047 +} 1.2048 + 1.2049 +bool 1.2050 +ScriptAnalysis::checkExceptionTarget(JSContext *cx, uint32_t catchOffset, 1.2051 + Vector<uint32_t> &exceptionTargets) 1.2052 +{ 1.2053 + JS_ASSERT(getCode(catchOffset).exceptionEntry); 1.2054 + 1.2055 + /* 1.2056 + * The catch offset will already be in the branch targets, just check 1.2057 + * whether this is already a known exception target. 1.2058 + */ 1.2059 + for (unsigned i = 0; i < exceptionTargets.length(); i++) { 1.2060 + if (exceptionTargets[i] == catchOffset) 1.2061 + return true; 1.2062 + } 1.2063 + if (!exceptionTargets.append(catchOffset)) { 1.2064 + js_ReportOutOfMemory(cx); 1.2065 + return false; 1.2066 + } 1.2067 + 1.2068 + return true; 1.2069 +} 1.2070 + 1.2071 +bool 1.2072 +ScriptAnalysis::mergeBranchTarget(JSContext *cx, SSAValueInfo &value, uint32_t slot, 1.2073 + const Vector<uint32_t> &branchTargets, uint32_t currentOffset) 1.2074 +{ 1.2075 + if (slot >= numSlots) { 1.2076 + /* 1.2077 + * There is no need to lazily check that there are pending values at 1.2078 + * branch targets for slots on the stack, these are added to pending 1.2079 + * eagerly. 1.2080 + */ 1.2081 + return true; 1.2082 + } 1.2083 + 1.2084 + JS_ASSERT(trackSlot(slot)); 1.2085 + 1.2086 + /* 1.2087 + * Before changing the value of a variable, make sure the old value is 1.2088 + * marked at the target of any branches jumping over the current opcode. 1.2089 + * Only look at new branch targets which have appeared since the last time 1.2090 + * the variable was written. 1.2091 + */ 1.2092 + for (int i = branchTargets.length() - 1; i >= value.branchSize; i--) { 1.2093 + if (branchTargets[i] <= currentOffset) 1.2094 + continue; 1.2095 + 1.2096 + const Bytecode &code = getCode(branchTargets[i]); 1.2097 + 1.2098 + Vector<SlotValue> *pending = code.pendingValues; 1.2099 + if (!checkPendingValue(cx, value.v, slot, pending)) 1.2100 + return false; 1.2101 + } 1.2102 + 1.2103 + value.branchSize = branchTargets.length(); 1.2104 + return true; 1.2105 +} 1.2106 + 1.2107 +bool 1.2108 +ScriptAnalysis::mergeExceptionTarget(JSContext *cx, const SSAValue &value, uint32_t slot, 1.2109 + const Vector<uint32_t> &exceptionTargets) 1.2110 +{ 1.2111 + JS_ASSERT(trackSlot(slot)); 1.2112 + 1.2113 + /* 1.2114 + * Update the value at exception targets with the value of a variable 1.2115 + * before it is overwritten. 
Unlike mergeBranchTarget, this is done whether 1.2116 + * or not the overwritten value is the value of the variable at the 1.2117 + * original branch. Values for a variable which are written after the 1.2118 + * try block starts and overwritten before it is finished can still be 1.2119 + * seen at exception handlers via exception paths. 1.2120 + */ 1.2121 + for (unsigned i = 0; i < exceptionTargets.length(); i++) { 1.2122 + unsigned offset = exceptionTargets[i]; 1.2123 + Vector<SlotValue> *pending = getCode(offset).pendingValues; 1.2124 + 1.2125 + bool duplicate = false; 1.2126 + for (unsigned i = 0; i < pending->length(); i++) { 1.2127 + if ((*pending)[i].slot == slot) { 1.2128 + duplicate = true; 1.2129 + SlotValue &v = (*pending)[i]; 1.2130 + if (!mergeValue(cx, offset, value, &v)) 1.2131 + return false; 1.2132 + break; 1.2133 + } 1.2134 + } 1.2135 + 1.2136 + if (!duplicate && !pending->append(SlotValue(slot, value))) { 1.2137 + js_ReportOutOfMemory(cx); 1.2138 + return false; 1.2139 + } 1.2140 + } 1.2141 + 1.2142 + return true; 1.2143 +} 1.2144 + 1.2145 +bool 1.2146 +ScriptAnalysis::mergeAllExceptionTargets(JSContext *cx, SSAValueInfo *values, 1.2147 + const Vector<uint32_t> &exceptionTargets) 1.2148 +{ 1.2149 + for (unsigned i = 0; i < exceptionTargets.length(); i++) { 1.2150 + Vector<SlotValue> *pending = getCode(exceptionTargets[i]).pendingValues; 1.2151 + for (unsigned i = 0; i < pending->length(); i++) { 1.2152 + const SlotValue &v = (*pending)[i]; 1.2153 + if (trackSlot(v.slot)) { 1.2154 + if (!mergeExceptionTarget(cx, values[v.slot].v, v.slot, exceptionTargets)) 1.2155 + return false; 1.2156 + } 1.2157 + } 1.2158 + } 1.2159 + return true; 1.2160 +} 1.2161 + 1.2162 +bool 1.2163 +ScriptAnalysis::freezeNewValues(JSContext *cx, uint32_t offset) 1.2164 +{ 1.2165 + Bytecode &code = getCode(offset); 1.2166 + 1.2167 + Vector<SlotValue> *pending = code.pendingValues; 1.2168 + code.pendingValues = nullptr; 1.2169 + 1.2170 + unsigned count = pending->length(); 1.2171 + if (count == 0) { 1.2172 + js_delete(pending); 1.2173 + return true; 1.2174 + } 1.2175 + 1.2176 + code.newValues = cx->typeLifoAlloc().newArray<SlotValue>(count + 1); 1.2177 + if (!code.newValues) { 1.2178 + js_ReportOutOfMemory(cx); 1.2179 + return false; 1.2180 + } 1.2181 + 1.2182 + for (unsigned i = 0; i < count; i++) 1.2183 + code.newValues[i] = (*pending)[i]; 1.2184 + code.newValues[count].slot = 0; 1.2185 + code.newValues[count].value.clear(); 1.2186 + 1.2187 + js_delete(pending); 1.2188 + return true; 1.2189 +} 1.2190 + 1.2191 +bool 1.2192 +ScriptAnalysis::needsArgsObj(JSContext *cx, SeenVector &seen, const SSAValue &v) 1.2193 +{ 1.2194 + /* 1.2195 + * trackUseChain is false for initial values of variables, which 1.2196 + * cannot hold the script's arguments object. 
1.2197 + */ 1.2198 + if (!trackUseChain(v)) 1.2199 + return false; 1.2200 + 1.2201 + for (unsigned i = 0; i < seen.length(); i++) { 1.2202 + if (v == seen[i]) 1.2203 + return false; 1.2204 + } 1.2205 + if (!seen.append(v)) 1.2206 + return true; 1.2207 + 1.2208 + SSAUseChain *use = useChain(v); 1.2209 + while (use) { 1.2210 + if (needsArgsObj(cx, seen, use)) 1.2211 + return true; 1.2212 + use = use->next; 1.2213 + } 1.2214 + 1.2215 + return false; 1.2216 +} 1.2217 + 1.2218 +bool 1.2219 +ScriptAnalysis::needsArgsObj(JSContext *cx, SeenVector &seen, SSAUseChain *use) 1.2220 +{ 1.2221 + if (!use->popped) 1.2222 + return needsArgsObj(cx, seen, SSAValue::PhiValue(use->offset, use->u.phi)); 1.2223 + 1.2224 + jsbytecode *pc = script_->offsetToPC(use->offset); 1.2225 + JSOp op = JSOp(*pc); 1.2226 + 1.2227 + if (op == JSOP_POP || op == JSOP_POPN) 1.2228 + return false; 1.2229 + 1.2230 + /* We can read the frame's arguments directly for f.apply(x, arguments). */ 1.2231 + if (op == JSOP_FUNAPPLY && GET_ARGC(pc) == 2 && use->u.which == 0) { 1.2232 + argumentsContentsObserved_ = true; 1.2233 + return false; 1.2234 + } 1.2235 + 1.2236 + /* arguments[i] can read fp->canonicalActualArg(i) directly. */ 1.2237 + if (op == JSOP_GETELEM && use->u.which == 1) { 1.2238 + argumentsContentsObserved_ = true; 1.2239 + return false; 1.2240 + } 1.2241 + 1.2242 + /* arguments.length length can read fp->numActualArgs() directly. */ 1.2243 + if (op == JSOP_LENGTH) 1.2244 + return false; 1.2245 + 1.2246 + /* Allow assignments to non-closed locals (but not arguments). */ 1.2247 + 1.2248 + if (op == JSOP_SETLOCAL) { 1.2249 + uint32_t slot = GetBytecodeSlot(script_, pc); 1.2250 + if (!trackSlot(slot)) 1.2251 + return true; 1.2252 + return needsArgsObj(cx, seen, SSAValue::PushedValue(use->offset, 0)) || 1.2253 + needsArgsObj(cx, seen, SSAValue::WrittenVar(slot, use->offset)); 1.2254 + } 1.2255 + 1.2256 + if (op == JSOP_GETLOCAL) 1.2257 + return needsArgsObj(cx, seen, SSAValue::PushedValue(use->offset, 0)); 1.2258 + 1.2259 + return true; 1.2260 +} 1.2261 + 1.2262 +bool 1.2263 +ScriptAnalysis::needsArgsObj(JSContext *cx) 1.2264 +{ 1.2265 + JS_ASSERT(script_->argumentsHasVarBinding()); 1.2266 + 1.2267 + /* 1.2268 + * Always construct arguments objects when in debug mode and for generator 1.2269 + * scripts (generators can be suspended when speculation fails). 1.2270 + * 1.2271 + * FIXME: Don't build arguments for ES6 generator expressions. 1.2272 + */ 1.2273 + if (cx->compartment()->debugMode() || script_->isGenerator()) 1.2274 + return true; 1.2275 + 1.2276 + /* 1.2277 + * If the script has dynamic name accesses which could reach 'arguments', 1.2278 + * the parser will already have checked to ensure there are no explicit 1.2279 + * uses of 'arguments' in the function. If there are such uses, the script 1.2280 + * will be marked as definitely needing an arguments object. 1.2281 + * 1.2282 + * New accesses on 'arguments' can occur through 'eval' or the debugger 1.2283 + * statement. In the former case, we will dynamically detect the use and 1.2284 + * mark the arguments optimization as having failed. 
1.2285 + */ 1.2286 + if (script_->bindingsAccessedDynamically()) 1.2287 + return false; 1.2288 + 1.2289 + unsigned pcOff = script_->pcToOffset(script_->argumentsBytecode()); 1.2290 + 1.2291 + SeenVector seen(cx); 1.2292 + if (needsArgsObj(cx, seen, SSAValue::PushedValue(pcOff, 0))) 1.2293 + return true; 1.2294 + 1.2295 + /* 1.2296 + * If a script explicitly accesses the contents of 'arguments', and has 1.2297 + * formals which may be stored as part of a call object, don't use lazy 1.2298 + * arguments. The compiler can then assume that accesses through 1.2299 + * arguments[i] will be on unaliased variables. 1.2300 + */ 1.2301 + if (script_->funHasAnyAliasedFormal() && argumentsContentsObserved_) 1.2302 + return true; 1.2303 + 1.2304 + return false; 1.2305 +} 1.2306 + 1.2307 +#ifdef DEBUG 1.2308 + 1.2309 +void 1.2310 +ScriptAnalysis::printSSA(JSContext *cx) 1.2311 +{ 1.2312 + types::AutoEnterAnalysis enter(cx); 1.2313 + 1.2314 + printf("\n"); 1.2315 + 1.2316 + RootedScript script(cx, script_); 1.2317 + for (unsigned offset = 0; offset < script_->length(); offset++) { 1.2318 + Bytecode *code = maybeCode(offset); 1.2319 + if (!code) 1.2320 + continue; 1.2321 + 1.2322 + jsbytecode *pc = script_->offsetToPC(offset); 1.2323 + 1.2324 + PrintBytecode(cx, script, pc); 1.2325 + 1.2326 + SlotValue *newv = code->newValues; 1.2327 + if (newv) { 1.2328 + while (newv->slot) { 1.2329 + if (newv->value.kind() != SSAValue::PHI || newv->value.phiOffset() != offset) { 1.2330 + newv++; 1.2331 + continue; 1.2332 + } 1.2333 + printf(" phi "); 1.2334 + newv->value.print(); 1.2335 + printf(" ["); 1.2336 + for (unsigned i = 0; i < newv->value.phiLength(); i++) { 1.2337 + if (i) 1.2338 + printf(","); 1.2339 + newv->value.phiValue(i).print(); 1.2340 + } 1.2341 + printf("]\n"); 1.2342 + newv++; 1.2343 + } 1.2344 + } 1.2345 + 1.2346 + unsigned nuses = GetUseCount(script_, offset); 1.2347 + unsigned xuses = ExtendedUse(pc) ? nuses + 1 : nuses; 1.2348 + 1.2349 + for (unsigned i = 0; i < xuses; i++) { 1.2350 + printf(" popped%d: ", i); 1.2351 + code->poppedValues[i].print(); 1.2352 + printf("\n"); 1.2353 + } 1.2354 + } 1.2355 + 1.2356 + printf("\n"); 1.2357 +} 1.2358 + 1.2359 +void 1.2360 +SSAValue::print() const 1.2361 +{ 1.2362 + switch (kind()) { 1.2363 + 1.2364 + case EMPTY: 1.2365 + printf("empty"); 1.2366 + break; 1.2367 + 1.2368 + case PUSHED: 1.2369 + printf("pushed:%05u#%u", pushedOffset(), pushedIndex()); 1.2370 + break; 1.2371 + 1.2372 + case VAR: 1.2373 + if (varInitial()) 1.2374 + printf("initial:%u", varSlot()); 1.2375 + else 1.2376 + printf("write:%05u", varOffset()); 1.2377 + break; 1.2378 + 1.2379 + case PHI: 1.2380 + printf("phi:%05u#%u", phiOffset(), phiSlot()); 1.2381 + break; 1.2382 + 1.2383 + default: 1.2384 + MOZ_ASSUME_UNREACHABLE("Bad kind"); 1.2385 + } 1.2386 +} 1.2387 + 1.2388 +void 1.2389 +ScriptAnalysis::assertMatchingDebugMode() 1.2390 +{ 1.2391 + JS_ASSERT(!!script_->compartment()->debugMode() == !!originalDebugMode_); 1.2392 +} 1.2393 + 1.2394 +void 1.2395 +ScriptAnalysis::assertMatchingStackDepthAtOffset(uint32_t offset, uint32_t stackDepth) { 1.2396 + JS_ASSERT_IF(maybeCode(offset), getCode(offset).stackDepth == stackDepth); 1.2397 +} 1.2398 + 1.2399 +#endif /* DEBUG */ 1.2400 + 1.2401 +} // namespace analyze 1.2402 +} // namespace js