/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/ParallelFunctions.h"

#include "builtin/TypedObject.h"
#include "jit/arm/Simulator-arm.h"
#include "vm/ArrayObject.h"

#include "jsgcinlines.h"
#include "jsobjinlines.h"

using namespace js;
using namespace jit;

using parallel::Spew;
using parallel::SpewOps;
using parallel::SpewBailouts;
using parallel::SpewBailoutIR;

// Load the current thread context.
ForkJoinContext *
jit::ForkJoinContextPar()
{
    return ForkJoinContext::current();
}

// NewGCThingPar() is called in place of NewGCThing() when executing
// parallel code. It uses the ArenaLists for the current thread and
// allocates from there.
JSObject *
jit::NewGCThingPar(ForkJoinContext *cx, gc::AllocKind allocKind)
{
    JS_ASSERT(ForkJoinContext::current() == cx);
    return js::NewGCObject<NoGC>(cx, allocKind, 0, gc::TenuredHeap);
}

bool
jit::ParallelWriteGuard(ForkJoinContext *cx, JSObject *object)
{
    // Implements the most general form of the write guard, which is
    // suitable for writes to any object O. There are two cases to
    // consider and test for:
    //
    // 1. Writes to thread-local memory are safe. Thread-local memory
    //    is defined as memory allocated by the current thread.
    //    The definition of the PJS API guarantees that such memory
    //    cannot have escaped to other parallel threads.
    //
    // 2. Writes into the output buffer are safe. Some PJS operations
    //    supply an out pointer into the final target buffer. The design
    //    of the API ensures that this out pointer always points at a
    //    fresh region of the buffer that is not accessible to other
    //    threads. Thus, even though this output buffer has not been
    //    created by the current thread, it is writable.
    //
    // There are some subtleties to consider:
    //
    // A. Typed objects and typed arrays are just views onto a base buffer.
    //    For the purposes of guarding parallel writes, it is not important
    //    whether the *view* is thread-local -- what matters is whether
    //    the *underlying buffer* is thread-local.
    //
    // B. With regard to the output buffer, we have to be careful
    //    because of the potential for sequential iterations to be
    //    intermingled with parallel ones. During a sequential
    //    iteration, the out pointer could escape into global
    //    variables and so forth, and thus be used during later
    //    parallel operations.
    //    However, those out pointers must point to regions of the
    //    final output buffer distinct from the ones that are currently
    //    being written, so there is no harm done in letting them be
    //    read (but not written).
    //
    //    In order to be able to distinguish escaped out pointers from
    //    prior iterations and the proper out pointers from the
    //    current iteration, we always track a *target memory region*
    //    (which is a span of bytes within the output buffer) and not
    //    just the output buffer itself.

    JS_ASSERT(ForkJoinContext::current() == cx);

    if (object->is<TypedObject>()) {
        TypedObject &typedObj = object->as<TypedObject>();

        // Note: check target region based on `typedObj`, not the owner.
        // This is because `typedObj` may point to some subregion of the
        // owner and we only care if that *subregion* is within the
        // target region, not the entire owner.
        if (IsInTargetRegion(cx, &typedObj))
            return true;

        // Also check whether owner is thread-local.
        ArrayBufferObject &owner = typedObj.owner();
        return cx->isThreadLocal(&owner);
    }

    // For other kinds of writable objects, must be thread-local.
    return cx->isThreadLocal(object);
}

// Check that |object| (which must be a typed object) maps
// to memory in the target region.
//
// For efficiency, we assume that all handles which the user has
// access to are either entirely within the target region or entirely
// without, but not straddling the target region nor encompassing
// it. This invariant is maintained by the PJS APIs, where the target
// region and handles are always elements of the same output array.
bool
jit::IsInTargetRegion(ForkJoinContext *cx, TypedObject *typedObj)
{
    JS_ASSERT(typedObj->is<TypedObject>()); // in case JIT supplies something bogus
    uint8_t *typedMem = typedObj->typedMem();
    return (typedMem >= cx->targetRegionStart &&
            typedMem <  cx->targetRegionEnd);
}

#ifdef DEBUG
static void
printTrace(const char *prefix, struct IonLIRTraceData *cached)
{
    fprintf(stderr, "%s / Block %3u / LIR %3u / Mode %u / LIR %s\n",
            prefix,
            cached->blockIndex, cached->lirIndex, cached->execModeInt, cached->lirOpName);
}

static struct IonLIRTraceData seqTraceData;
#endif
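// When IONFLAGS contains "trace-all", TraceLIR prints a line for every
// LIR instruction executed. Otherwise it only caches the most recently
// executed LIR per execution mode so that it can be reported if a
// bailout is signalled (via the 0xDEADBEEF block-index sentinel below).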
void
jit::TraceLIR(IonLIRTraceData *current)
{
#ifdef DEBUG
    static enum { NotSet, All, Bailouts } traceMode;

    // If you set IONFLAGS=trace, this function will be invoked before every LIR.
    //
    // You can either modify it to do whatever you like, or use gdb scripting.
    // For example:
    //
    // break TraceLIR
    // commands
    // continue
    // exit

    if (traceMode == NotSet) {
        // Racy, but that's ok.
        const char *env = getenv("IONFLAGS");
        if (strstr(env, "trace-all"))
            traceMode = All;
        else
            traceMode = Bailouts;
    }

    IonLIRTraceData *cached;
    if (current->execModeInt == 0)
        cached = &seqTraceData;
    else
        cached = &ForkJoinContext::current()->traceData;

    if (current->blockIndex == 0xDEADBEEF) {
        if (current->execModeInt == 0)
            printTrace("BAILOUT", cached);
        else
            SpewBailoutIR(cached);
    }

    memcpy(cached, current, sizeof(IonLIRTraceData));

    if (traceMode == All)
        printTrace("Exec", cached);
#endif
}

bool
jit::CheckOverRecursedPar(ForkJoinContext *cx)
{
    JS_ASSERT(ForkJoinContext::current() == cx);
    int stackDummy_;

    // When an interrupt is requested, the main thread stack limit is
    // overwritten with a sentinel value that brings us here.
    // Therefore, we must check whether this is really a stack overrun
    // and, if not, check whether an interrupt was requested.
    //
    // When not on the main thread, we don't overwrite the stack
    // limit, but we do still call into this routine if the interrupt
    // flag is set, so we still need to double check.

#ifdef JS_ARM_SIMULATOR
    if (Simulator::Current()->overRecursed()) {
        cx->bailoutRecord->setCause(ParallelBailoutOverRecursed);
        return false;
    }
#endif

    uintptr_t realStackLimit;
    if (cx->isMainThread())
        realStackLimit = GetNativeStackLimit(cx);
    else
        realStackLimit = cx->perThreadData->jitStackLimit;

    if (!JS_CHECK_STACK_SIZE(realStackLimit, &stackDummy_)) {
        cx->bailoutRecord->setCause(ParallelBailoutOverRecursed);
        return false;
    }

    return InterruptCheckPar(cx);
}

bool
jit::InterruptCheckPar(ForkJoinContext *cx)
{
    JS_ASSERT(ForkJoinContext::current() == cx);
    bool result = cx->check();
    if (!result) {
        // Do not set the cause here. Either it was set by this
        // thread already by some code that then triggered an abort,
        // or else we are just picking up an abort from some other
        // thread. Either way we have nothing useful to contribute so
        // we might as well leave our bailout cause unset.
        return false;
    }
    return true;
}

JSObject *
jit::ExtendArrayPar(ForkJoinContext *cx, JSObject *array, uint32_t length)
{
    JSObject::EnsureDenseResult res =
        array->ensureDenseElementsPreservePackedFlag(cx, 0, length);
    if (res != JSObject::ED_OK)
        return nullptr;
    return array;
}
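// Property and element writes from parallel code. Returning false from
// these helpers bails us back to sequential execution whenever the
// write would require effects we cannot perform in parallel, such as
// invoking setter hooks or updating type information.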
bool
jit::SetPropertyPar(ForkJoinContext *cx, HandleObject obj, HandlePropertyName name,
                    HandleValue value, bool strict, jsbytecode *pc)
{
    JS_ASSERT(cx->isThreadLocal(obj));

    if (*pc == JSOP_SETALIASEDVAR) {
        // See comment in jit::SetProperty.
        Shape *shape = obj->nativeLookupPure(name);
        JS_ASSERT(shape && shape->hasSlot());
        return obj->nativeSetSlotIfHasType(shape, value);
    }

    // Fail early on hooks.
    if (obj->getOps()->setProperty)
        return TP_RETRY_SEQUENTIALLY;

    RootedValue v(cx, value);
    RootedId id(cx, NameToId(name));
    return baseops::SetPropertyHelper<ParallelExecution>(cx, obj, obj, id, baseops::Qualified, &v,
                                                         strict);
}

bool
jit::SetElementPar(ForkJoinContext *cx, HandleObject obj, HandleValue index, HandleValue value,
                   bool strict)
{
    RootedId id(cx);
    if (!ValueToIdPure(index, id.address()))
        return false;

    // SetObjectElementOperation, the sequential version, has several checks
    // for certain deoptimizing behaviors, such as marking that we have
    // written to a hole or performed a non-indexed element access. We don't
    // do that here, as we can't modify any TI state anyway. If we needed to
    // add a new type, we would bail out.
    RootedValue v(cx, value);
    return baseops::SetPropertyHelper<ParallelExecution>(cx, obj, obj, id, baseops::Qualified, &v,
                                                         strict);
}

bool
jit::SetDenseElementPar(ForkJoinContext *cx, HandleObject obj, int32_t index, HandleValue value,
                        bool strict)
{
    RootedValue indexVal(cx, Int32Value(index));
    return SetElementPar(cx, obj, indexVal, value, strict);
}

JSString *
jit::ConcatStringsPar(ForkJoinContext *cx, HandleString left, HandleString right)
{
    return ConcatStrings<NoGC>(cx, left, right);
}

JSFlatString *
jit::IntToStringPar(ForkJoinContext *cx, int i)
{
    return Int32ToString<NoGC>(cx, i);
}

JSString *
jit::DoubleToStringPar(ForkJoinContext *cx, double d)
{
    return NumberToString<NoGC>(cx, d);
}

JSString *
jit::PrimitiveToStringPar(ForkJoinContext *cx, HandleValue input)
{
    // All other cases are handled in assembly.
    JS_ASSERT(input.isDouble() || input.isInt32());

    if (input.isInt32())
        return Int32ToString<NoGC>(cx, input.toInt32());

    return NumberToString<NoGC>(cx, input.toDouble());
}

bool
jit::StringToNumberPar(ForkJoinContext *cx, JSString *str, double *out)
{
    return StringToNumber(cx, str, out);
}
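// PAR_RELATIONAL_OP expands into the body of the relational stubs
// defined below. It expects |cx|, |lhs|, |rhs|, and |res| to be in
// scope, handles int32/double/boolean operands inline, and otherwise
// defers to CompareMaybeStringsPar, whose failure return bails us out
// of parallel execution.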
#define PAR_RELATIONAL_OP(OP, EXPECTED)                                         \
do {                                                                            \
    /* Optimize for two int-tagged operands (typical loop control). */          \
    if (lhs.isInt32() && rhs.isInt32()) {                                       \
        *res = (lhs.toInt32() OP rhs.toInt32()) == EXPECTED;                    \
    } else if (lhs.isNumber() && rhs.isNumber()) {                              \
        double l = lhs.toNumber(), r = rhs.toNumber();                          \
        *res = (l OP r) == EXPECTED;                                            \
    } else if (lhs.isBoolean() && rhs.isBoolean()) {                            \
        bool l = lhs.toBoolean();                                               \
        bool r = rhs.toBoolean();                                               \
        *res = (l OP r) == EXPECTED;                                            \
    } else if (lhs.isBoolean() && rhs.isNumber()) {                             \
        bool l = lhs.toBoolean();                                               \
        double r = rhs.toNumber();                                              \
        *res = (l OP r) == EXPECTED;                                            \
    } else if (lhs.isNumber() && rhs.isBoolean()) {                             \
        double l = lhs.toNumber();                                              \
        bool r = rhs.toBoolean();                                               \
        *res = (l OP r) == EXPECTED;                                            \
    } else {                                                                    \
        int32_t vsZero;                                                         \
        if (!CompareMaybeStringsPar(cx, lhs, rhs, &vsZero))                     \
            return false;                                                       \
        *res = (vsZero OP 0) == EXPECTED;                                       \
    }                                                                           \
    return true;                                                                \
} while(0)

static bool
CompareStringsPar(ForkJoinContext *cx, JSString *left, JSString *right, int32_t *res)
{
    ScopedThreadSafeStringInspector leftInspector(left);
    ScopedThreadSafeStringInspector rightInspector(right);
    if (!leftInspector.ensureChars(cx) || !rightInspector.ensureChars(cx))
        return false;

    *res = CompareChars(leftInspector.chars(), left->length(),
                        rightInspector.chars(), right->length());
    return true;
}

static bool
CompareMaybeStringsPar(ForkJoinContext *cx, HandleValue v1, HandleValue v2, int32_t *res)
{
    if (!v1.isString())
        return false;
    if (!v2.isString())
        return false;
    return CompareStringsPar(cx, v1.toString(), v2.toString(), res);
}
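// The Equal template parameter selects between == and != semantics so
// that one implementation can back both the *EqualPar and *UnequalPar
// entry points below.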
template<bool Equal>
bool
LooselyEqualImplPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
{
    PAR_RELATIONAL_OP(==, Equal);
}

bool
js::jit::LooselyEqualPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
{
    return LooselyEqualImplPar<true>(cx, lhs, rhs, res);
}

bool
js::jit::LooselyUnequalPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
{
    return LooselyEqualImplPar<false>(cx, lhs, rhs, res);
}

template<bool Equal>
bool
StrictlyEqualImplPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
{
    if (lhs.isNumber()) {
        if (rhs.isNumber()) {
            *res = (lhs.toNumber() == rhs.toNumber()) == Equal;
            return true;
        }
    } else if (lhs.isBoolean()) {
        if (rhs.isBoolean()) {
            *res = (lhs.toBoolean() == rhs.toBoolean()) == Equal;
            return true;
        }
    } else if (lhs.isNull()) {
        if (rhs.isNull()) {
            *res = Equal;
            return true;
        }
    } else if (lhs.isUndefined()) {
        if (rhs.isUndefined()) {
            *res = Equal;
            return true;
        }
    } else if (lhs.isObject()) {
        if (rhs.isObject()) {
            *res = (lhs.toObjectOrNull() == rhs.toObjectOrNull()) == Equal;
            return true;
        }
    } else if (lhs.isString()) {
        if (rhs.isString())
            return LooselyEqualImplPar<Equal>(cx, lhs, rhs, res);
    }

    *res = false;
    return true;
}

bool
js::jit::StrictlyEqualPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
{
    return StrictlyEqualImplPar<true>(cx, lhs, rhs, res);
}

bool
js::jit::StrictlyUnequalPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
{
    return StrictlyEqualImplPar<false>(cx, lhs, rhs, res);
}

bool
js::jit::LessThanPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
{
    PAR_RELATIONAL_OP(<, true);
}

bool
js::jit::LessThanOrEqualPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
{
    PAR_RELATIONAL_OP(<=, true);
}

bool
js::jit::GreaterThanPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
{
    PAR_RELATIONAL_OP(>, true);
}

bool
js::jit::GreaterThanOrEqualPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
{
    PAR_RELATIONAL_OP(>=, true);
}

template<bool Equal>
bool
StringsEqualImplPar(ForkJoinContext *cx, HandleString lhs, HandleString rhs, bool *res)
{
    int32_t vsZero;
    if (!CompareStringsPar(cx, lhs, rhs, &vsZero))
        return false;
    *res = (vsZero == 0) == Equal;
    return true;
}

bool
js::jit::StringsEqualPar(ForkJoinContext *cx, HandleString v1, HandleString v2, bool *res)
{
    return StringsEqualImplPar<true>(cx, v1, v2, res);
}

bool
js::jit::StringsUnequalPar(ForkJoinContext *cx, HandleString v1, HandleString v2, bool *res)
{
    return StringsEqualImplPar<false>(cx, v1, v2, res);
}

bool
jit::BitNotPar(ForkJoinContext *cx, HandleValue in, int32_t *out)
{
    if (in.isObject())
        return false;
    int i;
    if (!NonObjectToInt32(cx, in, &i))
        return false;
    *out = ~i;
    return true;
}
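// BIT_OP expands into the body of the bitwise stubs below. Returning
// false when an operand is an object, or cannot be converted to int32
// without side effects, bails us back to sequential execution.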
#define BIT_OP(OP)                                                      \
    JS_BEGIN_MACRO                                                      \
        int32_t left, right;                                            \
        if (lhs.isObject() || rhs.isObject())                           \
            return false;                                               \
        if (!NonObjectToInt32(cx, lhs, &left) ||                        \
            !NonObjectToInt32(cx, rhs, &right))                         \
        {                                                               \
            return false;                                               \
        }                                                               \
        *out = (OP);                                                    \
        return true;                                                    \
    JS_END_MACRO

bool
jit::BitXorPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out)
{
    BIT_OP(left ^ right);
}

bool
jit::BitOrPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out)
{
    BIT_OP(left | right);
}

bool
jit::BitAndPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out)
{
    BIT_OP(left & right);
}

bool
jit::BitLshPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out)
{
    BIT_OP(uint32_t(left) << (right & 31));
}

bool
jit::BitRshPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out)
{
    BIT_OP(left >> (right & 31));
}

#undef BIT_OP

bool
jit::UrshValuesPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs,
                   MutableHandleValue out)
{
    uint32_t left;
    int32_t right;
    if (lhs.isObject() || rhs.isObject())
        return false;
    if (!NonObjectToUint32(cx, lhs, &left) || !NonObjectToInt32(cx, rhs, &right))
        return false;
    left >>= right & 31;
    out.setNumber(uint32_t(left));
    return true;
}

void
jit::AbortPar(ParallelBailoutCause cause, JSScript *outermostScript, JSScript *currentScript,
              jsbytecode *bytecode)
{
    // Spew before asserts to help with diagnosing failures.
    Spew(SpewBailouts,
         "Parallel abort with cause %d in %p:%s:%d "
         "(%p:%s:%d at line %d)",
         cause,
         outermostScript, outermostScript->filename(), outermostScript->lineno(),
         currentScript, currentScript->filename(), currentScript->lineno(),
         (currentScript ? PCToLineNumber(currentScript, bytecode) : 0));

    JS_ASSERT(InParallelSection());
    JS_ASSERT(outermostScript != nullptr);
    JS_ASSERT(currentScript != nullptr);
    JS_ASSERT(outermostScript->hasParallelIonScript());

    ForkJoinContext *cx = ForkJoinContext::current();

    JS_ASSERT(cx->bailoutRecord->depth == 0);
    cx->bailoutRecord->setCause(cause, outermostScript, currentScript, bytecode);
}

void
jit::PropagateAbortPar(JSScript *outermostScript, JSScript *currentScript)
{
    Spew(SpewBailouts,
         "Propagate parallel abort via %p:%s:%d (%p:%s:%d)",
         outermostScript, outermostScript->filename(), outermostScript->lineno(),
         currentScript, currentScript->filename(), currentScript->lineno());

    JS_ASSERT(InParallelSection());
    JS_ASSERT(outermostScript->hasParallelIonScript());

    outermostScript->parallelIonScript()->setHasUncompiledCallTarget();

    ForkJoinContext *cx = ForkJoinContext::current();
    if (currentScript)
        cx->bailoutRecord->addTrace(currentScript, nullptr);
}
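// Diagnostic hook for calls from parallel code into scripts that have
// no compiled parallel code. It only spews in DEBUG builds; in release
// builds it compiles to a no-op.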
void
jit::CallToUncompiledScriptPar(JSObject *obj)
{
    JS_ASSERT(InParallelSection());

#ifdef DEBUG
    static const int max_bound_function_unrolling = 5;

    if (!obj->is<JSFunction>()) {
        Spew(SpewBailouts, "Call to non-function");
        return;
    }

    JSFunction *func = &obj->as<JSFunction>();
    if (func->hasScript()) {
        JSScript *script = func->nonLazyScript();
        Spew(SpewBailouts, "Call to uncompiled script: %p:%s:%d",
             script, script->filename(), script->lineno());
    } else if (func->isInterpretedLazy()) {
        Spew(SpewBailouts, "Call to uncompiled lazy script");
    } else if (func->isBoundFunction()) {
        int depth = 0;
        JSFunction *target = &func->getBoundFunctionTarget()->as<JSFunction>();
        while (depth < max_bound_function_unrolling) {
            if (target->hasScript())
                break;
            if (target->isBoundFunction())
                target = &target->getBoundFunctionTarget()->as<JSFunction>();
            depth++;
        }
        if (target->hasScript()) {
            JSScript *script = target->nonLazyScript();
            Spew(SpewBailouts, "Call to bound function leading (depth: %d) to script: %p:%s:%d",
                 depth, script, script->filename(), script->lineno());
        } else {
            Spew(SpewBailouts, "Call to bound function (excessive depth: %d)", depth);
        }
    } else {
        JS_ASSERT(func->isNative());
        Spew(SpewBailouts, "Call to native function");
    }
#endif
}

JSObject *
jit::InitRestParameterPar(ForkJoinContext *cx, uint32_t length, Value *rest,
                          HandleObject templateObj, HandleObject res)
{
    // In parallel execution, we should always have succeeded in allocation
    // before this point. We can do the allocation here like in the sequential
    // path, but duplicating the initGCThing logic is too tedious.
    JS_ASSERT(res);
    JS_ASSERT(res->is<ArrayObject>());
    JS_ASSERT(!res->getDenseInitializedLength());
    JS_ASSERT(res->type() == templateObj->type());

    if (length > 0) {
        JSObject::EnsureDenseResult edr =
            res->ensureDenseElementsPreservePackedFlag(cx, 0, length);
        if (edr != JSObject::ED_OK)
            return nullptr;
        res->initDenseElementsUnbarriered(0, rest, length);
        res->as<ArrayObject>().setLengthInt32(length);
    }

    return res;
}