js/src/jit/shared/IonAssemblerBufferWithConstantPools.h

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1163 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#ifndef jit_shared_IonAssemblerBufferWithConstantPools_h
    1.11 +#define jit_shared_IonAssemblerBufferWithConstantPools_h
    1.12 +
    1.13 +#include "mozilla/DebugOnly.h"
    1.14 +
    1.15 +#include "assembler/wtf/SegmentedVector.h"
    1.16 +#include "jit/IonSpewer.h"
    1.17 +#include "jit/shared/IonAssemblerBuffer.h"
    1.18 +
    1.19 +namespace js {
    1.20 +namespace jit {
    1.21 +typedef Vector<BufferOffset, 512, OldIonAllocPolicy> LoadOffsets;
    1.22 +
    1.23 +struct Pool
    1.24 +  : public OldIonAllocPolicy
    1.25 +{
    1.26 +    const int maxOffset;
    1.27 +    const int immSize;
    1.28 +    const int instSize;
    1.29 +    const int bias;
    1.30 +
    1.31 +  private:
    1.32 +    const int alignment;
    1.33 +
    1.34 +  public:
    1.35 +    const bool isBackref;
    1.36 +    const bool canDedup;
    1.37 +    // "other" is the backwards half of this pool, it is held in another pool structure
    1.38 +    Pool *other;
    1.39 +    uint8_t *poolData;
    1.40 +    uint32_t numEntries;
    1.41 +    uint32_t buffSize;
    1.42 +    LoadOffsets loadOffsets;
    1.43 +
     1.44 +    // When filling pools where the size of an immediate is larger
     1.45 +    // than the size of an instruction, we find we're in a case where the distance between the
     1.46 +    // next instruction and the next pool slot is increasing!
     1.47 +    // Moreover, if we want to do fancy things like deduplicate pool entries at
    1.48 +    // dump time, we may not know the location in a pool (and thus the limiting load)
    1.49 +    // until very late.
    1.50 +    // Lastly, it may be beneficial to interleave the pools.  I have absolutely no idea
    1.51 +    // how that will work, but my suspicions are that it will be difficult.
    1.52 +
    1.53 +    BufferOffset limitingUser;
    1.54 +    int limitingUsee;
    1.55 +
    1.56 +    Pool(int maxOffset_, int immSize_, int instSize_, int bias_, int alignment_, LifoAlloc &LifoAlloc_,
    1.57 +         bool isBackref_ = false, bool canDedup_ = false, Pool *other_ = nullptr)
    1.58 +        : maxOffset(maxOffset_), immSize(immSize_), instSize(instSize_),
    1.59 +          bias(bias_), alignment(alignment_),
    1.60 +          isBackref(isBackref_), canDedup(canDedup_), other(other_),
    1.61 +          poolData(static_cast<uint8_t *>(LifoAlloc_.alloc(8*immSize))), numEntries(0),
    1.62 +          buffSize(8), loadOffsets(), limitingUser(), limitingUsee(INT_MIN)
    1.63 +    {
    1.64 +    }
    1.65 +    static const int garbage=0xa5a5a5a5;
    1.66 +    Pool() : maxOffset(garbage), immSize(garbage), instSize(garbage), bias(garbage),
    1.67 +             alignment(garbage), isBackref(garbage), canDedup(garbage), other((Pool*)garbage)
    1.68 +    {
    1.69 +    }
    1.70 +    // Sometimes, when we are adding large values to a pool, the limiting use may change.
     1.71 +    // Handle this case.  nextInst is the offset of the instruction that references the pool entry.
    1.72 +    void updateLimiter(BufferOffset nextInst) {
    1.73 +        int oldRange, newRange;
    1.74 +        if (isBackref) {
     1.75 +            // Terms common to both ranges (e.g. the location of the pool) cancel and are omitted.
    1.76 +            oldRange = limitingUser.getOffset() - ((numEntries - limitingUsee) * immSize);
    1.77 +            newRange = nextInst.getOffset();
    1.78 +        } else {
    1.79 +            oldRange = (limitingUsee * immSize) - limitingUser.getOffset();
    1.80 +            newRange = (numEntries * immSize) - nextInst.getOffset();
    1.81 +        }
    1.82 +        if (!limitingUser.assigned() || newRange > oldRange) {
    1.83 +            // We have a new largest range!
    1.84 +            limitingUser = nextInst;
    1.85 +            limitingUsee = numEntries;
    1.86 +        }
    1.87 +    }
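          +    // Example for the forward case (a sketch, assuming immSize == 4): if the
          +    // pool is eventually dumped at offset D, the load at offset u referencing
          +    // entry i must span roughly D + i * 4 - u bytes.  D is common to every
          +    // candidate, so comparing i * 4 - u alone picks the limiting user.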
    1.88 +    // checkFull is called before any modifications have been made.
    1.89 +    // It is "if we were to add this instruction and pool entry,
    1.90 +    // would we be in an invalid state?".  If it is true, then it is in fact
    1.91 +    // time for a "pool dump".
    1.92 +
    1.93 +    // poolOffset is the distance from the end of the current section to the end of the pool.
     1.94 +    //            For the last section of the pool, this will be the size of the footer.
     1.95 +    //            For the first section of the pool, it will be the size of every other
     1.96 +    //            section plus the footer.
    1.97 +    // codeOffset is the instruction-distance from the pool to the beginning of the buffer.
    1.98 +    //            Since codeOffset only includes instructions, the number is the same for
    1.99 +    //            the beginning and end of the pool.
   1.100 +    // instOffset is the offset from the beginning of the buffer to the instruction that
   1.101 +    //            is about to be placed.
   1.102 +    bool checkFullBackref(int poolOffset, int codeOffset) {
   1.103 +        if (!limitingUser.assigned())
   1.104 +            return false;
   1.105 +        signed int distance =
   1.106 +            limitingUser.getOffset() + bias
   1.107 +            - codeOffset + poolOffset +
   1.108 +            (numEntries - limitingUsee + 1) * immSize;
   1.109 +        if (distance >= maxOffset)
   1.110 +            return true;
   1.111 +        return false;
   1.112 +    }
   1.113 +
   1.114 +    // checkFull answers the question "If a pool were placed at poolOffset, would
   1.115 +    // any reference into the pool be out of range?". It is meant to be used as instructions
   1.116 +    // and elements are inserted, to determine if a saved perforation point needs to be used.
   1.117 +
   1.118 +    bool checkFull(int poolOffset) {
   1.119 +        // Inserting an instruction into the stream can
   1.120 +        // push any of the pools out of range.
   1.121 +        // Similarly, inserting into a pool can push the pool entry out of range
   1.122 +        JS_ASSERT(!isBackref);
   1.123 +        // Not full if there aren't any uses.
   1.124 +        if (!limitingUser.assigned()) {
   1.125 +            return false;
   1.126 +        }
   1.127 +        // We're considered "full" when:
    1.128 +        // poolOffset + limitingUsee * immSize - (limitingUser + bias) >= maxOffset
   1.129 +        if (poolOffset + limitingUsee * immSize - (limitingUser.getOffset() + bias) >= maxOffset) {
   1.130 +            return true;
   1.131 +        }
   1.132 +        return false;
   1.133 +    }
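          +    // A small numeric sketch: with maxOffset == 4096, bias == 8 and
          +    // immSize == 4, a limiting user at offset 100 referencing entry 10 keeps
          +    // the pool placeable while poolOffset + 10 * 4 - (100 + 8) < 4096, i.e.
          +    // until the candidate pool location reaches offset 4164.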
   1.134 +
   1.135 +    // By the time this function is called, we'd damn well better know that this is going to succeed.
   1.136 +    uint32_t insertEntry(uint8_t *data, BufferOffset off, LifoAlloc &LifoAlloc_) {
   1.137 +        if (numEntries == buffSize) {
   1.138 +            buffSize <<= 1;
    1.139 +            uint8_t *tmp = static_cast<uint8_t*>(LifoAlloc_.alloc(immSize * buffSize));
    1.140 +            if (tmp == nullptr) {
    1.141 +                buffSize = 0;
    1.142 +                return -1;
    1.143 +            }
    1.144 +            memcpy(tmp, poolData, immSize * numEntries);
   1.145 +            poolData = tmp;
   1.146 +        }
   1.147 +        memcpy(&poolData[numEntries * immSize], data, immSize);
   1.148 +        loadOffsets.append(off.getOffset());
   1.149 +        return numEntries++;
   1.150 +    }
   1.151 +
   1.152 +    bool reset(LifoAlloc &a) {
   1.153 +        numEntries = 0;
   1.154 +        buffSize = 8;
   1.155 +        poolData = static_cast<uint8_t*>(a.alloc(buffSize * immSize));
   1.156 +        if (poolData == nullptr)
   1.157 +            return false;
   1.158 +
   1.159 +        void *otherSpace = a.alloc(sizeof(Pool));
   1.160 +        if (otherSpace == nullptr)
   1.161 +            return false;
   1.162 +
   1.163 +        other = new (otherSpace) Pool(other->maxOffset, other->immSize, other->instSize,
   1.164 +                                      other->bias, other->alignment, a, other->isBackref,
   1.165 +                                      other->canDedup);
   1.166 +        new (&loadOffsets) LoadOffsets;
   1.167 +
   1.168 +        limitingUser = BufferOffset();
   1.169 +        limitingUsee = -1;
   1.170 +        return true;
   1.171 +
   1.172 +    }
   1.173 +    // WARNING: This will not always align values. It will only
   1.174 +    // align to the requirement of the pool. If the pool is empty,
    1.175 +    // there is nothing to be aligned, so it will not perform any alignment.
   1.176 +    uint8_t* align(uint8_t *ptr) {
   1.177 +        return (uint8_t*)align((uint32_t)ptr);
   1.178 +    }
   1.179 +    uint32_t align(uint32_t ptr) {
   1.180 +        if (numEntries == 0)
   1.181 +            return ptr;
   1.182 +        return (ptr + alignment-1) & ~(alignment-1);
   1.183 +    }
   1.184 +    uint32_t forceAlign(uint32_t ptr) {
   1.185 +        return (ptr + alignment-1) & ~(alignment-1);
   1.186 +    }
   1.187 +    bool isAligned(uint32_t ptr) {
   1.188 +        return ptr == align(ptr);
   1.189 +    }
   1.190 +    int getAlignment() {
   1.191 +        return alignment;
   1.192 +    }
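          +    // Worked example of the mask arithmetic above (assuming alignment == 8):
          +    // align(0x1003) == (0x1003 + 7) & ~7 == 0x1008 when the pool has entries;
          +    // with numEntries == 0 it returns 0x1003 unchanged, while
          +    // forceAlign(0x1003) always yields 0x1008.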
   1.193 +
   1.194 +    uint32_t addPoolSize(uint32_t start) {
   1.195 +        start = align(start);
   1.196 +        start += immSize * numEntries;
   1.197 +        return start;
   1.198 +    }
   1.199 +    uint8_t *addPoolSize(uint8_t *start) {
   1.200 +        start = align(start);
   1.201 +        start += immSize * numEntries;
   1.202 +        return start;
   1.203 +    }
   1.204 +    uint32_t getPoolSize() {
   1.205 +        return immSize * numEntries;
   1.206 +    }
   1.207 +};
   1.208 +
   1.209 +
   1.210 +template <int SliceSize, int InstBaseSize>
   1.211 +struct BufferSliceTail : public BufferSlice<SliceSize> {
   1.212 +    Pool *data;
   1.213 +    mozilla::Array<uint8_t, (SliceSize + (InstBaseSize * 8 - 1)) / (InstBaseSize * 8)> isBranch;
   1.214 +    bool isNatural : 1;
   1.215 +    BufferSliceTail *getNext() {
   1.216 +        return (BufferSliceTail *)this->next;
   1.217 +    }
   1.218 +    BufferSliceTail() : data(nullptr), isNatural(true) {
   1.219 +        memset(&isBranch[0], 0, sizeof(isBranch));
   1.220 +    }
   1.221 +    void markNextAsBranch() {
   1.222 +        int idx = this->nodeSize / InstBaseSize;
   1.223 +        isBranch[idx >> 3] |= 1 << (idx & 0x7);
   1.224 +    }
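          +    // e.g. with InstBaseSize == 4, an instruction starting at byte 40 is
          +    // instruction index 10, recorded as bit (10 & 0x7) == 2 of
          +    // isBranch[10 >> 3] == isBranch[1].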
   1.225 +    bool isNextBranch() {
   1.226 +        unsigned int size = this->nodeSize;
   1.227 +        if (size >= SliceSize)
   1.228 +            return false;
   1.229 +        int idx = size / InstBaseSize;
   1.230 +        return (isBranch[idx >> 3] >> (idx & 0x7)) & 1;
   1.231 +    }
   1.232 +};
   1.233 +
   1.234 +#if 0
   1.235 +static int getId() {
   1.236 +    if (MaybeGetIonContext())
   1.237 +        return MaybeGetIonContext()->getNextAssemblerId();
   1.238 +    return NULL_ID;
   1.239 +}
   1.240 +#endif
   1.241 +static inline void spewEntry(uint8_t *ptr, int length) {
   1.242 +#if IS_LITTLE_ENDIAN
   1.243 +    for (int idx = 0; idx < length; idx++) {
   1.244 +        IonSpewCont(IonSpew_Pools, "%02x", ptr[length - idx - 1]);
   1.245 +        if (((idx & 3) == 3) && (idx + 1 != length))
   1.246 +            IonSpewCont(IonSpew_Pools, "_");
   1.247 +    }
   1.248 +#else
   1.249 +    for (int idx = 0; idx < length; idx++) {
   1.250 +        IonSpewCont(IonSpew_Pools, "%02x", ptr[idx]);
   1.251 +        if (((idx & 3) == 3) && (idx + 1 != length))
   1.252 +            IonSpewCont(IonSpew_Pools, "_");
   1.253 +    }
   1.254 +#endif
   1.255 +}
   1.256 +// NOTE: Adding in the ability to retroactively insert a pool has consequences!
    1.257 +// Most notably, Labels stop working.  Normally, we create a label and later bind it;
    1.258 +// when the label is bound, we back-patch all previous references to the label with
   1.259 +// the correct offset. However, since a pool may be retroactively inserted, we don't
   1.260 +// actually know what the final offset is going to be until much later. This will
   1.261 +// happen (in some instances) after the pools have been finalized. Attempting to compute
   1.262 +// the correct offsets for branches as the pools are finalized is quite infeasible.
   1.263 +// Instead, I write *just* the number of instructions that will be jumped over, then
   1.264 +// when we go to copy the instructions into the executable buffer, fix up all of the
   1.265 +// offsets to include the pools. Since we have about 32 megabytes worth of offset,
   1.266 +// I am not very worried about the pools moving it out of range.
    1.267 +// Now, how exactly do we generate these? The first step is to identify which
   1.268 +// instructions are actually branches that need to be fixed up.  A single bit
   1.269 +// per instruction should be enough to determine which ones are branches, but
   1.270 +// we have no guarantee that all instructions are the same size, so the start of
   1.271 +// each branch instruction will be marked with a bit (1 bit per byte).
    1.272 +// Then we need to call up to the assembler to determine what the offset of the branch
   1.273 +// is. The offset will be the number of instructions that are being skipped over
   1.274 +// along with any processor bias. We then need to calculate the offset, including pools
   1.275 +// and write that value into the buffer.  At this point, we can write it into the
   1.276 +// executable buffer, or the AssemblerBuffer, and copy the data over later.
    1.277 +// Previously, this was all handled by the assembler, since the location
    1.278 +// and size of pools were always known as soon as the pool's location had been reached.
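          +//
          +// A rough sketch of that fixup (see patchBranch() below): the stored offset
          +// counts instructions only, and every pool dumped between the branch and its
          +// target widens the real distance by that pool's size:
          +//
          +//   ptrdiff_t fixed = offset;
          +//   for (/* each PoolInfo pi between the branch and its target */)
          +//       fixed += pi.size;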
   1.279 +
   1.280 +// A class for indexing into constant pools.
   1.281 +// Each time a pool entry is added, one of these is generated.
   1.282 +// This can be supplied to read and write that entry after the fact.
   1.283 +// And it can be used to get the address of the entry once the buffer
   1.284 +// has been finalized, and an executable copy allocated.
   1.285 +
   1.286 +template <int SliceSize, int InstBaseSize, class Inst, class Asm, int poolKindBits>
   1.287 +struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst> {
   1.288 +  private:
   1.289 +    mozilla::Array<int, 1 << poolKindBits> entryCount;
   1.290 +    static const int offsetBits = 32 - poolKindBits;
   1.291 +  public:
   1.292 +
   1.293 +    class PoolEntry {
   1.294 +        template <int ss, int ibs, class i, class a, int pkb>
   1.295 +        friend struct AssemblerBufferWithConstantPool;
   1.296 +        uint32_t offset_ : offsetBits;
   1.297 +        uint32_t kind_ : poolKindBits;
   1.298 +        PoolEntry(int offset, int kind) : offset_(offset), kind_(kind) {
   1.299 +        }
   1.300 +      public:
   1.301 +        uint32_t encode() {
   1.302 +            uint32_t ret;
   1.303 +            memcpy(&ret, this, sizeof(uint32_t));
   1.304 +            return ret;
   1.305 +        }
   1.306 +        PoolEntry(uint32_t bits) : offset_(((1u << offsetBits) - 1) & bits),
   1.307 +                                 kind_(bits >> offsetBits) {
   1.308 +        }
   1.309 +        PoolEntry() : offset_((1u << offsetBits) - 1), kind_((1u << poolKindBits) - 1) {
   1.310 +        }
   1.311 +
   1.312 +        uint32_t poolKind() const {
   1.313 +            return kind_;
   1.314 +        }
   1.315 +        uint32_t offset() const {
   1.316 +            return offset_;
   1.317 +        }
   1.318 +    };
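          +    // Round-trip sketch (assuming poolKindBits == 4, so offsetBits == 28, and
          +    // the low-bits-first bitfield layout the decoding constructor relies on):
          +    //   PoolEntry pe(5, 2);           // entry 5 of pool kind 2
          +    //   uint32_t bits = pe.encode();  // (2u << 28) | 5
          +    //   PoolEntry back(bits);         // poolKind() == 2, offset() == 5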
   1.319 +  private:
   1.320 +    typedef BufferSliceTail<SliceSize, InstBaseSize> BufferSlice;
   1.321 +    typedef AssemblerBuffer<SliceSize, Inst> Parent;
   1.322 +
   1.323 +    // The size of a guard instruction
   1.324 +    const int guardSize;
   1.325 +    // The size of the header that is put at the beginning of a full pool
   1.326 +    const int headerSize;
   1.327 +    // The size of a footer that is put in a pool after it is full.
   1.328 +    const int footerSize;
   1.329 +    // the number of sub-pools that we can allocate into.
   1.330 +    static const int numPoolKinds = 1 << poolKindBits;
   1.331 +
   1.332 +    Pool *pools;
   1.333 +
   1.334 +    // The buffer should be aligned to this address.
   1.335 +    const int instBufferAlign;
   1.336 +
   1.337 +    // the number of times we've dumped the pool.
   1.338 +    int numDumps;
   1.339 +    struct PoolInfo {
    1.340 +        int offset; // the offset (in bytes of code, pools excluded) of the start of the pool
    1.341 +        int size;   // the size of the pool, including padding
    1.342 +        int finalPos; // the offset of the end of this pool in the final buffer, pools included
   1.343 +        BufferSlice *slice;
   1.344 +    };
   1.345 +    PoolInfo *poolInfo;
   1.346 +    // we need to keep track of how large the pools are, so we can allocate
   1.347 +    // enough space for them later.  This should include any amount of padding
   1.348 +    // necessary to keep the pools aligned.
   1.349 +    int poolSize;
    1.350 +    // Incremented by the Assembler (via enterNoPool) while it does not want us to dump a pool here.
   1.351 +    int canNotPlacePool;
   1.352 +    // Are we filling up the forwards or backwards pools?
   1.353 +    bool inBackref;
   1.354 +    // Cache the last place we saw an opportunity to dump the pool
   1.355 +    BufferOffset perforation;
   1.356 +    BufferSlice *perforatedNode;
   1.357 +  public:
   1.358 +    int id;
   1.359 +  private:
   1.360 +    static const int logBasePoolInfo = 3;
   1.361 +    BufferSlice ** getHead() {
   1.362 +        return (BufferSlice**)&this->head;
   1.363 +    }
   1.364 +    BufferSlice ** getTail() {
   1.365 +        return (BufferSlice**)&this->tail;
   1.366 +    }
   1.367 +
   1.368 +    virtual BufferSlice *newSlice(LifoAlloc &a) {
   1.369 +        BufferSlice *tmp = static_cast<BufferSlice*>(a.alloc(sizeof(BufferSlice)));
   1.370 +        if (!tmp) {
   1.371 +            this->m_oom = true;
   1.372 +            return nullptr;
   1.373 +        }
   1.374 +        new (tmp) BufferSlice;
   1.375 +        return tmp;
   1.376 +    }
   1.377 +  public:
   1.378 +    AssemblerBufferWithConstantPool(int guardSize_, int headerSize_, int footerSize_, Pool *pools_, int instBufferAlign_)
   1.379 +        : guardSize(guardSize_), headerSize(headerSize_),
   1.380 +          footerSize(footerSize_),
   1.381 +          pools(pools_),
   1.382 +          instBufferAlign(instBufferAlign_), numDumps(0),
   1.383 +          poolInfo(nullptr),
   1.384 +          poolSize(0), canNotPlacePool(0), inBackref(false),
   1.385 +          perforatedNode(nullptr), id(-1)
   1.386 +    {
   1.387 +        for (int idx = 0; idx < numPoolKinds; idx++) {
   1.388 +            entryCount[idx] = 0;
   1.389 +        }
   1.390 +    }
   1.391 +
   1.392 +    // We need to wait until an AutoIonContextAlloc is created by the
   1.393 +    // IonMacroAssembler, before allocating any space.
   1.394 +    void initWithAllocator() {
   1.395 +        poolInfo = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * (1 << logBasePoolInfo)));
   1.396 +    }
   1.397 +
   1.398 +    const PoolInfo & getInfo(int x) const {
    1.399 +        static const PoolInfo nil = { 0, 0, 0, nullptr };
   1.400 +        if (x < 0 || x >= numDumps)
   1.401 +            return nil;
   1.402 +        return poolInfo[x];
   1.403 +    }
   1.404 +    void executableCopy(uint8_t *dest_) {
   1.405 +        if (this->oom())
   1.406 +            return;
   1.407 +        // TODO: only do this when the pool actually has a value in it
   1.408 +        flushPool();
   1.409 +        for (int idx = 0; idx < numPoolKinds; idx++) {
   1.410 +            JS_ASSERT(pools[idx].numEntries == 0 && pools[idx].other->numEntries == 0);
   1.411 +        }
   1.412 +        typedef mozilla::Array<uint8_t, InstBaseSize> Chunk;
   1.413 +        mozilla::DebugOnly<Chunk *> start = (Chunk*)dest_;
   1.414 +        Chunk *dest = (Chunk*)(((uint32_t)dest_ + instBufferAlign - 1) & ~(instBufferAlign -1));
   1.415 +        int curIndex = 0;
   1.416 +        int curInstOffset = 0;
   1.417 +        JS_ASSERT(start == dest);
   1.418 +        for (BufferSlice * cur = *getHead(); cur != nullptr; cur = cur->getNext()) {
   1.419 +            Chunk *src = (Chunk*)&cur->instructions;
    1.420 +            for (unsigned int idx = 0; idx < cur->size() / InstBaseSize;
   1.421 +                 idx++, curInstOffset += InstBaseSize) {
   1.422 +                // Is the current instruction a branch?
    1.423 +                if (cur->isBranch[idx >> 3] & (1 << (idx & 7))) {
    1.424 +                    // It's a branch.  Fix up the branchiness!
   1.425 +                    patchBranch((Inst*)&src[idx], curIndex, BufferOffset(curInstOffset));
   1.426 +                }
   1.427 +                memcpy(&dest[idx], &src[idx], sizeof(Chunk));
   1.428 +            }
    1.429 +            dest += cur->size() / InstBaseSize;
   1.430 +            if (cur->data != nullptr) {
   1.431 +                // have the repatcher move on to the next pool
    1.432 +                curIndex++;
   1.433 +                // loop over all of the pools, copying them into place.
   1.434 +                uint8_t *poolDest = (uint8_t*)dest;
   1.435 +                Asm::writePoolHeader(poolDest, cur->data, cur->isNatural);
   1.436 +                poolDest += headerSize;
   1.437 +                for (int idx = 0; idx < numPoolKinds; idx++) {
   1.438 +                    Pool *curPool = &cur->data[idx];
   1.439 +                    // align the pool.
   1.440 +                    poolDest = curPool->align(poolDest);
   1.441 +                    memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries);
   1.442 +                    poolDest += curPool->immSize * curPool->numEntries;
   1.443 +                }
   1.444 +                // now go over the whole list backwards, and copy in the reverse portions
   1.445 +                for (int idx = numPoolKinds-1; idx >= 0; idx--) {
   1.446 +                    Pool *curPool = cur->data[idx].other;
   1.447 +                    // align the pool.
   1.448 +                    poolDest = curPool->align(poolDest);
   1.449 +                    memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries);
   1.450 +                    poolDest += curPool->immSize * curPool->numEntries;
   1.451 +                }
   1.452 +                // write a footer in place
   1.453 +                Asm::writePoolFooter(poolDest, cur->data, cur->isNatural);
   1.454 +                poolDest += footerSize;
   1.455 +                // at this point, poolDest had better still be aligned to a chunk boundary.
   1.456 +                dest = (Chunk*) poolDest;
   1.457 +            }
   1.458 +        }
   1.459 +    }
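          +    // The copied-out layout of one perforated slice, roughly (two pool kinds
          +    // A and B shown; the header and footer are headerSize and footerSize
          +    // bytes, written by Asm::writePoolHeader/writePoolFooter):
          +    //
          +    //   [instructions][header][A][B][B_rev][A_rev][footer][instructions...]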
   1.460 +
   1.461 +    BufferOffset insertEntry(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data, PoolEntry *pe = nullptr) {
   1.462 +        if (this->oom() && !this->bail())
   1.463 +            return BufferOffset();
   1.464 +        int token;
   1.465 +        if (p != nullptr) {
   1.466 +            int poolId = p - pools;
   1.467 +            const char sigil = inBackref ? 'B' : 'F';
   1.468 +
   1.469 +            IonSpew(IonSpew_Pools, "[%d]{%c} Inserting entry into pool %d", id, sigil, poolId);
   1.470 +            IonSpewStart(IonSpew_Pools, "[%d] data is: 0x", id);
   1.471 +            spewEntry(data, p->immSize);
   1.472 +            IonSpewFin(IonSpew_Pools);
   1.473 +        }
   1.474 +        // insert the pool value
   1.475 +        if (inBackref)
   1.476 +            token = insertEntryBackwards(instSize, inst, p, data);
   1.477 +        else
   1.478 +            token = insertEntryForwards(instSize, inst, p, data);
   1.479 +        // now to get an instruction to write
   1.480 +        PoolEntry retPE;
   1.481 +        if (p != nullptr) {
   1.482 +            if (this->oom())
   1.483 +                return BufferOffset();
   1.484 +            int poolId = p - pools;
   1.485 +            IonSpew(IonSpew_Pools, "[%d] Entry has token %d, offset ~%d", id, token, size());
   1.486 +            Asm::insertTokenIntoTag(instSize, inst, token);
   1.487 +            JS_ASSERT(poolId < (1 << poolKindBits));
   1.488 +            JS_ASSERT(poolId >= 0);
   1.489 +            // Figure out the offset within like-kinded pool entries
   1.490 +            retPE = PoolEntry(entryCount[poolId], poolId);
   1.491 +            entryCount[poolId]++;
   1.492 +        }
   1.493 +        // Now inst is a valid thing to insert into the instruction stream
   1.494 +        if (pe != nullptr)
   1.495 +            *pe = retPE;
   1.496 +        return this->putBlob(instSize, inst);
   1.497 +    }
   1.498 +
   1.499 +    uint32_t insertEntryBackwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) {
    1.500 +        // Unlike the forward case, inserting an instruction without also
    1.501 +        // inserting anything into a pool doesn't affect anything relevant once
    1.502 +        // a pool has been placed, so we can skip that check entirely!
   1.503 +
   1.504 +        if (p == nullptr)
   1.505 +            return INT_MIN;
   1.506 +        // TODO: calculating offsets for the alignment requirements is *hard*
   1.507 +        // Instead, assume that we always add the maximum.
   1.508 +        int poolOffset = footerSize;
   1.509 +        Pool *cur, *tmp;
   1.510 +        // NOTE: we want to process the pools from last to first.
   1.511 +        // Since the last pool is pools[0].other, and the first pool
   1.512 +        // is pools[numPoolKinds-1], we actually want to process this
   1.513 +        // forwards.
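          +        // e.g. with two kinds A == pools[0] and B == pools[1], the dumped
          +        // layout is A B B_rev A_rev; taking cur->other for cur == A then B
          +        // visits A_rev (the last pool) first, then B_rev.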
   1.514 +        for (cur = pools; cur < &pools[numPoolKinds]; cur++) {
   1.515 +            // fetch the pool for the backwards half.
   1.516 +            tmp = cur->other;
   1.517 +            if (p == cur)
   1.518 +                tmp->updateLimiter(this->nextOffset());
   1.519 +
   1.520 +            if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) {
   1.521 +                // uh-oh, the backwards pool is full.  Time to finalize it, and
   1.522 +                // switch to a new forward pool.
   1.523 +                if (p != nullptr)
    1.524 +                    IonSpew(IonSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
    1.525 +                else
    1.526 +                    IonSpew(IonSpew_Pools, "[%d] Inserting instruction(%d) caused a spill", id, size());
   1.527 +
   1.528 +                this->finishPool();
   1.529 +                if (this->oom())
   1.530 +                    return uint32_t(-1);
   1.531 +                return this->insertEntryForwards(instSize, inst, p, data);
   1.532 +            }
   1.533 +            // when moving back to front, calculating the alignment is hard, just be
   1.534 +            // conservative with it.
   1.535 +            poolOffset += tmp->immSize * tmp->numEntries + tmp->getAlignment();
   1.536 +            if (p == tmp) {
   1.537 +                poolOffset += tmp->immSize;
   1.538 +            }
   1.539 +        }
   1.540 +        return p->numEntries + p->other->insertEntry(data, this->nextOffset(), this->LifoAlloc_);
   1.541 +    }
   1.542 +
   1.543 +    // Simultaneously insert an instSized instruction into the stream,
   1.544 +    // and an entry into the pool.  There are many things that can happen.
   1.545 +    // 1) the insertion goes as planned
   1.546 +    // 2) inserting an instruction pushes a previous pool-reference out of range, forcing a dump
   1.547 +    // 2a) there isn't a reasonable save point in the instruction stream. We need to save room for
   1.548 +    //     a guard instruction to branch over the pool.
   1.549 +    int insertEntryForwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) {
   1.550 +        // Advance the "current offset" by an inst, so everyone knows what their offset should be.
   1.551 +        uint32_t nextOffset = this->size() + instSize;
   1.552 +        uint32_t poolOffset = nextOffset;
   1.553 +        Pool *tmp;
   1.554 +        // If we need a guard instruction, reserve space for that.
   1.555 +        if (!perforatedNode)
   1.556 +            poolOffset += guardSize;
   1.557 +        // Also, take into account the size of the header that will be placed *after*
   1.558 +        // the guard instruction
   1.559 +        poolOffset += headerSize;
   1.560 +
   1.561 +        // Perform the necessary range checks.
   1.562 +        for (tmp = pools; tmp < &pools[numPoolKinds]; tmp++) {
    1.563 +            // The pool may wish for a particular alignment; let's give it one.
   1.564 +            JS_ASSERT((tmp->getAlignment() & (tmp->getAlignment() - 1)) == 0);
   1.565 +            // The pool only needs said alignment *if* there are any entries in the pool
   1.566 +            // WARNING: the pool needs said alignment if there are going to be entries in
   1.567 +            // the pool after this entry has been inserted
   1.568 +            if (p == tmp)
   1.569 +                poolOffset = tmp->forceAlign(poolOffset);
   1.570 +            else
   1.571 +                poolOffset = tmp->align(poolOffset);
   1.572 +
   1.573 +            // If we're at the pool we want to insert into, find a new limiter
   1.574 +            // before we do the range check.
   1.575 +            if (p == tmp) {
   1.576 +                p->updateLimiter(BufferOffset(nextOffset));
   1.577 +            }
   1.578 +            if (tmp->checkFull(poolOffset)) {
   1.579 +                // uh-oh. DUMP DUMP DUMP
   1.580 +                if (p != nullptr)
   1.581 +                    IonSpew(IonSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
   1.582 +                else
   1.583 +                    IonSpew(IonSpew_Pools, "[%d] Inserting instruction(%d) caused a spill", id, size());
   1.584 +
   1.585 +                this->dumpPool();
   1.586 +                return this->insertEntryBackwards(instSize, inst, p, data);
   1.587 +            }
   1.588 +            // include the size of this pool in the running total
   1.589 +            if (p == tmp) {
   1.590 +                nextOffset += tmp->immSize;
   1.591 +            }
   1.592 +            nextOffset += tmp->immSize * tmp->numEntries;
   1.593 +        }
   1.594 +        if (p == nullptr) {
   1.595 +            return INT_MIN;
   1.596 +        }
   1.597 +        return p->insertEntry(data, this->nextOffset(), this->LifoAlloc_);
   1.598 +    }
   1.599 +    BufferOffset putInt(uint32_t value) {
   1.600 +        return insertEntry(sizeof(uint32_t) / sizeof(uint8_t), (uint8_t*)&value, nullptr, nullptr);
   1.601 +    }
   1.602 +    // Mark the current section as an area where we can
   1.603 +    // later go to dump a pool
   1.604 +    void perforate() {
    1.605 +        // If we're filling the backreferences, we don't want to start looking for a new dump site.
   1.606 +        if (inBackref)
   1.607 +            return;
   1.608 +        if (canNotPlacePool)
   1.609 +            return;
   1.610 +        // If there is nothing in the pool, then it is strictly disadvantageous
   1.611 +        // to attempt to place a pool here
   1.612 +        bool empty = true;
   1.613 +        for (int i = 0; i < numPoolKinds; i++) {
   1.614 +            if (pools[i].numEntries != 0) {
   1.615 +                empty = false;
   1.616 +                break;
   1.617 +            }
   1.618 +        }
   1.619 +        if (empty)
   1.620 +            return;
   1.621 +        perforatedNode = *getTail();
   1.622 +        perforation = this->nextOffset();
   1.623 +        Parent::perforate();
   1.624 +        IonSpew(IonSpew_Pools, "[%d] Adding a perforation at offset %d", id, perforation.getOffset());
   1.625 +    }
   1.626 +
   1.627 +    // After a pool is finished, no more elements may be added to it. During this phase, we
   1.628 +    // will know the exact offsets to the pool entries, and those values should be written into
   1.629 +    // the given instructions.
   1.630 +    PoolInfo getPoolData() const {
   1.631 +        int prevOffset = getInfo(numDumps-1).offset;
   1.632 +        int prevEnd = getInfo(numDumps-1).finalPos;
    1.633 +        // calculate the offset of the start of this pool.
   1.634 +        int perfOffset = perforation.assigned() ?
   1.635 +            perforation.getOffset() :
   1.636 +            this->nextOffset().getOffset() + this->guardSize;
   1.637 +        int initOffset = prevEnd + (perfOffset - prevOffset);
   1.638 +        int finOffset = initOffset;
   1.639 +        bool poolIsEmpty = true;
   1.640 +        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
   1.641 +            if (pools[poolIdx].numEntries != 0) {
   1.642 +                poolIsEmpty = false;
   1.643 +                break;
   1.644 +            }
   1.645 +            if (pools[poolIdx].other != nullptr && pools[poolIdx].other->numEntries != 0) {
   1.646 +                poolIsEmpty = false;
   1.647 +                break;
   1.648 +            }
   1.649 +        }
   1.650 +        if (!poolIsEmpty) {
   1.651 +            finOffset += headerSize;
   1.652 +            for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
    1.653 +                finOffset = pools[poolIdx].align(finOffset);
    1.654 +                finOffset += pools[poolIdx].numEntries * pools[poolIdx].immSize;
   1.655 +            }
   1.656 +            // And compute the necessary adjustments for the second half of the pool.
   1.657 +            for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) {
    1.658 +                finOffset = pools[poolIdx].other->align(finOffset);
    1.659 +                finOffset += pools[poolIdx].other->numEntries * pools[poolIdx].other->immSize;
   1.660 +            }
   1.661 +            finOffset += footerSize;
   1.662 +        }
   1.663 +
   1.664 +        PoolInfo ret;
   1.665 +        ret.offset = perfOffset;
   1.666 +        ret.size = finOffset - initOffset;
   1.667 +        ret.finalPos = finOffset;
   1.668 +        ret.slice = perforatedNode;
   1.669 +        return ret;
   1.670 +    }
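          +    // For instance, if the previous dump ended at finalPos == 132 with
          +    // offset == 100, a perforation at code offset 150 gives
          +    // initOffset == 132 + (150 - 100) == 182; a pool needing 24 bytes of
          +    // header, entries, padding and footer then yields
          +    // { offset: 150, size: 24, finalPos: 206 }.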
   1.671 +    void finishPool() {
   1.672 +        // This function should only be called while the backwards half of the pool
   1.673 +        // is being filled in. The backwards half of the pool is always in a state
   1.674 +        // where it is sane. Everything that needs to be done here is for "sanity's sake".
   1.675 +        // The per-buffer pools need to be reset, and we need to record the size of the pool.
   1.676 +        IonSpew(IonSpew_Pools, "[%d] Finishing pool %d", id, numDumps);
   1.677 +        JS_ASSERT(inBackref);
   1.678 +        PoolInfo newPoolInfo = getPoolData();
   1.679 +        if (newPoolInfo.size == 0) {
   1.680 +            // The code below also creates a new pool, but that is not necessary, since
   1.681 +            // the pools have not been modified at all.
   1.682 +            new (&perforation) BufferOffset();
   1.683 +            perforatedNode = nullptr;
   1.684 +            inBackref = false;
   1.685 +            IonSpew(IonSpew_Pools, "[%d] Aborting because the pool is empty", id);
   1.686 +            // Bail out early, since we don't want to even pretend these pools exist.
   1.687 +            return;
   1.688 +        }
   1.689 +        JS_ASSERT(perforatedNode != nullptr);
   1.690 +        if (numDumps >= (1<<logBasePoolInfo) && (numDumps & (numDumps-1)) == 0) {
   1.691 +            // need to resize.
   1.692 +            PoolInfo *tmp = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * numDumps * 2));
   1.693 +            if (tmp == nullptr) {
   1.694 +                this->fail_oom();
   1.695 +                return;
   1.696 +            }
   1.697 +            memcpy(tmp, poolInfo, sizeof(PoolInfo) * numDumps);
   1.698 +            poolInfo = tmp;
   1.699 +
   1.700 +        }
   1.701 +
   1.702 +        // In order to figure out how to fix up the loads for the second half of the pool
   1.703 +        // we need to find where the bits of the pool that have been implemented end.
   1.704 +        int poolOffset = perforation.getOffset();
   1.705 +        int magicAlign = getInfo(numDumps-1).finalPos - getInfo(numDumps-1).offset;
   1.706 +        poolOffset += magicAlign;
   1.707 +        poolOffset += headerSize;
   1.708 +        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
    1.709 +            poolOffset = pools[poolIdx].align(poolOffset);
    1.710 +            poolOffset += pools[poolIdx].numEntries * pools[poolIdx].immSize;
   1.711 +        }
   1.712 +        mozilla::Array<LoadOffsets, 1 << poolKindBits> outcasts;
   1.713 +        mozilla::Array<uint8_t *, 1 << poolKindBits> outcastEntries;
   1.714 +        // All of the pool loads referred to by this code are going to
   1.715 +        // need fixing up here.
   1.716 +        int skippedBytes = 0;
   1.717 +        for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) {
   1.718 +            Pool *p =  pools[poolIdx].other;
   1.719 +            JS_ASSERT(p != nullptr);
   1.720 +            unsigned int idx = p->numEntries-1;
   1.721 +            // Allocate space for tracking information that needs to be propagated to the next pool
   1.722 +            // as well as space for quickly updating the pool entries in the current pool to remove
   1.723 +            // the entries that don't actually fit.  I probably should change this over to a vector
   1.724 +            outcastEntries[poolIdx] = new uint8_t[p->getPoolSize()];
   1.725 +            bool *preservedEntries = new bool[p->numEntries];
   1.726 +            // Hacks on top of Hacks!
   1.727 +            // the patching code takes in the address of the instruction to be patched,
   1.728 +            // and the "address" of the element in the pool that we want to load.
   1.729 +            // However, since the code isn't actually in an array, we need to lie about
   1.730 +            // the address that the pool is in. Furthermore, since the offsets are
   1.731 +            // technically from the beginning of the FORWARD reference section, we have
   1.732 +            // to lie to ourselves about where this pool starts in order to make sure
   1.733 +            // the distance into the pool is interpreted correctly.
   1.734 +            // There is a more elegant way to fix this that will need to be implemented
   1.735 +            // eventually. We will want to provide the fixup function with a method to
   1.736 +            // convert from a 'token' into a pool offset.
   1.737 +            poolOffset = p->align(poolOffset);
   1.738 +            int numSkips = 0;
   1.739 +            int fakePoolOffset = poolOffset - pools[poolIdx].numEntries * pools[poolIdx].immSize;
   1.740 +            for (BufferOffset *iter = p->loadOffsets.end()-1;
   1.741 +                 iter != p->loadOffsets.begin()-1; --iter, --idx)
   1.742 +            {
    1.744 +                IonSpew(IonSpew_Pools, "[%d] Linking entry %d in pool %d", id, idx + pools[poolIdx].numEntries, poolIdx);
   1.745 +                JS_ASSERT(iter->getOffset() >= perforation.getOffset());
   1.746 +                // Everything here is known, we can safely do the necessary substitutions
   1.747 +                Inst * inst = this->getInst(*iter);
   1.748 +                // Manually compute the offset, including a possible bias.
   1.749 +                // Also take into account the whole size of the pool that is being placed.
   1.750 +                int codeOffset = fakePoolOffset - iter->getOffset() - newPoolInfo.size + numSkips * p->immSize - skippedBytes;
   1.751 +                // That is, patchConstantPoolLoad wants to be handed the address of the
   1.752 +                // pool entry that is being loaded.  We need to do a non-trivial amount
   1.753 +                // of math here, since the pool that we've made does not actually reside there
   1.754 +                // in memory.
   1.755 +                IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign);
   1.756 +                if (!Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign)) {
   1.757 +                    // NOTE: if removing this entry happens to change the alignment of the next
   1.758 +                    // block, chances are you will have a bad time.
   1.759 +                    // ADDENDUM: this CANNOT happen on ARM, because the only elements that
   1.760 +                    // fall into this case are doubles loaded via vfp, but they will also be
   1.761 +                    // the last pool, which means it cannot affect the alignment of any other
   1.762 +                    // Sub Pools.
    1.763 +                    IonSpew(IonSpew_Pools, "[%d] *** Offset %d was still out of range! ***", id, codeOffset - magicAlign);
    1.764 +                    IonSpew(IonSpew_Pools, "[%d] Too complicated; bailing", id);
   1.765 +                    this->fail_bail();
   1.766 +                    // only free up to the current offset
   1.767 +                    for (int pi = poolIdx; pi < numPoolKinds; pi++)
   1.768 +                        delete[] outcastEntries[pi];
   1.769 +                    delete[] preservedEntries;
   1.770 +                    return;
   1.771 +                } else {
   1.772 +                    preservedEntries[idx] = true;
   1.773 +                }
   1.774 +            }
   1.775 +            // remove the elements of the pool that should not be there (YAY, MEMCPY)
   1.776 +            unsigned int idxDest = 0;
   1.777 +            // If no elements were skipped, no expensive copy is necessary.
   1.778 +            if (numSkips != 0) {
   1.779 +                for (idx = 0; idx < p->numEntries; idx++) {
   1.780 +                    if (preservedEntries[idx]) {
   1.781 +                        if (idx != idxDest) {
   1.782 +                            memcpy(&p->poolData[idxDest * p->immSize],
   1.783 +                                   &p->poolData[idx * p->immSize],
   1.784 +                                   p->immSize);
   1.785 +                        }
   1.786 +                        idxDest++;
   1.787 +                    }
   1.788 +                }
   1.789 +                p->numEntries -= numSkips;
   1.790 +            }
   1.791 +            poolOffset += p->numEntries * p->immSize;
   1.792 +            delete[] preservedEntries;
   1.793 +            preservedEntries = nullptr;
   1.794 +        }
   1.795 +        // bind the current pool to the perforation point.
   1.796 +        Pool **tmp = &perforatedNode->data;
   1.797 +        *tmp = static_cast<Pool*>(this->LifoAlloc_.alloc(sizeof(Pool) * numPoolKinds));
    1.798 +        if (*tmp == nullptr) {
   1.799 +            this->fail_oom();
   1.800 +            for (int pi = 0; pi < numPoolKinds; pi++)
   1.801 +                delete[] outcastEntries[pi];
   1.802 +            return;
   1.803 +        }
   1.804 +        // The above operations may have changed the size of pools!
   1.805 +        // recalibrate the size of the pool.
   1.806 +        newPoolInfo = getPoolData();
   1.807 +        poolInfo[numDumps] = newPoolInfo;
   1.808 +        poolSize += poolInfo[numDumps].size;
   1.809 +        numDumps++;
   1.810 +
   1.811 +        memcpy(*tmp, pools, sizeof(Pool) * numPoolKinds);
   1.812 +
   1.813 +        // reset everything to the state that it was in when we started
   1.814 +        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
   1.815 +            if (!pools[poolIdx].reset(this->LifoAlloc_)) {
   1.816 +                this->fail_oom();
   1.817 +                for (int pi = 0; pi < numPoolKinds; pi++)
   1.818 +                    delete[] outcastEntries[pi];
   1.819 +                return;
   1.820 +            }
   1.821 +        }
   1.822 +        new (&perforation) BufferOffset();
   1.823 +        perforatedNode = nullptr;
   1.824 +        inBackref = false;
   1.825 +
   1.826 +        // Now that the backwards pool has been emptied, and a new forward pool
   1.827 +        // has been allocated, it is time to populate the new forward pool with
   1.828 +        // any entries that couldn't fit in the backwards pool.
   1.829 +        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
   1.830 +            // Technically, the innermost pool will never have this issue, but it is easier
   1.831 +            // to just handle this case.
    1.832 +            // Since the pool entries were filled back-to-front, but in the next buffer
    1.833 +            // the elements should be front-to-back, this insertion also needs to proceed backwards.
   1.834 +            int idx = outcasts[poolIdx].length();
   1.835 +            for (BufferOffset *iter = outcasts[poolIdx].end()-1;
   1.836 +                 iter != outcasts[poolIdx].begin()-1;
   1.837 +                 --iter, --idx) {
   1.838 +                pools[poolIdx].updateLimiter(*iter);
   1.839 +                Inst *inst = this->getInst(*iter);
   1.840 +                Asm::insertTokenIntoTag(pools[poolIdx].instSize, (uint8_t*)inst, outcasts[poolIdx].end()-1-iter);
   1.841 +                pools[poolIdx].insertEntry(&outcastEntries[poolIdx][idx*pools[poolIdx].immSize], *iter, this->LifoAlloc_);
   1.842 +            }
   1.843 +            delete[] outcastEntries[poolIdx];
   1.844 +        }
    1.845 +        // This (*2) is not technically kosher, but I want to get this bug fixed.
    1.846 +        // It should actually be guardSize + the size of the instruction that we're attempting
    1.847 +        // to insert. Unfortunately that value is never passed in.  On ARM, these instructions
   1.848 +        // are always 4 bytes, so guardSize is legit to use.
   1.849 +        poolOffset = this->size() + guardSize * 2;
   1.850 +        poolOffset += headerSize;
   1.851 +        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
   1.852 +            // There can still be an awkward situation where the element that triggered the
   1.853 +            // initial dump didn't fit into the pool backwards, and now, still does not fit into
   1.854 +            // this pool.  Now it is necessary to go and dump this pool (note: this is almost
   1.855 +            // certainly being called from dumpPool()).
   1.856 +            poolOffset = pools[poolIdx].align(poolOffset);
   1.857 +            if (pools[poolIdx].checkFull(poolOffset)) {
   1.858 +                // ONCE AGAIN, UH-OH, TIME TO BAIL
   1.859 +                dumpPool();
   1.860 +                break;
   1.861 +            }
   1.862 +            poolOffset += pools[poolIdx].getPoolSize();
   1.863 +        }
   1.864 +    }
   1.865 +
   1.866 +    void dumpPool() {
   1.867 +        JS_ASSERT(!inBackref);
   1.868 +        IonSpew(IonSpew_Pools, "[%d] Attempting to dump the pool", id);
   1.869 +        PoolInfo newPoolInfo = getPoolData();
   1.870 +        if (newPoolInfo.size == 0) {
   1.871 +            // If there is no data in the pool being dumped, don't dump anything.
   1.872 +            inBackref = true;
    1.873 +            IonSpew(IonSpew_Pools, "[%d] Abort, no pool data", id);
   1.874 +            return;
   1.875 +        }
   1.876 +
   1.877 +        IonSpew(IonSpew_Pools, "[%d] Dumping %d bytes", id, newPoolInfo.size);
   1.878 +        if (!perforation.assigned()) {
   1.879 +            IonSpew(IonSpew_Pools, "[%d] No Perforation point selected, generating a new one", id);
   1.880 +            // There isn't a perforation here, we need to dump the pool with a guard.
   1.881 +            BufferOffset branch = this->nextOffset();
   1.882 +            bool shouldMarkAsBranch = this->isNextBranch();
   1.883 +            this->markNextAsBranch();
   1.884 +            this->putBlob(guardSize, nullptr);
   1.885 +            BufferOffset afterPool = this->nextOffset();
   1.886 +            Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
   1.887 +            markGuard();
   1.888 +            perforatedNode->isNatural = false;
   1.889 +            if (shouldMarkAsBranch)
   1.890 +                this->markNextAsBranch();
   1.891 +        }
   1.892 +
   1.893 +        // We have a perforation.  Time to cut the instruction stream, patch in the pool
    1.894 +        // and possibly re-arrange the pool to accommodate its new location.
   1.895 +        int poolOffset = perforation.getOffset();
   1.896 +        int magicAlign =  getInfo(numDumps-1).finalPos - getInfo(numDumps-1).offset;
   1.897 +        poolOffset += magicAlign;
   1.898 +        poolOffset += headerSize;
   1.899 +        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
   1.900 +            mozilla::DebugOnly<bool> beforePool = true;
   1.901 +            Pool *p = &pools[poolIdx];
   1.902 +            // Any entries that happened to be after the place we put our pool will need to be
    1.903 +            // switched from the forward-referenced pool to the backward-referenced pool.
   1.904 +            int idx = 0;
   1.905 +            for (BufferOffset *iter = p->loadOffsets.begin();
   1.906 +                 iter != p->loadOffsets.end(); ++iter, ++idx)
   1.907 +            {
   1.908 +                if (iter->getOffset() >= perforation.getOffset()) {
   1.909 +                    IonSpew(IonSpew_Pools, "[%d] Pushing entry %d in pool %d into the backwards section.", id, idx, poolIdx);
   1.910 +                    // insert this into the rear part of the pool.
   1.911 +                    int offset = idx * p->immSize;
   1.912 +                    p->other->insertEntry(&p->poolData[offset], BufferOffset(*iter), this->LifoAlloc_);
   1.913 +                    // update the limiting entry for this pool.
   1.914 +                    p->other->updateLimiter(*iter);
   1.915 +
   1.916 +                    // Update the current pool to report fewer entries.  They are now in the
   1.917 +                    // backwards section.
   1.918 +                    p->numEntries--;
   1.919 +                    beforePool = false;
   1.920 +                } else {
   1.921 +                    JS_ASSERT(beforePool);
   1.922 +                    // align the pool offset to the alignment of this pool
   1.923 +                    // it already only aligns when the pool has data in it, but we want to not
   1.924 +                    // align when all entries will end up in the backwards half of the pool
   1.925 +                    poolOffset = p->align(poolOffset);
   1.926 +                    IonSpew(IonSpew_Pools, "[%d] Entry %d in pool %d is before the pool.", id, idx, poolIdx);
   1.927 +                    // Everything here is known, we can safely do the necessary substitutions
   1.928 +                    Inst * inst = this->getInst(*iter);
   1.929 +                    // We need to manually compute the offset, including a possible bias.
   1.930 +                    int codeOffset = poolOffset - iter->getOffset();
   1.931 +                    // That is, patchConstantPoolLoad wants to be handed the address of the
   1.932 +                    // pool entry that is being loaded.  We need to do a non-trivial amount
   1.933 +                    // of math here, since the pool that we've made does not actually reside there
   1.934 +                    // in memory.
   1.935 +                    IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign);
   1.936 +                    Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign);
   1.937 +                }
   1.938 +            }
   1.939 +            // Some number of entries have been positively identified as being
   1.940 +            // in this section of the pool. Before processing the next pool,
   1.941 +            // update the offset from the beginning of the buffer
   1.942 +            poolOffset += p->numEntries * p->immSize;
   1.943 +        }
   1.944 +        poolOffset = footerSize;
   1.945 +        inBackref = true;
   1.946 +        for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) {
   1.947 +            Pool *tmp = pools[poolIdx].other;
   1.948 +            if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) {
    1.949 +                // GNAAAH.  While we rotated elements into the back half, one of them filled up.
    1.950 +                // Now, dumping the back half is necessary...
   1.951 +                finishPool();
   1.952 +                break;
   1.953 +            }
   1.954 +        }
   1.955 +    }
   1.956 +
   1.957 +    void flushPool() {
   1.958 +        if (this->oom())
   1.959 +            return;
   1.960 +        IonSpew(IonSpew_Pools, "[%d] Requesting a pool flush", id);
   1.961 +        if (!inBackref)
   1.962 +            dumpPool();
   1.963 +        finishPool();
   1.964 +    }
   1.965 +    void patchBranch(Inst *i, int curpool, BufferOffset branch) {
   1.966 +        const Inst *ci = i;
   1.967 +        ptrdiff_t offset = Asm::getBranchOffset(ci);
   1.968 +        // If the offset is 0, then there is nothing to do.
   1.969 +        if (offset == 0)
   1.970 +            return;
   1.971 +        int destOffset = branch.getOffset() + offset;
   1.972 +        if (offset > 0) {
   1.973 +
   1.974 +            while (curpool < numDumps && poolInfo[curpool].offset <= destOffset) {
   1.975 +                offset += poolInfo[curpool].size;
   1.976 +                curpool++;
   1.977 +            }
   1.978 +        } else {
   1.979 +            // Ignore the pool that comes next, since this is a backwards branch
   1.980 +            curpool--;
   1.981 +            while (curpool >= 0 && poolInfo[curpool].offset > destOffset) {
   1.982 +                offset -= poolInfo[curpool].size;
   1.983 +                curpool--;
   1.984 +            }
   1.985 +            // Can't assert anything here, since the first pool may be after the target.
   1.986 +        }
   1.987 +        Asm::retargetNearBranch(i, offset, false);
   1.988 +    }
   1.989 +
   1.990 +    // Mark the next instruction as a valid guard.  This means we can place a pool here.
   1.991 +    void markGuard() {
    1.992 +        // If we are in a no-pool zone, then there is no point in dog-earing
    1.993 +        // this branch as a place to go back to.
   1.994 +        if (canNotPlacePool)
   1.995 +            return;
   1.996 +        // There is no point in trying to grab a new slot if we've already
   1.997 +        // found one and are in the process of filling it in.
   1.998 +        if (inBackref)
   1.999 +            return;
  1.1000 +        perforate();
  1.1001 +    }
  1.1002 +    void enterNoPool() {
  1.1003 +        if (!canNotPlacePool && !perforation.assigned()) {
   1.1004 +            // Embarrassing mode: the Assembler requests the start of a no-pool section,
   1.1005 +            // and there have been no valid places that a pool could be dumped thus far.
   1.1006 +            // If a pool were to fill up before this no-pool section ends, we would need to go back
   1.1007 +            // in the stream and insert a pool guard after the fact.  This is feasible, but
   1.1008 +            // for now, it is easier to just allocate a junk instruction, default it to a nop, and
   1.1009 +            // finally, if the pool *is* needed, patch the nop to a pool guard.
  1.1010 +            // What the assembler requests:
  1.1011 +
  1.1012 +            // #request no-pool zone
  1.1013 +            // push pc
  1.1014 +            // blx r12
  1.1015 +            // #end no-pool zone
  1.1016 +
  1.1017 +            // however, if we would need to insert a pool, and there is no perforation point...
  1.1018 +            // so, actual generated code:
  1.1019 +
  1.1020 +            // b next; <= perforation point
  1.1021 +            // next:
  1.1022 +            // #beginning of no pool zone
  1.1023 +            // push pc
  1.1024 +            // blx r12
  1.1025 +
  1.1026 +            BufferOffset branch = this->nextOffset();
  1.1027 +            this->markNextAsBranch();
  1.1028 +            this->putBlob(guardSize, nullptr);
  1.1029 +            BufferOffset afterPool = this->nextOffset();
  1.1030 +            Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
  1.1031 +            markGuard();
  1.1032 +            if (perforatedNode != nullptr)
  1.1033 +                perforatedNode->isNatural = false;
  1.1034 +        }
  1.1035 +        canNotPlacePool++;
  1.1036 +    }
  1.1037 +    void leaveNoPool() {
  1.1038 +        canNotPlacePool--;
  1.1039 +    }
  1.1040 +    int size() const {
  1.1041 +        return uncheckedSize();
  1.1042 +    }
  1.1043 +    Pool *getPool(int idx) {
  1.1044 +        return &pools[idx];
  1.1045 +    }
  1.1046 +    void markNextAsBranch() {
  1.1047 +        // If the previous thing inserted was the last instruction of
  1.1048 +        // the node, then whoops, we want to mark the first instruction of
  1.1049 +        // the next node.
  1.1050 +        this->ensureSpace(InstBaseSize);
  1.1051 +        JS_ASSERT(*this->getTail() != nullptr);
  1.1052 +        (*this->getTail())->markNextAsBranch();
  1.1053 +    }
  1.1054 +    bool isNextBranch() {
  1.1055 +        JS_ASSERT(*this->getTail() != nullptr);
  1.1056 +        return (*this->getTail())->isNextBranch();
  1.1057 +    }
  1.1058 +
  1.1059 +    int uncheckedSize() const {
  1.1060 +        PoolInfo pi = getPoolData();
  1.1061 +        int codeEnd = this->nextOffset().getOffset();
  1.1062 +        return (codeEnd - pi.offset) + pi.finalPos;
  1.1063 +    }
  1.1064 +    ptrdiff_t curDumpsite;
  1.1065 +    void resetCounter() {
  1.1066 +        curDumpsite = 0;
  1.1067 +    }
  1.1068 +    ptrdiff_t poolSizeBefore(ptrdiff_t offset) const {
  1.1069 +        int cur = 0;
   1.1070 +        while (cur < numDumps && poolInfo[cur].offset <= offset)
   1.1071 +            cur++;
   1.1072 +        // poolInfo[cur].offset is now larger than the given offset;
   1.1073 +        // either this is the first one, or the previous is the last one we care about.
  1.1074 +        if (cur == 0)
  1.1075 +            return 0;
  1.1076 +        return poolInfo[cur-1].finalPos - poolInfo[cur-1].offset;
  1.1077 +    }
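          +    // e.g. with a single dump recorded as { offset: 100, finalPos: 132 },
          +    // poolSizeBefore(150) returns 32: the number of pool bytes that sit
          +    // before code offset 150.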
  1.1078 +
  1.1079 +  private:
  1.1080 +    void getPEPool(PoolEntry pe, Pool **retP, int32_t * retOffset, int32_t *poolNum) const {
  1.1081 +        int poolKind = pe.poolKind();
  1.1082 +        Pool *p = nullptr;
  1.1083 +        uint32_t offset = pe.offset() * pools[poolKind].immSize;
  1.1084 +        int idx;
  1.1085 +        for (idx = 0; idx < numDumps; idx++) {
  1.1086 +            p = &poolInfo[idx].slice->data[poolKind];
  1.1087 +            if (p->getPoolSize() > offset)
  1.1088 +                break;
  1.1089 +            offset -= p->getPoolSize();
  1.1090 +            p = p->other;
  1.1091 +            if (p->getPoolSize() > offset)
  1.1092 +                break;
  1.1093 +            offset -= p->getPoolSize();
  1.1094 +            p = nullptr;
  1.1095 +        }
  1.1096 +        if (poolNum != nullptr)
  1.1097 +            *poolNum = idx;
  1.1098 +        // If this offset is contained in any finished pool, forward or backwards, p now
  1.1099 +        // points to that pool, if it is not in any pool (should be in the currently building pool)
  1.1100 +        // then p is nullptr.
  1.1101 +        if (p == nullptr) {
  1.1102 +            p = &pools[poolKind];
   1.1103 +            if (offset >= p->getPoolSize()) {
   1.1104 +                offset -= p->getPoolSize();
   1.1105 +                p = p->other;
   1.1106 +            }
  1.1107 +        }
  1.1108 +        JS_ASSERT(p != nullptr);
  1.1109 +        JS_ASSERT(offset < p->getPoolSize());
  1.1110 +        *retP = p;
  1.1111 +        *retOffset = offset;
  1.1112 +    }
  1.1113 +    uint8_t *getPoolEntry(PoolEntry pe) {
  1.1114 +        Pool *p;
  1.1115 +        int32_t offset;
  1.1116 +        getPEPool(pe, &p, &offset, nullptr);
  1.1117 +        return &p->poolData[offset];
  1.1118 +    }
  1.1119 +    size_t getPoolEntrySize(PoolEntry pe) {
  1.1120 +        int idx = pe.poolKind();
  1.1121 +        return pools[idx].immSize;
  1.1122 +    }
  1.1123 +
  1.1124 +  public:
  1.1125 +    uint32_t poolEntryOffset(PoolEntry pe) const {
  1.1126 +        Pool *realPool;
  1.1127 +        // offset is in bytes, not entries.
  1.1128 +        int32_t offset;
  1.1129 +        int32_t poolNum;
  1.1130 +        getPEPool(pe, &realPool, &offset, &poolNum);
  1.1131 +        PoolInfo *pi = &poolInfo[poolNum];
  1.1132 +        Pool *poolGroup = pi->slice->data;
  1.1133 +        uint32_t start = pi->finalPos - pi->size + headerSize;
   1.1134 +        // The order of the pools is:
  1.1135 +        // A B C C_Rev B_Rev A_Rev, so in the initial pass,
  1.1136 +        // go through the pools forwards, and in the second pass
  1.1137 +        // go through them in reverse order.
  1.1138 +        for (int idx = 0; idx < numPoolKinds; idx++) {
  1.1139 +            if (&poolGroup[idx] == realPool) {
  1.1140 +                return start + offset;
  1.1141 +            }
  1.1142 +            start = poolGroup[idx].addPoolSize(start);
  1.1143 +        }
  1.1144 +        for (int idx = numPoolKinds-1; idx >= 0; idx--) {
  1.1145 +            if (poolGroup[idx].other == realPool) {
  1.1146 +                return start + offset;
  1.1147 +            }
  1.1148 +            start = poolGroup[idx].other->addPoolSize(start);
  1.1149 +        }
  1.1150 +        MOZ_ASSUME_UNREACHABLE("Entry is not in a pool");
  1.1151 +    }
  1.1152 +    void writePoolEntry(PoolEntry pe, uint8_t *buff) {
  1.1153 +        size_t size = getPoolEntrySize(pe);
  1.1154 +        uint8_t *entry = getPoolEntry(pe);
  1.1155 +        memcpy(entry, buff, size);
  1.1156 +    }
  1.1157 +    void readPoolEntry(PoolEntry pe, uint8_t *buff) {
  1.1158 +        size_t size = getPoolEntrySize(pe);
  1.1159 +        uint8_t *entry = getPoolEntry(pe);
  1.1160 +        memcpy(buff, entry, size);
  1.1161 +    }
  1.1162 +
  1.1163 +};
   1.1164 +} // namespace jit
   1.1165 +} // namespace js
  1.1166 +#endif /* jit_shared_IonAssemblerBufferWithConstantPools_h */
