/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_shared_IonAssemblerBufferWithConstantPools_h
#define jit_shared_IonAssemblerBufferWithConstantPools_h

#include "mozilla/DebugOnly.h"

#include "assembler/wtf/SegmentedVector.h"
#include "jit/IonSpewer.h"
#include "jit/shared/IonAssemblerBuffer.h"

namespace js {
namespace jit {
typedef Vector<BufferOffset, 512, OldIonAllocPolicy> LoadOffsets;

struct Pool
  : public OldIonAllocPolicy
{
    const int maxOffset;
    const int immSize;
    const int instSize;
    const int bias;

  private:
    const int alignment;

  public:
    const bool isBackref;
    const bool canDedup;
    // "other" is the backwards half of this pool; it is held in another Pool structure.
    Pool *other;
    uint8_t *poolData;
    uint32_t numEntries;
    uint32_t buffSize;
    LoadOffsets loadOffsets;

    // When filling pools where the size of an immediate is larger than the size of
    // an instruction, we find ourselves in a case where the distance between the
    // next instruction and the next pool slot is increasing!
    // Moreover, if we want to do fancy things like deduplicate pool entries at
    // dump time, we may not know the location in a pool (and thus the limiting load)
    // until very late.
    // Lastly, it may be beneficial to interleave the pools. I have absolutely no idea
    // how that will work, but my suspicion is that it will be difficult.

    BufferOffset limitingUser;
    int limitingUsee;

    Pool(int maxOffset_, int immSize_, int instSize_, int bias_, int alignment_, LifoAlloc &LifoAlloc_,
         bool isBackref_ = false, bool canDedup_ = false, Pool *other_ = nullptr)
      : maxOffset(maxOffset_), immSize(immSize_), instSize(instSize_),
        bias(bias_), alignment(alignment_),
        isBackref(isBackref_), canDedup(canDedup_), other(other_),
        poolData(static_cast<uint8_t *>(LifoAlloc_.alloc(8 * immSize))), numEntries(0),
        buffSize(8), loadOffsets(), limitingUser(), limitingUsee(INT_MIN)
    {
    }
    static const int garbage = 0xa5a5a5a5;
    Pool()
      : maxOffset(garbage), immSize(garbage), instSize(garbage), bias(garbage),
        alignment(garbage), isBackref(garbage), canDedup(garbage), other((Pool*)garbage)
    {
    }
    // Sometimes, when we are adding large values to a pool, the limiting use may change.
    // Handle this case. nextInst is the offset of the instruction that is about to use
    // this pool entry.
    void updateLimiter(BufferOffset nextInst) {
        int oldRange, newRange;
        if (isBackref) {
            // Common expressions that are not subtracted: the location of the pool, ...
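            // Both ranges deliberately leave out the position of the pool itself; only the
            // relative sizes matter when deciding below which use becomes the new limiter.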
            oldRange = limitingUser.getOffset() - ((numEntries - limitingUsee) * immSize);
            newRange = nextInst.getOffset();
        } else {
            oldRange = (limitingUsee * immSize) - limitingUser.getOffset();
            newRange = (numEntries * immSize) - nextInst.getOffset();
        }
        if (!limitingUser.assigned() || newRange > oldRange) {
            // We have a new largest range!
            limitingUser = nextInst;
            limitingUsee = numEntries;
        }
    }
    // checkFull is called before any modifications have been made.
    // It asks: "if we were to add this instruction and pool entry,
    // would we be in an invalid state?". If it returns true, then it is in fact
    // time for a "pool dump".

    // poolOffset is the distance from the end of the current section to the end of the pool.
    //            For the last section of the pool, this will be the size of the footer.
    //            For the first section of the pool, it will be the size of every other
    //            section plus the footer.
    // codeOffset is the instruction-distance from the pool to the beginning of the buffer.
    //            Since codeOffset only includes instructions, the number is the same for
    //            the beginning and end of the pool.
    // instOffset is the offset from the beginning of the buffer to the instruction that
    //            is about to be placed.
    bool checkFullBackref(int poolOffset, int codeOffset) {
        if (!limitingUser.assigned())
            return false;
        signed int distance =
            limitingUser.getOffset() + bias
            - codeOffset + poolOffset +
            (numEntries - limitingUsee + 1) * immSize;
        if (distance >= maxOffset)
            return true;
        return false;
    }

    // checkFull answers the question "if a pool were placed at poolOffset, would
    // any reference into the pool be out of range?". It is meant to be used as instructions
    // and elements are inserted, to determine if a saved perforation point needs to be used.

    bool checkFull(int poolOffset) {
        // Inserting an instruction into the stream can
        // push any of the pools out of range.
        // Similarly, inserting into a pool can push the pool entry out of range.
        JS_ASSERT(!isBackref);
        // Not full if there aren't any uses.
        if (!limitingUser.assigned()) {
            return false;
        }
        // We're considered "full" when:
        //     poolOffset + limitingUsee * immSize - (limitingUser + bias) >= maxOffset
        if (poolOffset + limitingUsee * immSize - (limitingUser.getOffset() + bias) >= maxOffset) {
            return true;
        }
        return false;
    }

    // By the time this function is called, we'd damn well better know that this is going to succeed.
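    // insertEntry appends one immediate to this pool (doubling the backing store when it
    // is full), records the offset of the load that references it, and returns the index
    // of the new entry; that index is the "token" that later gets patched into the load.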
    uint32_t insertEntry(uint8_t *data, BufferOffset off, LifoAlloc &LifoAlloc_) {
        if (numEntries == buffSize) {
            buffSize <<= 1;
            uint8_t *tmp = static_cast<uint8_t*>(LifoAlloc_.alloc(immSize * buffSize));
            if (tmp == nullptr) {
                buffSize = 0;
                return -1;
            }
            memcpy(tmp, poolData, immSize * numEntries);
            poolData = tmp;
        }
        memcpy(&poolData[numEntries * immSize], data, immSize);
        loadOffsets.append(off.getOffset());
        return numEntries++;
    }

    bool reset(LifoAlloc &a) {
        numEntries = 0;
        buffSize = 8;
        poolData = static_cast<uint8_t*>(a.alloc(buffSize * immSize));
        if (poolData == nullptr)
            return false;

        void *otherSpace = a.alloc(sizeof(Pool));
        if (otherSpace == nullptr)
            return false;

        other = new (otherSpace) Pool(other->maxOffset, other->immSize, other->instSize,
                                      other->bias, other->alignment, a, other->isBackref,
                                      other->canDedup);
        new (&loadOffsets) LoadOffsets;

        limitingUser = BufferOffset();
        limitingUsee = -1;
        return true;
    }
    // WARNING: This will not always align values. It will only
    // align to the requirement of the pool. If the pool is empty,
    // there is nothing to be aligned, so it will not perform any alignment.
    uint8_t* align(uint8_t *ptr) {
        return (uint8_t*)align((uint32_t)ptr);
    }
    uint32_t align(uint32_t ptr) {
        if (numEntries == 0)
            return ptr;
        return (ptr + alignment - 1) & ~(alignment - 1);
    }
    uint32_t forceAlign(uint32_t ptr) {
        return (ptr + alignment - 1) & ~(alignment - 1);
    }
    bool isAligned(uint32_t ptr) {
        return ptr == align(ptr);
    }
    int getAlignment() {
        return alignment;
    }

    uint32_t addPoolSize(uint32_t start) {
        start = align(start);
        start += immSize * numEntries;
        return start;
    }
    uint8_t *addPoolSize(uint8_t *start) {
        start = align(start);
        start += immSize * numEntries;
        return start;
    }
    uint32_t getPoolSize() {
        return immSize * numEntries;
    }
};


template <int SliceSize, int InstBaseSize>
struct BufferSliceTail : public BufferSlice<SliceSize> {
    Pool *data;
    mozilla::Array<uint8_t, (SliceSize + (InstBaseSize * 8 - 1)) / (InstBaseSize * 8)> isBranch;
    bool isNatural : 1;
    BufferSliceTail *getNext() {
        return (BufferSliceTail *)this->next;
    }
    BufferSliceTail() : data(nullptr), isNatural(true) {
        memset(&isBranch[0], 0, sizeof(isBranch));
    }
    void markNextAsBranch() {
        int idx = this->nodeSize / InstBaseSize;
        isBranch[idx >> 3] |= 1 << (idx & 0x7);
    }
    bool isNextBranch() {
        unsigned int size = this->nodeSize;
        if (size >= SliceSize)
            return false;
        int idx = size / InstBaseSize;
        return (isBranch[idx >> 3] >> (idx & 0x7)) & 1;
    }
};

#if 0
static int getId() {
    if (MaybeGetIonContext())
        return MaybeGetIonContext()->getNextAssemblerId();
    return NULL_ID;
}
#endif
static inline void spewEntry(uint8_t *ptr, int length) {
#if IS_LITTLE_ENDIAN
    for (int idx = 0; idx < length; idx++) {
        IonSpewCont(IonSpew_Pools, "%02x", ptr[length - idx - 1]);
        if (((idx & 3) == 3) && (idx + 1 != length))
            IonSpewCont(IonSpew_Pools, "_");
    }
#else
    for (int idx = 0; idx < length; idx++) {
        IonSpewCont(IonSpew_Pools, "%02x", ptr[idx]);
        if (((idx & 3) == 3) && (idx + 1 != length))
            IonSpewCont(IonSpew_Pools, "_");
    }
#endif
}
// NOTE: Adding in the ability to retroactively insert a pool has consequences!
// Most notably, Labels stop working. Normally, we create a label and later bind it.
// When the label is bound, we back-patch all previous references to the label with
// the correct offset. However, since a pool may be retroactively inserted, we don't
// actually know what the final offset is going to be until much later. This will
// happen (in some instances) after the pools have been finalized. Attempting to compute
// the correct offsets for branches as the pools are finalized is quite infeasible.
// Instead, I write *just* the number of instructions that will be jumped over, then
// when we go to copy the instructions into the executable buffer, fix up all of the
// offsets to include the pools. Since we have about 32 megabytes worth of offset,
// I am not very worried about the pools moving it out of range.
// Now, how exactly do we generate these? The first step is to identify which
// instructions are actually branches that need to be fixed up. A single bit
// per instruction should be enough to determine which ones are branches, but
// we have no guarantee that all instructions are the same size, so the start of
// each branch instruction will be marked with a bit (1 bit per byte).
// Then we need to call up to the assembler to determine what the offset of the branch
// is. The offset will be the number of instructions that are being skipped over,
// along with any processor bias. We then need to calculate the offset, including pools,
// and write that value into the buffer. At this point, we can write it into the
// executable buffer, or the AssemblerBuffer, and copy the data over later.
// Previously, this was all handled by the assembler, since the location
// and size of pools were always known as soon as their locations had been reached.

// A class for indexing into constant pools.
// Each time a pool entry is added, one of these is generated.
// This can be supplied to read and write that entry after the fact.
// And it can be used to get the address of the entry once the buffer
// has been finalized, and an executable copy allocated.
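
// Illustrative only: a rough sketch of how an architecture backend might drive the buffer
// defined below. The template arguments, the pool geometry, and the names Instruction,
// MyAsm and allocateExecutableMemory are hypothetical placeholders (the real Asm policy
// class must supply insertTokenIntoTag, patchConstantPoolLoad, writePoolHeader,
// writePoolFooter, writePoolGuard, getBranchOffset and retargetNearBranch); this is not
// part of the header's interface.
#if 0
typedef AssemblerBufferWithConstantPool<1024, 4, Instruction, MyAsm, 1> MyBuffer;

static void sketch(LifoAlloc &lifo, Pool *pools /* (1 << poolKindBits) Pools, each with ->other set */)
{
    MyBuffer buffer(/* guardSize */ 1, /* headerSize */ 1, /* footerSize */ 1,
                    pools, /* instBufferAlign */ 8);
    buffer.initWithAllocator();

    // A plain data word: no pool entry, so the pool argument is null.
    buffer.putInt(0xe1a00000);

    // A load plus its constant: the entry's token is patched into the load via
    // Asm::insertTokenIntoTag, and the returned PoolEntry can later be turned into a
    // final byte offset with poolEntryOffset().
    uint32_t imm = 42;
    uint32_t loadInst = 0;
    MyBuffer::PoolEntry entry;
    buffer.insertEntry(sizeof(loadInst), (uint8_t*)&loadInst, buffer.getPool(0),
                       (uint8_t*)&imm, &entry);

    // Force out anything pending, then copy code and pools into executable memory.
    buffer.flushPool();
    uint8_t *code = allocateExecutableMemory(buffer.size()); // hypothetical allocator
    buffer.executableCopy(code);
}
#endif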

template <int SliceSize, int InstBaseSize, class Inst, class Asm, int poolKindBits>
struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst> {
  private:
    mozilla::Array<int, 1 << poolKindBits> entryCount;
    static const int offsetBits = 32 - poolKindBits;
  public:

    class PoolEntry {
        template <int, int, class, class, int>
        friend struct AssemblerBufferWithConstantPool;
        uint32_t offset_ : offsetBits;
        uint32_t kind_ : poolKindBits;
        PoolEntry(int offset, int kind) : offset_(offset), kind_(kind) {
        }
      public:
        uint32_t encode() {
            uint32_t ret;
            memcpy(&ret, this, sizeof(uint32_t));
            return ret;
        }
        PoolEntry(uint32_t bits) : offset_(((1u << offsetBits) - 1) & bits),
                                   kind_(bits >> offsetBits) {
        }
        PoolEntry() : offset_((1u << offsetBits) - 1), kind_((1u << poolKindBits) - 1) {
        }

        uint32_t poolKind() const {
            return kind_;
        }
        uint32_t offset() const {
            return offset_;
        }
    };
  private:
    typedef BufferSliceTail<SliceSize, InstBaseSize> BufferSlice;
    typedef AssemblerBuffer<SliceSize, Inst> Parent;

    // The size of a guard instruction.
    const int guardSize;
    // The size of the header that is put at the beginning of a full pool.
    const int headerSize;
    // The size of a footer that is put in a pool after it is full.
    const int footerSize;
    // The number of sub-pools that we can allocate into.
    static const int numPoolKinds = 1 << poolKindBits;

    Pool *pools;

    // The buffer should be aligned to this address.
    const int instBufferAlign;

    // The number of times we've dumped the pool.
    int numDumps;
    struct PoolInfo {
        int offset;   // the number of instructions before the start of the pool
        int size;     // the size of the pool, including padding
        int finalPos; // the end of the buffer, in bytes from the beginning of the buffer
        BufferSlice *slice;
    };
    PoolInfo *poolInfo;
    // We need to keep track of how large the pools are, so we can allocate
    // enough space for them later. This should include any amount of padding
    // necessary to keep the pools aligned.
    int poolSize;
    // Incremented (via enterNoPool) when the Assembler does not want us to dump a pool here.
    int canNotPlacePool;
    // Are we filling up the forwards or backwards pools?
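    // While this is false, new entries land in the forward pools; dumpPool() flips it to
    // true so that subsequent entries accumulate in the backwards halves (pool->other)
    // until finishPool() seals the dump and clears it again.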
    bool inBackref;
    // Cache the last place we saw an opportunity to dump the pool.
    BufferOffset perforation;
    BufferSlice *perforatedNode;
  public:
    int id;
  private:
    static const int logBasePoolInfo = 3;
    BufferSlice **getHead() {
        return (BufferSlice**)&this->head;
    }
    BufferSlice **getTail() {
        return (BufferSlice**)&this->tail;
    }

    virtual BufferSlice *newSlice(LifoAlloc &a) {
        BufferSlice *tmp = static_cast<BufferSlice*>(a.alloc(sizeof(BufferSlice)));
        if (!tmp) {
            this->m_oom = true;
            return nullptr;
        }
        new (tmp) BufferSlice;
        return tmp;
    }
  public:
    AssemblerBufferWithConstantPool(int guardSize_, int headerSize_, int footerSize_, Pool *pools_, int instBufferAlign_)
      : guardSize(guardSize_), headerSize(headerSize_),
        footerSize(footerSize_),
        pools(pools_),
        instBufferAlign(instBufferAlign_), numDumps(0),
        poolInfo(nullptr),
        poolSize(0), canNotPlacePool(0), inBackref(false),
        perforatedNode(nullptr), id(-1)
    {
        for (int idx = 0; idx < numPoolKinds; idx++) {
            entryCount[idx] = 0;
        }
    }

    // We need to wait until an AutoIonContextAlloc is created by the
    // IonMacroAssembler before allocating any space.
    void initWithAllocator() {
        poolInfo = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * (1 << logBasePoolInfo)));
    }

    const PoolInfo &getInfo(int x) const {
        static const PoolInfo nil = {0, 0, 0};
        if (x < 0 || x >= numDumps)
            return nil;
        return poolInfo[x];
    }
    void executableCopy(uint8_t *dest_) {
        if (this->oom())
            return;
        // TODO: only do this when the pool actually has a value in it.
        flushPool();
        for (int idx = 0; idx < numPoolKinds; idx++) {
            JS_ASSERT(pools[idx].numEntries == 0 && pools[idx].other->numEntries == 0);
        }
        typedef mozilla::Array<uint8_t, InstBaseSize> Chunk;
        mozilla::DebugOnly<Chunk*> start = (Chunk*)dest_;
        Chunk *dest = (Chunk*)(((uint32_t)dest_ + instBufferAlign - 1) & ~(instBufferAlign - 1));
        int curIndex = 0;
        int curInstOffset = 0;
        JS_ASSERT(start == dest);
        for (BufferSlice *cur = *getHead(); cur != nullptr; cur = cur->getNext()) {
            Chunk *src = (Chunk*)&cur->instructions;
            for (unsigned int idx = 0; idx < cur->size()/InstBaseSize;
                 idx++, curInstOffset += InstBaseSize) {
                // Is the current instruction a branch?
                if (cur->isBranch[idx >> 3] & (1 << (idx & 7))) {
                    // It's a branch: fix up the branchiness!
                    patchBranch((Inst*)&src[idx], curIndex, BufferOffset(curInstOffset));
                }
                memcpy(&dest[idx], &src[idx], sizeof(Chunk));
            }
            dest += cur->size()/InstBaseSize;
            if (cur->data != nullptr) {
                // Have the repatcher move on to the next pool.
                curIndex++;
                // Loop over all of the pools, copying them into place.
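                // The layout written here is: pool header, each forward pool in kind
                // order, each backwards pool in reverse kind order, then the pool footer.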
                uint8_t *poolDest = (uint8_t*)dest;
                Asm::writePoolHeader(poolDest, cur->data, cur->isNatural);
                poolDest += headerSize;
                for (int idx = 0; idx < numPoolKinds; idx++) {
                    Pool *curPool = &cur->data[idx];
                    // Align the pool.
                    poolDest = curPool->align(poolDest);
                    memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries);
                    poolDest += curPool->immSize * curPool->numEntries;
                }
                // Now go over the whole list backwards, and copy in the reverse portions.
                for (int idx = numPoolKinds - 1; idx >= 0; idx--) {
                    Pool *curPool = cur->data[idx].other;
                    // Align the pool.
                    poolDest = curPool->align(poolDest);
                    memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries);
                    poolDest += curPool->immSize * curPool->numEntries;
                }
                // Write a footer in place.
                Asm::writePoolFooter(poolDest, cur->data, cur->isNatural);
                poolDest += footerSize;
                // At this point, poolDest had better still be aligned to a chunk boundary.
                dest = (Chunk*) poolDest;
            }
        }
    }

    BufferOffset insertEntry(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data, PoolEntry *pe = nullptr) {
        if (this->oom() && !this->bail())
            return BufferOffset();
        int token;
        if (p != nullptr) {
            int poolId = p - pools;
            const char sigil = inBackref ? 'B' : 'F';

            IonSpew(IonSpew_Pools, "[%d]{%c} Inserting entry into pool %d", id, sigil, poolId);
            IonSpewStart(IonSpew_Pools, "[%d] data is: 0x", id);
            spewEntry(data, p->immSize);
            IonSpewFin(IonSpew_Pools);
        }
        // Insert the pool value.
        if (inBackref)
            token = insertEntryBackwards(instSize, inst, p, data);
        else
            token = insertEntryForwards(instSize, inst, p, data);
        // Now to get an instruction to write.
        PoolEntry retPE;
        if (p != nullptr) {
            if (this->oom())
                return BufferOffset();
            int poolId = p - pools;
            IonSpew(IonSpew_Pools, "[%d] Entry has token %d, offset ~%d", id, token, size());
            Asm::insertTokenIntoTag(instSize, inst, token);
            JS_ASSERT(poolId < (1 << poolKindBits));
            JS_ASSERT(poolId >= 0);
            // Figure out the offset within like-kinded pool entries.
            retPE = PoolEntry(entryCount[poolId], poolId);
            entryCount[poolId]++;
        }
        // Now inst is a valid thing to insert into the instruction stream.
        if (pe != nullptr)
            *pe = retPE;
        return this->putBlob(instSize, inst);
    }

    uint32_t insertEntryBackwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) {
        // Unlike the forward case, if we merely insert an instruction without inserting
        // anything into a pool after a pool has already been placed, we don't affect
        // anything relevant, so we can skip this check entirely!

        if (p == nullptr)
            return INT_MIN;
        // TODO: calculating offsets for the alignment requirements is *hard*.
        // Instead, assume that we always add the maximum.
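        // Walk every backwards pool, assuming worst-case alignment padding, and check
        // whether adding this entry would push any backwards reference past its maxOffset;
        // if so, finish the current dump and fall back to a fresh forward insertion.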
        int poolOffset = footerSize;
        Pool *cur, *tmp;
        // NOTE: we want to process the pools from last to first.
        // Since the last pool is pools[0].other, and the first pool
        // is pools[numPoolKinds-1], we actually want to process this
        // forwards.
        for (cur = pools; cur < &pools[numPoolKinds]; cur++) {
            // Fetch the pool for the backwards half.
            tmp = cur->other;
            if (p == cur)
                tmp->updateLimiter(this->nextOffset());

            if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) {
                // Uh-oh, the backwards pool is full. Time to finalize it, and
                // switch to a new forward pool.
                if (p != nullptr)
                    IonSpew(IonSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
                else
                    IonSpew(IonSpew_Pools, "[%d] Inserting instruction(%d) caused a spill", id, size());

                this->finishPool();
                if (this->oom())
                    return uint32_t(-1);
                return this->insertEntryForwards(instSize, inst, p, data);
            }
            // When moving back to front, calculating the alignment is hard; just be
            // conservative with it.
            poolOffset += tmp->immSize * tmp->numEntries + tmp->getAlignment();
            if (p == tmp) {
                poolOffset += tmp->immSize;
            }
        }
        return p->numEntries + p->other->insertEntry(data, this->nextOffset(), this->LifoAlloc_);
    }

    // Simultaneously insert an instSized instruction into the stream,
    // and an entry into the pool. There are many things that can happen.
    // 1) the insertion goes as planned
    // 2) inserting an instruction pushes a previous pool-reference out of range, forcing a dump
    // 2a) there isn't a reasonable save point in the instruction stream. We need to save room for
    //     a guard instruction to branch over the pool.
    int insertEntryForwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) {
        // Advance the "current offset" by an inst, so everyone knows what their offset should be.
        uint32_t nextOffset = this->size() + instSize;
        uint32_t poolOffset = nextOffset;
        Pool *tmp;
        // If we need a guard instruction, reserve space for that.
        if (!perforatedNode)
            poolOffset += guardSize;
        // Also, take into account the size of the header that will be placed *after*
        // the guard instruction.
        poolOffset += headerSize;

        // Perform the necessary range checks.
        for (tmp = pools; tmp < &pools[numPoolKinds]; tmp++) {
            // The pool may wish for a particular alignment. Let's give it one.
            JS_ASSERT((tmp->getAlignment() & (tmp->getAlignment() - 1)) == 0);
            // The pool only needs said alignment *if* there are any entries in the pool.
            // WARNING: the pool needs said alignment if there are going to be entries in
            // the pool after this entry has been inserted.
            if (p == tmp)
                poolOffset = tmp->forceAlign(poolOffset);
            else
                poolOffset = tmp->align(poolOffset);

            // If we're at the pool we want to insert into, find a new limiter
            // before we do the range check.
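            // updateLimiter() must see the would-be offset of this instruction so that
            // checkFull() below accounts for the entry we are about to add.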
            if (p == tmp) {
                p->updateLimiter(BufferOffset(nextOffset));
            }
            if (tmp->checkFull(poolOffset)) {
                // Uh-oh. DUMP DUMP DUMP.
                if (p != nullptr)
                    IonSpew(IonSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
                else
                    IonSpew(IonSpew_Pools, "[%d] Inserting instruction(%d) caused a spill", id, size());

                this->dumpPool();
                return this->insertEntryBackwards(instSize, inst, p, data);
            }
            // Include the size of this pool in the running total.
            if (p == tmp) {
                nextOffset += tmp->immSize;
            }
            nextOffset += tmp->immSize * tmp->numEntries;
        }
        if (p == nullptr) {
            return INT_MIN;
        }
        return p->insertEntry(data, this->nextOffset(), this->LifoAlloc_);
    }
    BufferOffset putInt(uint32_t value) {
        return insertEntry(sizeof(uint32_t) / sizeof(uint8_t), (uint8_t*)&value, nullptr, nullptr);
    }
    // Mark the current section as an area where we can
    // later go to dump a pool.
    void perforate() {
        // If we're filling the backreferences, we don't want to start looking for a new dump site.
        if (inBackref)
            return;
        if (canNotPlacePool)
            return;
        // If there is nothing in the pool, then it is strictly disadvantageous
        // to attempt to place a pool here.
        bool empty = true;
        for (int i = 0; i < numPoolKinds; i++) {
            if (pools[i].numEntries != 0) {
                empty = false;
                break;
            }
        }
        if (empty)
            return;
        perforatedNode = *getTail();
        perforation = this->nextOffset();
        Parent::perforate();
        IonSpew(IonSpew_Pools, "[%d] Adding a perforation at offset %d", id, perforation.getOffset());
    }

    // After a pool is finished, no more elements may be added to it. During this phase, we
    // will know the exact offsets to the pool entries, and those values should be written into
    // the given instructions.
    PoolInfo getPoolData() const {
        int prevOffset = getInfo(numDumps - 1).offset;
        int prevEnd = getInfo(numDumps - 1).finalPos;
        // Calculate the offset of the start of this pool.
        int perfOffset = perforation.assigned() ?
            perforation.getOffset() :
            this->nextOffset().getOffset() + this->guardSize;
        int initOffset = prevEnd + (perfOffset - prevOffset);
        int finOffset = initOffset;
        bool poolIsEmpty = true;
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            if (pools[poolIdx].numEntries != 0) {
                poolIsEmpty = false;
                break;
            }
            if (pools[poolIdx].other != nullptr && pools[poolIdx].other->numEntries != 0) {
                poolIsEmpty = false;
                break;
            }
        }
        if (!poolIsEmpty) {
            finOffset += headerSize;
            for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
                finOffset = pools[poolIdx].align(finOffset);
                finOffset += pools[poolIdx].numEntries * pools[poolIdx].immSize;
            }
            // And compute the necessary adjustments for the second half of the pool.
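            // The backwards halves come after the forward halves, in reverse kind order,
            // mirroring the layout that executableCopy() writes out.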
            for (int poolIdx = numPoolKinds - 1; poolIdx >= 0; poolIdx--) {
                finOffset = pools[poolIdx].other->align(finOffset);
                finOffset += pools[poolIdx].other->numEntries * pools[poolIdx].other->immSize;
            }
            finOffset += footerSize;
        }

        PoolInfo ret;
        ret.offset = perfOffset;
        ret.size = finOffset - initOffset;
        ret.finalPos = finOffset;
        ret.slice = perforatedNode;
        return ret;
    }
    void finishPool() {
        // This function should only be called while the backwards half of the pool
        // is being filled in. The backwards half of the pool is always in a state
        // where it is sane. Everything that needs to be done here is for "sanity's sake".
        // The per-buffer pools need to be reset, and we need to record the size of the pool.
        IonSpew(IonSpew_Pools, "[%d] Finishing pool %d", id, numDumps);
        JS_ASSERT(inBackref);
        PoolInfo newPoolInfo = getPoolData();
        if (newPoolInfo.size == 0) {
            // The code below also creates a new pool, but that is not necessary, since
            // the pools have not been modified at all.
            new (&perforation) BufferOffset();
            perforatedNode = nullptr;
            inBackref = false;
            IonSpew(IonSpew_Pools, "[%d] Aborting because the pool is empty", id);
            // Bail out early, since we don't want to even pretend these pools exist.
            return;
        }
        JS_ASSERT(perforatedNode != nullptr);
        if (numDumps >= (1 << logBasePoolInfo) && (numDumps & (numDumps - 1)) == 0) {
            // Need to resize the poolInfo array; double its capacity.
            PoolInfo *tmp = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * numDumps * 2));
            if (tmp == nullptr) {
                this->fail_oom();
                return;
            }
            memcpy(tmp, poolInfo, sizeof(PoolInfo) * numDumps);
            poolInfo = tmp;
        }

        // In order to figure out how to fix up the loads for the second half of the pool,
        // we need to find where the parts of the pool that have already been laid out end.
        int poolOffset = perforation.getOffset();
        int magicAlign = getInfo(numDumps - 1).finalPos - getInfo(numDumps - 1).offset;
        poolOffset += magicAlign;
        poolOffset += headerSize;
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            poolOffset = pools[poolIdx].align(poolOffset);
            poolOffset += pools[poolIdx].numEntries * pools[poolIdx].immSize;
        }
        mozilla::Array<LoadOffsets, 1 << poolKindBits> outcasts;
        mozilla::Array<uint8_t *, 1 << poolKindBits> outcastEntries;
        // All of the pool loads referred to by this code are going to
        // need fixing up here.
        int skippedBytes = 0;
        for (int poolIdx = numPoolKinds - 1; poolIdx >= 0; poolIdx--) {
            Pool *p = pools[poolIdx].other;
            JS_ASSERT(p != nullptr);
            unsigned int idx = p->numEntries - 1;
            // Allocate space for tracking information that needs to be propagated to the next pool,
            // as well as space for quickly updating the pool entries in the current pool to remove
            // the entries that don't actually fit. I probably should change this over to a vector.
            outcastEntries[poolIdx] = new uint8_t[p->getPoolSize()];
            bool *preservedEntries = new bool[p->numEntries];
            // Hacks on top of Hacks!
            // The patching code takes in the address of the instruction to be patched,
            // and the "address" of the element in the pool that we want to load.
            // However, since the code isn't actually in an array, we need to lie about
            // the address that the pool is in. Furthermore, since the offsets are
            // technically from the beginning of the FORWARD reference section, we have
            // to lie to ourselves about where this pool starts in order to make sure
            // the distance into the pool is interpreted correctly.
            // There is a more elegant way to fix this that will need to be implemented
            // eventually. We will want to provide the fixup function with a method to
            // convert from a 'token' into a pool offset.
            poolOffset = p->align(poolOffset);
            int numSkips = 0;
            int fakePoolOffset = poolOffset - pools[poolIdx].numEntries * pools[poolIdx].immSize;
            for (BufferOffset *iter = p->loadOffsets.end() - 1;
                 iter != p->loadOffsets.begin() - 1; --iter, --idx)
            {

                IonSpew(IonSpew_Pools, "[%d] Linking entry %d in pool %d", id, idx + pools[poolIdx].numEntries, poolIdx);
                JS_ASSERT(iter->getOffset() >= perforation.getOffset());
                // Everything here is known, so we can safely do the necessary substitutions.
                Inst *inst = this->getInst(*iter);
                // Manually compute the offset, including a possible bias.
                // Also take into account the whole size of the pool that is being placed.
                int codeOffset = fakePoolOffset - iter->getOffset() - newPoolInfo.size + numSkips * p->immSize - skippedBytes;
                // That is, patchConstantPoolLoad wants to be handed the address of the
                // pool entry that is being loaded. We need to do a non-trivial amount
                // of math here, since the pool that we've made does not actually reside there
                // in memory.
                IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign);
                if (!Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign)) {
                    // NOTE: if removing this entry happens to change the alignment of the next
                    // block, chances are you will have a bad time.
                    // ADDENDUM: this CANNOT happen on ARM, because the only elements that
                    // fall into this case are doubles loaded via vfp, but they will also be
                    // the last pool, which means it cannot affect the alignment of any other
                    // sub-pools.
                    IonSpew(IonSpew_Pools, "[%d] ***Offset was still out of range!*** (%d)", id, codeOffset - magicAlign);
                    IonSpew(IonSpew_Pools, "[%d] Too complicated; bailing", id);
                    this->fail_bail();
                    // Only free up to the current offset.
                    for (int pi = poolIdx; pi < numPoolKinds; pi++)
                        delete[] outcastEntries[pi];
                    delete[] preservedEntries;
                    return;
                } else {
                    preservedEntries[idx] = true;
                }
            }
            // Remove the elements of the pool that should not be there (YAY, MEMCPY).
            unsigned int idxDest = 0;
            // If no elements were skipped, no expensive copy is necessary.
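            // Otherwise, compact the surviving entries so they are contiguous again.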
            if (numSkips != 0) {
                for (idx = 0; idx < p->numEntries; idx++) {
                    if (preservedEntries[idx]) {
                        if (idx != idxDest) {
                            memcpy(&p->poolData[idxDest * p->immSize],
                                   &p->poolData[idx * p->immSize],
                                   p->immSize);
                        }
                        idxDest++;
                    }
                }
                p->numEntries -= numSkips;
            }
            poolOffset += p->numEntries * p->immSize;
            delete[] preservedEntries;
            preservedEntries = nullptr;
        }
        // Bind the current pool to the perforation point.
        Pool **tmp = &perforatedNode->data;
        *tmp = static_cast<Pool*>(this->LifoAlloc_.alloc(sizeof(Pool) * numPoolKinds));
        if (*tmp == nullptr) {
            this->fail_oom();
            for (int pi = 0; pi < numPoolKinds; pi++)
                delete[] outcastEntries[pi];
            return;
        }
        // The above operations may have changed the size of pools!
        // Recalibrate the size of the pool.
        newPoolInfo = getPoolData();
        poolInfo[numDumps] = newPoolInfo;
        poolSize += poolInfo[numDumps].size;
        numDumps++;

        memcpy(*tmp, pools, sizeof(Pool) * numPoolKinds);

        // Reset everything to the state that it was in when we started.
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            if (!pools[poolIdx].reset(this->LifoAlloc_)) {
                this->fail_oom();
                for (int pi = 0; pi < numPoolKinds; pi++)
                    delete[] outcastEntries[pi];
                return;
            }
        }
        new (&perforation) BufferOffset();
        perforatedNode = nullptr;
        inBackref = false;

        // Now that the backwards pool has been emptied, and a new forward pool
        // has been allocated, it is time to populate the new forward pool with
        // any entries that couldn't fit in the backwards pool.
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            // Technically, the innermost pool will never have this issue, but it is easier
            // to just handle this case.
            // Since the pool entries were filled back-to-front, and in the next buffer the
            // elements should be front-to-back, this insertion also needs to proceed backwards.
            int idx = outcasts[poolIdx].length();
            for (BufferOffset *iter = outcasts[poolIdx].end() - 1;
                 iter != outcasts[poolIdx].begin() - 1;
                 --iter, --idx) {
                pools[poolIdx].updateLimiter(*iter);
                Inst *inst = this->getInst(*iter);
                Asm::insertTokenIntoTag(pools[poolIdx].instSize, (uint8_t*)inst, outcasts[poolIdx].end() - 1 - iter);
                pools[poolIdx].insertEntry(&outcastEntries[poolIdx][idx * pools[poolIdx].immSize], *iter, this->LifoAlloc_);
            }
            delete[] outcastEntries[poolIdx];
        }
        // This (*2) is not technically kosher, but I want to get this bug fixed.
        // It should actually be guardSize + the size of the instruction that we're attempting
        // to insert. Unfortunately that value is never passed in. On ARM, these instructions
        // are always 4 bytes, so guardSize is legitimate to use.
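        // Estimate where a fresh pool would start: everything emitted so far, plus room
        // for a guard and the pending instruction (approximated by guardSize), plus the
        // pool header, and re-run the forward range checks against the reset pools.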
        poolOffset = this->size() + guardSize * 2;
        poolOffset += headerSize;
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            // There can still be an awkward situation where the element that triggered the
            // initial dump didn't fit into the pool backwards, and now still does not fit into
            // this pool. Then it is necessary to go and dump this pool (note: this is almost
            // certainly being called from dumpPool()).
            poolOffset = pools[poolIdx].align(poolOffset);
            if (pools[poolIdx].checkFull(poolOffset)) {
                // ONCE AGAIN, UH-OH, TIME TO BAIL
                dumpPool();
                break;
            }
            poolOffset += pools[poolIdx].getPoolSize();
        }
    }

    void dumpPool() {
        JS_ASSERT(!inBackref);
        IonSpew(IonSpew_Pools, "[%d] Attempting to dump the pool", id);
        PoolInfo newPoolInfo = getPoolData();
        if (newPoolInfo.size == 0) {
            // If there is no data in the pool being dumped, don't dump anything.
            inBackref = true;
            IonSpew(IonSpew_Pools, "[%d] Abort, no pool data", id);
            return;
        }

        IonSpew(IonSpew_Pools, "[%d] Dumping %d bytes", id, newPoolInfo.size);
        if (!perforation.assigned()) {
            IonSpew(IonSpew_Pools, "[%d] No perforation point selected, generating a new one", id);
            // There isn't a perforation here; we need to dump the pool with a guard.
            BufferOffset branch = this->nextOffset();
            bool shouldMarkAsBranch = this->isNextBranch();
            this->markNextAsBranch();
            this->putBlob(guardSize, nullptr);
            BufferOffset afterPool = this->nextOffset();
            Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
            markGuard();
            perforatedNode->isNatural = false;
            if (shouldMarkAsBranch)
                this->markNextAsBranch();
        }

        // We have a perforation. Time to cut the instruction stream, patch in the pool
        // and possibly re-arrange the pool to accommodate its new location.
        int poolOffset = perforation.getOffset();
        int magicAlign = getInfo(numDumps - 1).finalPos - getInfo(numDumps - 1).offset;
        poolOffset += magicAlign;
        poolOffset += headerSize;
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            mozilla::DebugOnly<bool> beforePool = true;
            Pool *p = &pools[poolIdx];
            // Any entries that happened to be after the place we put our pool will need to be
            // switched from the forward-referenced pool to the backward-referenced pool.
            int idx = 0;
            for (BufferOffset *iter = p->loadOffsets.begin();
                 iter != p->loadOffsets.end(); ++iter, ++idx)
            {
                if (iter->getOffset() >= perforation.getOffset()) {
                    IonSpew(IonSpew_Pools, "[%d] Pushing entry %d in pool %d into the backwards section.", id, idx, poolIdx);
                    // Insert this into the rear part of the pool.
                    int offset = idx * p->immSize;
                    p->other->insertEntry(&p->poolData[offset], BufferOffset(*iter), this->LifoAlloc_);
                    // Update the limiting entry for this pool.
                    p->other->updateLimiter(*iter);

                    // Update the current pool to report fewer entries. They are now in the
                    // backwards section.
                    p->numEntries--;
                    beforePool = false;
                } else {
                    JS_ASSERT(beforePool);
                    // Align the pool offset to the alignment of this pool.
                    // align() only aligns when the pool has data in it, which is what we want:
                    // don't align when all entries will end up in the backwards half of the pool.
                    poolOffset = p->align(poolOffset);
                    IonSpew(IonSpew_Pools, "[%d] Entry %d in pool %d is before the pool.", id, idx, poolIdx);
                    // Everything here is known, so we can safely do the necessary substitutions.
                    Inst *inst = this->getInst(*iter);
                    // We need to manually compute the offset, including a possible bias.
                    int codeOffset = poolOffset - iter->getOffset();
                    // That is, patchConstantPoolLoad wants to be handed the address of the
                    // pool entry that is being loaded. We need to do a non-trivial amount
                    // of math here, since the pool that we've made does not actually reside there
                    // in memory.
                    IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign);
                    Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign);
                }
            }
            // Some number of entries have been positively identified as being
            // in this section of the pool. Before processing the next pool,
            // update the offset from the beginning of the buffer.
            poolOffset += p->numEntries * p->immSize;
        }
        poolOffset = footerSize;
        inBackref = true;
        for (int poolIdx = numPoolKinds - 1; poolIdx >= 0; poolIdx--) {
            Pool *tmp = pools[poolIdx].other;
            if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) {
                // GNAAAH. While we rotated elements into the back half, one of them filled up.
                // Now, dumping the back half is necessary...
                finishPool();
                break;
            }
        }
    }

    void flushPool() {
        if (this->oom())
            return;
        IonSpew(IonSpew_Pools, "[%d] Requesting a pool flush", id);
        if (!inBackref)
            dumpPool();
        finishPool();
    }
    void patchBranch(Inst *i, int curpool, BufferOffset branch) {
        const Inst *ci = i;
        ptrdiff_t offset = Asm::getBranchOffset(ci);
        // If the offset is 0, then there is nothing to do.
        if (offset == 0)
            return;
        int destOffset = branch.getOffset() + offset;
        if (offset > 0) {
            while (curpool < numDumps && poolInfo[curpool].offset <= destOffset) {
                offset += poolInfo[curpool].size;
                curpool++;
            }
        } else {
            // Ignore the pool that comes next, since this is a backwards branch.
            curpool--;
            while (curpool >= 0 && poolInfo[curpool].offset > destOffset) {
                offset -= poolInfo[curpool].size;
                curpool--;
            }
            // Can't assert anything here, since the first pool may be after the target.
        }
        Asm::retargetNearBranch(i, offset, false);
    }

    // Mark the next instruction as a valid guard. This means we can place a pool here.
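    // (This only records a perforation point via perforate(); nothing is emitted unless a
    // later dump actually lands here.)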
    void markGuard() {
        // If we are in a no-pool zone, then there is no point in dog-earing
        // this branch as a place to go back to.
        if (canNotPlacePool)
            return;
        // There is no point in trying to grab a new slot if we've already
        // found one and are in the process of filling it in.
        if (inBackref)
            return;
        perforate();
    }
    void enterNoPool() {
        if (!canNotPlacePool && !perforation.assigned()) {
            // Embarrassing mode: the Assembler requests the start of a no-pool section
            // and there have been no valid places where a pool could be dumped thus far.
            // If a pool were to fill up before this no-pool section ends, we would need to go
            // back in the stream and insert a pool guard after the fact. This is feasible, but
            // for now, it is easier to just allocate a junk instruction, default it to a nop,
            // and finally, if the pool *is* needed, patch the nop into a pool guard.
            // What the assembler requests:

            // #request no-pool zone
            // push pc
            // blx r12
            // #end no-pool zone

            // however, if we would need to insert a pool, and there is no perforation point...
            // so, actual generated code:

            // b next; <= perforation point
            // next:
            // #beginning of no-pool zone
            // push pc
            // blx r12

            BufferOffset branch = this->nextOffset();
            this->markNextAsBranch();
            this->putBlob(guardSize, nullptr);
            BufferOffset afterPool = this->nextOffset();
            Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
            markGuard();
            if (perforatedNode != nullptr)
                perforatedNode->isNatural = false;
        }
        canNotPlacePool++;
    }
    void leaveNoPool() {
        canNotPlacePool--;
    }
    int size() const {
        return uncheckedSize();
    }
    Pool *getPool(int idx) {
        return &pools[idx];
    }
    void markNextAsBranch() {
        // If the previous thing inserted was the last instruction of
        // the node, then whoops, we want to mark the first instruction of
        // the next node.
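        // ensureSpace() guarantees the tail slice can hold one more instruction, so the
        // bit set below refers to the slice the upcoming instruction will actually land in.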
        this->ensureSpace(InstBaseSize);
        JS_ASSERT(*this->getTail() != nullptr);
        (*this->getTail())->markNextAsBranch();
    }
    bool isNextBranch() {
        JS_ASSERT(*this->getTail() != nullptr);
        return (*this->getTail())->isNextBranch();
    }

    int uncheckedSize() const {
        PoolInfo pi = getPoolData();
        int codeEnd = this->nextOffset().getOffset();
        return (codeEnd - pi.offset) + pi.finalPos;
    }
    ptrdiff_t curDumpsite;
    void resetCounter() {
        curDumpsite = 0;
    }
    ptrdiff_t poolSizeBefore(ptrdiff_t offset) const {
        int cur = 0;
        while (cur < numDumps && poolInfo[cur].offset <= offset)
            cur++;
        // poolInfo[cur] is now past the offset:
        // either this is the first one, or the previous is the last one we care about.
        if (cur == 0)
            return 0;
        return poolInfo[cur - 1].finalPos - poolInfo[cur - 1].offset;
    }

  private:
    void getPEPool(PoolEntry pe, Pool **retP, int32_t *retOffset, int32_t *poolNum) const {
        int poolKind = pe.poolKind();
        Pool *p = nullptr;
        uint32_t offset = pe.offset() * pools[poolKind].immSize;
        int idx;
        for (idx = 0; idx < numDumps; idx++) {
            p = &poolInfo[idx].slice->data[poolKind];
            if (p->getPoolSize() > offset)
                break;
            offset -= p->getPoolSize();
            p = p->other;
            if (p->getPoolSize() > offset)
                break;
            offset -= p->getPoolSize();
            p = nullptr;
        }
        if (poolNum != nullptr)
            *poolNum = idx;
        // If this offset is contained in any finished pool, forward or backwards, p now
        // points to that pool; if it is not in any finished pool (it should then be in the
        // currently building pool), p is nullptr.
        if (p == nullptr) {
            p = &pools[poolKind];
            if (offset >= p->getPoolSize()) {
                offset -= p->getPoolSize();
                p = p->other;
            }
        }
        JS_ASSERT(p != nullptr);
        JS_ASSERT(offset < p->getPoolSize());
        *retP = p;
        *retOffset = offset;
    }
    uint8_t *getPoolEntry(PoolEntry pe) {
        Pool *p;
        int32_t offset;
        getPEPool(pe, &p, &offset, nullptr);
        return &p->poolData[offset];
    }
    size_t getPoolEntrySize(PoolEntry pe) {
        int idx = pe.poolKind();
        return pools[idx].immSize;
    }

  public:
    uint32_t poolEntryOffset(PoolEntry pe) const {
        Pool *realPool;
        // Note: offset is in bytes, not entries.
        int32_t offset;
        int32_t poolNum;
        getPEPool(pe, &realPool, &offset, &poolNum);
        PoolInfo *pi = &poolInfo[poolNum];
        Pool *poolGroup = pi->slice->data;
        uint32_t start = pi->finalPos - pi->size + headerSize;
        // The order of the pools is:
        //  A B C C_Rev B_Rev A_Rev, so in the initial pass,
        //  go through the pools forwards, and in the second pass
        //  go through them in reverse order.
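        // For example, with two kinds a dumped pool reads
        //  header | A | B | B_Rev | A_Rev | footer,
        // and "start" below is advanced past each sub-pool until the one holding the
        // entry is reached.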
        for (int idx = 0; idx < numPoolKinds; idx++) {
            if (&poolGroup[idx] == realPool) {
                return start + offset;
            }
            start = poolGroup[idx].addPoolSize(start);
        }
        for (int idx = numPoolKinds - 1; idx >= 0; idx--) {
            if (poolGroup[idx].other == realPool) {
                return start + offset;
            }
            start = poolGroup[idx].other->addPoolSize(start);
        }
        MOZ_ASSUME_UNREACHABLE("Entry is not in a pool");
    }
    void writePoolEntry(PoolEntry pe, uint8_t *buff) {
        size_t size = getPoolEntrySize(pe);
        uint8_t *entry = getPoolEntry(pe);
        memcpy(entry, buff, size);
    }
    void readPoolEntry(PoolEntry pe, uint8_t *buff) {
        size_t size = getPoolEntrySize(pe);
        uint8_t *entry = getPoolEntry(pe);
        memcpy(buff, entry, size);
    }

};
} // namespace jit
} // namespace js
#endif /* jit_shared_IonAssemblerBufferWithConstantPools_h */