Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #ifndef jit_shared_IonAssemblerBufferWithConstantPools_h |
michael@0 | 8 | #define jit_shared_IonAssemblerBufferWithConstantPools_h |
michael@0 | 9 | |
michael@0 | 10 | #include "mozilla/DebugOnly.h" |
michael@0 | 11 | |
michael@0 | 12 | #include "assembler/wtf/SegmentedVector.h" |
michael@0 | 13 | #include "jit/IonSpewer.h" |
michael@0 | 14 | #include "jit/shared/IonAssemblerBuffer.h" |
michael@0 | 15 | |
michael@0 | 16 | namespace js { |
michael@0 | 17 | namespace jit { |
michael@0 | 18 | typedef Vector<BufferOffset, 512, OldIonAllocPolicy> LoadOffsets; |
michael@0 | 19 | |
michael@0 | 20 | struct Pool |
michael@0 | 21 | : public OldIonAllocPolicy |
michael@0 | 22 | { |
michael@0 | 23 | const int maxOffset; |
michael@0 | 24 | const int immSize; |
michael@0 | 25 | const int instSize; |
michael@0 | 26 | const int bias; |
michael@0 | 27 | |
michael@0 | 28 | private: |
michael@0 | 29 | const int alignment; |
michael@0 | 30 | |
michael@0 | 31 | public: |
michael@0 | 32 | const bool isBackref; |
michael@0 | 33 | const bool canDedup; |
michael@0 | 34 | // "other" is the backwards half of this pool; it is held in another Pool structure. |
michael@0 | 35 | Pool *other; |
michael@0 | 36 | uint8_t *poolData; |
michael@0 | 37 | uint32_t numEntries; |
michael@0 | 38 | uint32_t buffSize; |
michael@0 | 39 | LoadOffsets loadOffsets; |
michael@0 | 40 | |
michael@0 | 41 | // When filling pools where the size of an immediate is larger |
michael@0 | 42 | // than the size of an instruction, we find we're in a case where the distance between the |
michael@0 | 43 | // next instruction and the next pool slot is increasing! |
michael@0 | 44 | // Moreover, if we want to do fancy things like deduplicate pool entries at |
michael@0 | 45 | // dump time, we may not know the location in a pool (and thus the limiting load) |
michael@0 | 46 | // until very late. |
michael@0 | 47 | // Lastly, it may be beneficial to interleave the pools. I have absolutely no idea |
michael@0 | 48 | // how that will work, but my suspicions are that it will be difficult. |
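    // For example, with 4-byte instructions and 8-byte immediates, each successive
    // load is 4 bytes further along in the code but its pool slot is 8 bytes further
    // into the pool, so the load-to-slot distance grows by 4 bytes per entry and the
    // "limiting" use can change as entries are added.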
michael@0 | 49 | |
michael@0 | 50 | BufferOffset limitingUser; |
michael@0 | 51 | int limitingUsee; |
michael@0 | 52 | |
michael@0 | 53 | Pool(int maxOffset_, int immSize_, int instSize_, int bias_, int alignment_, LifoAlloc &LifoAlloc_, |
michael@0 | 54 | bool isBackref_ = false, bool canDedup_ = false, Pool *other_ = nullptr) |
michael@0 | 55 | : maxOffset(maxOffset_), immSize(immSize_), instSize(instSize_), |
michael@0 | 56 | bias(bias_), alignment(alignment_), |
michael@0 | 57 | isBackref(isBackref_), canDedup(canDedup_), other(other_), |
michael@0 | 58 | poolData(static_cast<uint8_t *>(LifoAlloc_.alloc(8*immSize))), numEntries(0), |
michael@0 | 59 | buffSize(8), loadOffsets(), limitingUser(), limitingUsee(INT_MIN) |
michael@0 | 60 | { |
michael@0 | 61 | } |
michael@0 | 62 | static const int garbage=0xa5a5a5a5; |
michael@0 | 63 | Pool() : maxOffset(garbage), immSize(garbage), instSize(garbage), bias(garbage), |
michael@0 | 64 | alignment(garbage), isBackref(garbage), canDedup(garbage), other((Pool*)garbage) |
michael@0 | 65 | { |
michael@0 | 66 | } |
michael@0 | 67 | // Sometimes, when we are adding large values to a pool, the limiting use may change. |
michael@0 | 68 | // Handle this case. nextInst is the offset of the instruction that uses the new entry. |
michael@0 | 69 | void updateLimiter(BufferOffset nextInst) { |
michael@0 | 70 | int oldRange, newRange; |
michael@0 | 71 | if (isBackref) { |
michael@0 | 72 | // common expressions that are not subtracted: the location of the pool, ... |
michael@0 | 73 | oldRange = limitingUser.getOffset() - ((numEntries - limitingUsee) * immSize); |
michael@0 | 74 | newRange = nextInst.getOffset(); |
michael@0 | 75 | } else { |
michael@0 | 76 | oldRange = (limitingUsee * immSize) - limitingUser.getOffset(); |
michael@0 | 77 | newRange = (numEntries * immSize) - nextInst.getOffset(); |
michael@0 | 78 | } |
michael@0 | 79 | if (!limitingUser.assigned() || newRange > oldRange) { |
michael@0 | 80 | // We have a new largest range! |
michael@0 | 81 | limitingUser = nextInst; |
michael@0 | 82 | limitingUsee = numEntries; |
michael@0 | 83 | } |
michael@0 | 84 | } |
michael@0 | 85 | // checkFull is called before any modifications have been made. |
michael@0 | 86 | // It answers the question "if we were to add this instruction and pool entry, |
michael@0 | 87 | // would we be in an invalid state?". If it returns true, then it is in fact |
michael@0 | 88 | // time for a "pool dump". |
michael@0 | 89 | |
michael@0 | 90 | // poolOffset is the distance from the end of the current section to the end of the pool. |
michael@0 | 91 | // For the last section of the pool, this will be the size of the footer. |
michael@0 | 92 | // For the first section of the pool, it will be the size of every other |
michael@0 | 93 | // section plus the footer. |
michael@0 | 94 | // codeOffset is the instruction-distance from the pool to the beginning of the buffer. |
michael@0 | 95 | // Since codeOffset only includes instructions, the number is the same for |
michael@0 | 96 | // the beginning and end of the pool. |
michael@0 | 97 | // instOffset is the offset from the beginning of the buffer to the instruction that |
michael@0 | 98 | // is about to be placed. |
michael@0 | 99 | bool checkFullBackref(int poolOffset, int codeOffset) { |
michael@0 | 100 | if (!limitingUser.assigned()) |
michael@0 | 101 | return false; |
michael@0 | 102 | signed int distance = |
michael@0 | 103 | limitingUser.getOffset() + bias |
michael@0 | 104 | - codeOffset + poolOffset + |
michael@0 | 105 | (numEntries - limitingUsee + 1) * immSize; |
michael@0 | 106 | if (distance >= maxOffset) |
michael@0 | 107 | return true; |
michael@0 | 108 | return false; |
michael@0 | 109 | } |
michael@0 | 110 | |
michael@0 | 111 | // checkFull answers the question "If a pool were placed at poolOffset, would |
michael@0 | 112 | // any reference into the pool be out of range?". It is meant to be used as instructions |
michael@0 | 113 | // and elements are inserted, to determine if a saved perforation point needs to be used. |
michael@0 | 114 | |
michael@0 | 115 | bool checkFull(int poolOffset) { |
michael@0 | 116 | // Inserting an instruction into the stream can |
michael@0 | 117 | // push any of the pools out of range. |
michael@0 | 118 | // Similarly, inserting into a pool can push the pool entry out of range |
michael@0 | 119 | JS_ASSERT(!isBackref); |
michael@0 | 120 | // Not full if there aren't any uses. |
michael@0 | 121 | if (!limitingUser.assigned()) { |
michael@0 | 122 | return false; |
michael@0 | 123 | } |
michael@0 | 124 | // We're considered "full" when: |
michael@0 | 125 | // bias + abs(poolOffset + limitingUsee * immSize - limitingUser) + sizeof(other_pools) >= maxOffset |
michael@0 | 126 | if (poolOffset + limitingUsee * immSize - (limitingUser.getOffset() + bias) >= maxOffset) { |
michael@0 | 127 | return true; |
michael@0 | 128 | } |
michael@0 | 129 | return false; |
michael@0 | 130 | } |
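    // Worked example of the check above (illustrative numbers): with bias == 8,
    // immSize == 4, maxOffset == 4096, the limiting user at offset 100 and
    // limitingUsee == 0, a candidate poolOffset of 4204 gives
    // 4204 + 0 * 4 - (100 + 8) == 4096 >= 4096, so the pool must be dumped before
    // this instruction is added.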
michael@0 | 131 | |
michael@0 | 132 | // By the time this function is called, we'd damn well better know that this is going to succeed. |
michael@0 | 133 | uint32_t insertEntry(uint8_t *data, BufferOffset off, LifoAlloc &LifoAlloc_) { |
michael@0 | 134 | if (numEntries == buffSize) { |
michael@0 | 135 | buffSize <<= 1; |
michael@0 | 136 | uint8_t *tmp = static_cast<uint8_t*>(LifoAlloc_.alloc(immSize * buffSize)); |
michael@0 | 137 | if (tmp == nullptr) { |
michael@0 | 138 | buffSize = 0; |
michael@0 | 139 | return -1; |
michael@0 | 140 | } |
michael@0 | 141 | memcpy(tmp, poolData, immSize * numEntries); |
michael@0 | 142 | poolData = tmp; |
michael@0 | 143 | } |
michael@0 | 144 | memcpy(&poolData[numEntries * immSize], data, immSize); |
michael@0 | 145 | loadOffsets.append(off.getOffset()); |
michael@0 | 146 | return numEntries++; |
michael@0 | 147 | } |
michael@0 | 148 | |
michael@0 | 149 | bool reset(LifoAlloc &a) { |
michael@0 | 150 | numEntries = 0; |
michael@0 | 151 | buffSize = 8; |
michael@0 | 152 | poolData = static_cast<uint8_t*>(a.alloc(buffSize * immSize)); |
michael@0 | 153 | if (poolData == nullptr) |
michael@0 | 154 | return false; |
michael@0 | 155 | |
michael@0 | 156 | void *otherSpace = a.alloc(sizeof(Pool)); |
michael@0 | 157 | if (otherSpace == nullptr) |
michael@0 | 158 | return false; |
michael@0 | 159 | |
michael@0 | 160 | other = new (otherSpace) Pool(other->maxOffset, other->immSize, other->instSize, |
michael@0 | 161 | other->bias, other->alignment, a, other->isBackref, |
michael@0 | 162 | other->canDedup); |
michael@0 | 163 | new (&loadOffsets) LoadOffsets; |
michael@0 | 164 | |
michael@0 | 165 | limitingUser = BufferOffset(); |
michael@0 | 166 | limitingUsee = -1; |
michael@0 | 167 | return true; |
michael@0 | 168 | |
michael@0 | 169 | } |
michael@0 | 170 | // WARNING: This will not always align values. It will only |
michael@0 | 171 | // align to the requirement of the pool. If the pool is empty, |
michael@0 | 172 | // there is nothing to be aligned, so it will not perform any alignment |
michael@0 | 173 | uint8_t* align(uint8_t *ptr) { |
michael@0 | 174 | return (uint8_t*)align((uint32_t)ptr); |
michael@0 | 175 | } |
michael@0 | 176 | uint32_t align(uint32_t ptr) { |
michael@0 | 177 | if (numEntries == 0) |
michael@0 | 178 | return ptr; |
michael@0 | 179 | return (ptr + alignment-1) & ~(alignment-1); |
michael@0 | 180 | } |
michael@0 | 181 | uint32_t forceAlign(uint32_t ptr) { |
michael@0 | 182 | return (ptr + alignment-1) & ~(alignment-1); |
michael@0 | 183 | } |
michael@0 | 184 | bool isAligned(uint32_t ptr) { |
michael@0 | 185 | return ptr == align(ptr); |
michael@0 | 186 | } |
michael@0 | 187 | int getAlignment() { |
michael@0 | 188 | return alignment; |
michael@0 | 189 | } |
michael@0 | 190 | |
michael@0 | 191 | uint32_t addPoolSize(uint32_t start) { |
michael@0 | 192 | start = align(start); |
michael@0 | 193 | start += immSize * numEntries; |
michael@0 | 194 | return start; |
michael@0 | 195 | } |
michael@0 | 196 | uint8_t *addPoolSize(uint8_t *start) { |
michael@0 | 197 | start = align(start); |
michael@0 | 198 | start += immSize * numEntries; |
michael@0 | 199 | return start; |
michael@0 | 200 | } |
michael@0 | 201 | uint32_t getPoolSize() { |
michael@0 | 202 | return immSize * numEntries; |
michael@0 | 203 | } |
michael@0 | 204 | }; |
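
// Illustrative sketch (disabled): how a backend might pair a forward pool with its
// backward half and drive the updateLimiter/insertEntry/checkFull cycle. The numeric
// parameters and the function name are hypothetical, not those of any real port.
#if 0
static void examplePoolUsage(LifoAlloc &alloc)
{
    // Hypothetical parameters: 4 KB reach, 4-byte entries and instructions,
    // an 8-byte bias and 4-byte alignment.
    Pool backwardHalf(4096, 4, 4, 8, 4, alloc, /* isBackref = */ true);
    Pool forwardPool(4096, 4, 4, 8, 4, alloc, /* isBackref = */ false,
                     /* canDedup = */ false, &backwardHalf);

    uint32_t imm = 0xdeadbeef;
    BufferOffset load(0); // offset of the instruction that references the entry

    // Record the use (so the pool knows its most constrained load), then append the data.
    forwardPool.updateLimiter(load);
    uint32_t index = forwardPool.insertEntry(reinterpret_cast<uint8_t *>(&imm), load, alloc);

    // Before emitting more code, ask whether a pool placed at a candidate offset would
    // push the oldest reference out of range; if so, the buffer must dump the pool.
    bool mustDump = forwardPool.checkFull(/* poolOffset = */ 1024);
    (void) index;
    (void) mustDump;
}
#endif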
michael@0 | 205 | |
michael@0 | 206 | |
michael@0 | 207 | template <int SliceSize, int InstBaseSize> |
michael@0 | 208 | struct BufferSliceTail : public BufferSlice<SliceSize> { |
michael@0 | 209 | Pool *data; |
michael@0 | 210 | mozilla::Array<uint8_t, (SliceSize + (InstBaseSize * 8 - 1)) / (InstBaseSize * 8)> isBranch; |
michael@0 | 211 | bool isNatural : 1; |
michael@0 | 212 | BufferSliceTail *getNext() { |
michael@0 | 213 | return (BufferSliceTail *)this->next; |
michael@0 | 214 | } |
michael@0 | 215 | BufferSliceTail() : data(nullptr), isNatural(true) { |
michael@0 | 216 | memset(&isBranch[0], 0, sizeof(isBranch)); |
michael@0 | 217 | } |
michael@0 | 218 | void markNextAsBranch() { |
michael@0 | 219 | int idx = this->nodeSize / InstBaseSize; |
michael@0 | 220 | isBranch[idx >> 3] |= 1 << (idx & 0x7); |
michael@0 | 221 | } |
michael@0 | 222 | bool isNextBranch() { |
michael@0 | 223 | unsigned int size = this->nodeSize; |
michael@0 | 224 | if (size >= SliceSize) |
michael@0 | 225 | return false; |
michael@0 | 226 | int idx = size / InstBaseSize; |
michael@0 | 227 | return (isBranch[idx >> 3] >> (idx & 0x7)) & 1; |
michael@0 | 228 | } |
michael@0 | 229 | }; |
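
// For example, with InstBaseSize == 4 an instruction starting at byte 40 of a slice is
// instruction index 10, so markNextAsBranch() sets bit (10 & 0x7) == 2 of
// isBranch[10 >> 3] == isBranch[1], and isNextBranch() reads that same bit back.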
michael@0 | 230 | |
michael@0 | 231 | #if 0 |
michael@0 | 232 | static int getId() { |
michael@0 | 233 | if (MaybeGetIonContext()) |
michael@0 | 234 | return MaybeGetIonContext()->getNextAssemblerId(); |
michael@0 | 235 | return NULL_ID; |
michael@0 | 236 | } |
michael@0 | 237 | #endif |
michael@0 | 238 | static inline void spewEntry(uint8_t *ptr, int length) { |
michael@0 | 239 | #if IS_LITTLE_ENDIAN |
michael@0 | 240 | for (int idx = 0; idx < length; idx++) { |
michael@0 | 241 | IonSpewCont(IonSpew_Pools, "%02x", ptr[length - idx - 1]); |
michael@0 | 242 | if (((idx & 3) == 3) && (idx + 1 != length)) |
michael@0 | 243 | IonSpewCont(IonSpew_Pools, "_"); |
michael@0 | 244 | } |
michael@0 | 245 | #else |
michael@0 | 246 | for (int idx = 0; idx < length; idx++) { |
michael@0 | 247 | IonSpewCont(IonSpew_Pools, "%02x", ptr[idx]); |
michael@0 | 248 | if (((idx & 3) == 3) && (idx + 1 != length)) |
michael@0 | 249 | IonSpewCont(IonSpew_Pools, "_"); |
michael@0 | 250 | } |
michael@0 | 251 | #endif |
michael@0 | 252 | } |
michael@0 | 253 | // NOTE: Adding in the ability to retroactively insert a pool has consequences! |
michael@0 | 254 | // Most notably, Labels stop working. Normally, we create a label, later bind it. |
michael@0 | 255 | // When the label is bound, we back-patch all previous references to the label with |
michael@0 | 256 | // the correct offset. However, since a pool may be retroactively inserted, we don't |
michael@0 | 257 | // actually know what the final offset is going to be until much later. This will |
michael@0 | 258 | // happen (in some instances) after the pools have been finalized. Attempting to compute |
michael@0 | 259 | // the correct offsets for branches as the pools are finalized is quite infeasible. |
michael@0 | 260 | // Instead, I write *just* the number of instructions that will be jumped over, then |
michael@0 | 261 | // when we go to copy the instructions into the executable buffer, fix up all of the |
michael@0 | 262 | // offsets to include the pools. Since we have about 32 megabytes worth of offset, |
michael@0 | 263 | // I am not very worried about the pools moving it out of range. |
michael@0 | 264 | // Now, how exactly do we generate these? The first step is to identify which |
michael@0 | 265 | // instructions are actually branches that need to be fixed up. A single bit |
michael@0 | 266 | // per instruction should be enough to determine which ones are branches, but |
michael@0 | 267 | // we have no guarantee that all instructions are the same size, so the start of |
michael@0 | 268 | // each branch instruction will be marked with a bit (1 bit per byte). |
michael@0 | 269 | // Then we need to call up to the assembler to determine what the offset of the branch |
michael@0 | 270 | // is. The offset will be the number of instructions that are being skipped over |
michael@0 | 271 | // along with any processor bias. We then need to calculate the offset, including pools |
michael@0 | 272 | // and write that value into the buffer. At this point, we can write it into the |
michael@0 | 273 | // executable buffer, or the AssemblerBuffer, and copy the data over later. |
michael@0 | 274 | // Previously, this was all handled by the assembler, since the location |
michael@0 | 275 | // and size of pools were always known as soon as its location had been reached. |
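
// A sketch of the fixup arithmetic described above (disabled, illustrative only): a
// branch is initially encoded with a delta that counts instructions only; at copy time,
// every pool dumped between the branch and its target adds its size to the real
// distance. PoolInfoEntry is a stand-in for the PoolInfo records kept by the buffer below.
#if 0
struct PoolInfoEntry { int offset; int size; };

static int exampleAdjustForwardBranch(int branchOffset, int rawDelta,
                                      const PoolInfoEntry *dumps, int numDumps)
{
    int destOffset = branchOffset + rawDelta; // target, still in pool-free coordinates
    int adjusted = rawDelta;
    for (int i = 0; i < numDumps; i++) {
        // Each pool that lands at or before the target pushes the target further away.
        if (dumps[i].offset <= destOffset)
            adjusted += dumps[i].size;
    }
    return adjusted;
}
#endif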
michael@0 | 276 | |
michael@0 | 277 | // A class for indexing into constant pools. |
michael@0 | 278 | // Each time a pool entry is added, one of these is generated. |
michael@0 | 279 | // This can be supplied to read and write that entry after the fact. |
michael@0 | 280 | // And it can be used to get the address of the entry once the buffer |
michael@0 | 281 | // has been finalized, and an executable copy allocated. |
michael@0 | 282 | |
michael@0 | 283 | template <int SliceSize, int InstBaseSize, class Inst, class Asm, int poolKindBits> |
michael@0 | 284 | struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst> { |
michael@0 | 285 | private: |
michael@0 | 286 | mozilla::Array<int, 1 << poolKindBits> entryCount; |
michael@0 | 287 | static const int offsetBits = 32 - poolKindBits; |
michael@0 | 288 | public: |
michael@0 | 289 | |
michael@0 | 290 | class PoolEntry { |
michael@0 | 291 | template <int ss, int ibs, class i, class a, int pkb> |
michael@0 | 292 | friend struct AssemblerBufferWithConstantPool; |
michael@0 | 293 | uint32_t offset_ : offsetBits; |
michael@0 | 294 | uint32_t kind_ : poolKindBits; |
michael@0 | 295 | PoolEntry(int offset, int kind) : offset_(offset), kind_(kind) { |
michael@0 | 296 | } |
michael@0 | 297 | public: |
michael@0 | 298 | uint32_t encode() { |
michael@0 | 299 | uint32_t ret; |
michael@0 | 300 | memcpy(&ret, this, sizeof(uint32_t)); |
michael@0 | 301 | return ret; |
michael@0 | 302 | } |
michael@0 | 303 | PoolEntry(uint32_t bits) : offset_(((1u << offsetBits) - 1) & bits), |
michael@0 | 304 | kind_(bits >> offsetBits) { |
michael@0 | 305 | } |
michael@0 | 306 | PoolEntry() : offset_((1u << offsetBits) - 1), kind_((1u << poolKindBits) - 1) { |
michael@0 | 307 | } |
michael@0 | 308 | |
michael@0 | 309 | uint32_t poolKind() const { |
michael@0 | 310 | return kind_; |
michael@0 | 311 | } |
michael@0 | 312 | uint32_t offset() const { |
michael@0 | 313 | return offset_; |
michael@0 | 314 | } |
michael@0 | 315 | }; |
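
    // For example, with poolKindBits == 4 the low 28 bits of the encoded word hold the
    // entry's index within its kind and the high 4 bits hold the kind: a PoolEntry the
    // buffer builds with offset 7 and kind 2 encodes to (2u << 28) | 7, and decoding
    // that word recovers offset() == 7 and poolKind() == 2 (assuming the usual layout
    // where the first bit-field occupies the low bits, which is what the decoding
    // constructor above relies on).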
michael@0 | 316 | private: |
michael@0 | 317 | typedef BufferSliceTail<SliceSize, InstBaseSize> BufferSlice; |
michael@0 | 318 | typedef AssemblerBuffer<SliceSize, Inst> Parent; |
michael@0 | 319 | |
michael@0 | 320 | // The size of a guard instruction |
michael@0 | 321 | const int guardSize; |
michael@0 | 322 | // The size of the header that is put at the beginning of a full pool |
michael@0 | 323 | const int headerSize; |
michael@0 | 324 | // The size of a footer that is put in a pool after it is full. |
michael@0 | 325 | const int footerSize; |
michael@0 | 326 | // the number of sub-pools that we can allocate into. |
michael@0 | 327 | static const int numPoolKinds = 1 << poolKindBits; |
michael@0 | 328 | |
michael@0 | 329 | Pool *pools; |
michael@0 | 330 | |
michael@0 | 331 | // The buffer should be aligned to this address. |
michael@0 | 332 | const int instBufferAlign; |
michael@0 | 333 | |
michael@0 | 334 | // the number of times we've dumped the pool. |
michael@0 | 335 | int numDumps; |
michael@0 | 336 | struct PoolInfo { |
michael@0 | 337 | int offset; // the number of instructions before the start of the pool |
michael@0 | 338 | int size; // the size of the pool, including padding |
michael@0 | 339 | int finalPos; // the end of the buffer, in bytes from the beginning of the buffer |
michael@0 | 340 | BufferSlice *slice; |
michael@0 | 341 | }; |
michael@0 | 342 | PoolInfo *poolInfo; |
michael@0 | 343 | // we need to keep track of how large the pools are, so we can allocate |
michael@0 | 344 | // enough space for them later. This should include any amount of padding |
michael@0 | 345 | // necessary to keep the pools aligned. |
michael@0 | 346 | int poolSize; |
michael@0 | 347 | // Non-zero while the Assembler does not want us to dump a pool here (see enterNoPool/leaveNoPool). |
michael@0 | 348 | int canNotPlacePool; |
michael@0 | 349 | // Are we filling up the forwards or backwards pools? |
michael@0 | 350 | bool inBackref; |
michael@0 | 351 | // Cache the last place we saw an opportunity to dump the pool |
michael@0 | 352 | BufferOffset perforation; |
michael@0 | 353 | BufferSlice *perforatedNode; |
michael@0 | 354 | public: |
michael@0 | 355 | int id; |
michael@0 | 356 | private: |
michael@0 | 357 | static const int logBasePoolInfo = 3; |
michael@0 | 358 | BufferSlice ** getHead() { |
michael@0 | 359 | return (BufferSlice**)&this->head; |
michael@0 | 360 | } |
michael@0 | 361 | BufferSlice ** getTail() { |
michael@0 | 362 | return (BufferSlice**)&this->tail; |
michael@0 | 363 | } |
michael@0 | 364 | |
michael@0 | 365 | virtual BufferSlice *newSlice(LifoAlloc &a) { |
michael@0 | 366 | BufferSlice *tmp = static_cast<BufferSlice*>(a.alloc(sizeof(BufferSlice))); |
michael@0 | 367 | if (!tmp) { |
michael@0 | 368 | this->m_oom = true; |
michael@0 | 369 | return nullptr; |
michael@0 | 370 | } |
michael@0 | 371 | new (tmp) BufferSlice; |
michael@0 | 372 | return tmp; |
michael@0 | 373 | } |
michael@0 | 374 | public: |
michael@0 | 375 | AssemblerBufferWithConstantPool(int guardSize_, int headerSize_, int footerSize_, Pool *pools_, int instBufferAlign_) |
michael@0 | 376 | : guardSize(guardSize_), headerSize(headerSize_), |
michael@0 | 377 | footerSize(footerSize_), |
michael@0 | 378 | pools(pools_), |
michael@0 | 379 | instBufferAlign(instBufferAlign_), numDumps(0), |
michael@0 | 380 | poolInfo(nullptr), |
michael@0 | 381 | poolSize(0), canNotPlacePool(0), inBackref(false), |
michael@0 | 382 | perforatedNode(nullptr), id(-1) |
michael@0 | 383 | { |
michael@0 | 384 | for (int idx = 0; idx < numPoolKinds; idx++) { |
michael@0 | 385 | entryCount[idx] = 0; |
michael@0 | 386 | } |
michael@0 | 387 | } |
michael@0 | 388 | |
michael@0 | 389 | // We need to wait until an AutoIonContextAlloc is created by the |
michael@0 | 390 | // IonMacroAssembler, before allocating any space. |
michael@0 | 391 | void initWithAllocator() { |
michael@0 | 392 | poolInfo = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * (1 << logBasePoolInfo))); |
michael@0 | 393 | } |
michael@0 | 394 | |
michael@0 | 395 | const PoolInfo & getInfo(int x) const { |
michael@0 | 396 | static const PoolInfo nil = {0, 0, 0, nullptr}; |
michael@0 | 397 | if (x < 0 || x >= numDumps) |
michael@0 | 398 | return nil; |
michael@0 | 399 | return poolInfo[x]; |
michael@0 | 400 | } |
michael@0 | 401 | void executableCopy(uint8_t *dest_) { |
michael@0 | 402 | if (this->oom()) |
michael@0 | 403 | return; |
michael@0 | 404 | // TODO: only do this when the pool actually has a value in it |
michael@0 | 405 | flushPool(); |
michael@0 | 406 | for (int idx = 0; idx < numPoolKinds; idx++) { |
michael@0 | 407 | JS_ASSERT(pools[idx].numEntries == 0 && pools[idx].other->numEntries == 0); |
michael@0 | 408 | } |
michael@0 | 409 | typedef mozilla::Array<uint8_t, InstBaseSize> Chunk; |
michael@0 | 410 | mozilla::DebugOnly<Chunk *> start = (Chunk*)dest_; |
michael@0 | 411 | Chunk *dest = (Chunk*)(((uint32_t)dest_ + instBufferAlign - 1) & ~(instBufferAlign -1)); |
michael@0 | 412 | int curIndex = 0; |
michael@0 | 413 | int curInstOffset = 0; |
michael@0 | 414 | JS_ASSERT(start == dest); |
michael@0 | 415 | for (BufferSlice * cur = *getHead(); cur != nullptr; cur = cur->getNext()) { |
michael@0 | 416 | Chunk *src = (Chunk*)&cur->instructions; |
michael@0 | 417 | for (unsigned int idx = 0; idx < cur->size() / InstBaseSize; |
michael@0 | 418 | idx++, curInstOffset += InstBaseSize) { |
michael@0 | 419 | // Is the current instruction a branch? |
michael@0 | 420 | if (cur->isBranch[idx >> 3] & (1<<(idx&7))) { |
michael@0 | 421 | // It's a branch. fix up the branchiness! |
michael@0 | 422 | patchBranch((Inst*)&src[idx], curIndex, BufferOffset(curInstOffset)); |
michael@0 | 423 | } |
michael@0 | 424 | memcpy(&dest[idx], &src[idx], sizeof(Chunk)); |
michael@0 | 425 | } |
michael@0 | 426 | dest += cur->size() / InstBaseSize; |
michael@0 | 427 | if (cur->data != nullptr) { |
michael@0 | 428 | // have the repatcher move on to the next pool |
michael@0 | 429 | curIndex ++; |
michael@0 | 430 | // loop over all of the pools, copying them into place. |
michael@0 | 431 | uint8_t *poolDest = (uint8_t*)dest; |
michael@0 | 432 | Asm::writePoolHeader(poolDest, cur->data, cur->isNatural); |
michael@0 | 433 | poolDest += headerSize; |
michael@0 | 434 | for (int idx = 0; idx < numPoolKinds; idx++) { |
michael@0 | 435 | Pool *curPool = &cur->data[idx]; |
michael@0 | 436 | // align the pool. |
michael@0 | 437 | poolDest = curPool->align(poolDest); |
michael@0 | 438 | memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries); |
michael@0 | 439 | poolDest += curPool->immSize * curPool->numEntries; |
michael@0 | 440 | } |
michael@0 | 441 | // now go over the whole list backwards, and copy in the reverse portions |
michael@0 | 442 | for (int idx = numPoolKinds-1; idx >= 0; idx--) { |
michael@0 | 443 | Pool *curPool = cur->data[idx].other; |
michael@0 | 444 | // align the pool. |
michael@0 | 445 | poolDest = curPool->align(poolDest); |
michael@0 | 446 | memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries); |
michael@0 | 447 | poolDest += curPool->immSize * curPool->numEntries; |
michael@0 | 448 | } |
michael@0 | 449 | // write a footer in place |
michael@0 | 450 | Asm::writePoolFooter(poolDest, cur->data, cur->isNatural); |
michael@0 | 451 | poolDest += footerSize; |
michael@0 | 452 | // at this point, poolDest had better still be aligned to a chunk boundary. |
michael@0 | 453 | dest = (Chunk*) poolDest; |
michael@0 | 454 | } |
michael@0 | 455 | } |
michael@0 | 456 | } |
michael@0 | 457 | |
michael@0 | 458 | BufferOffset insertEntry(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data, PoolEntry *pe = nullptr) { |
michael@0 | 459 | if (this->oom() && !this->bail()) |
michael@0 | 460 | return BufferOffset(); |
michael@0 | 461 | int token; |
michael@0 | 462 | if (p != nullptr) { |
michael@0 | 463 | int poolId = p - pools; |
michael@0 | 464 | const char sigil = inBackref ? 'B' : 'F'; |
michael@0 | 465 | |
michael@0 | 466 | IonSpew(IonSpew_Pools, "[%d]{%c} Inserting entry into pool %d", id, sigil, poolId); |
michael@0 | 467 | IonSpewStart(IonSpew_Pools, "[%d] data is: 0x", id); |
michael@0 | 468 | spewEntry(data, p->immSize); |
michael@0 | 469 | IonSpewFin(IonSpew_Pools); |
michael@0 | 470 | } |
michael@0 | 471 | // insert the pool value |
michael@0 | 472 | if (inBackref) |
michael@0 | 473 | token = insertEntryBackwards(instSize, inst, p, data); |
michael@0 | 474 | else |
michael@0 | 475 | token = insertEntryForwards(instSize, inst, p, data); |
michael@0 | 476 | // now to get an instruction to write |
michael@0 | 477 | PoolEntry retPE; |
michael@0 | 478 | if (p != nullptr) { |
michael@0 | 479 | if (this->oom()) |
michael@0 | 480 | return BufferOffset(); |
michael@0 | 481 | int poolId = p - pools; |
michael@0 | 482 | IonSpew(IonSpew_Pools, "[%d] Entry has token %d, offset ~%d", id, token, size()); |
michael@0 | 483 | Asm::insertTokenIntoTag(instSize, inst, token); |
michael@0 | 484 | JS_ASSERT(poolId < (1 << poolKindBits)); |
michael@0 | 485 | JS_ASSERT(poolId >= 0); |
michael@0 | 486 | // Figure out the offset within like-kinded pool entries |
michael@0 | 487 | retPE = PoolEntry(entryCount[poolId], poolId); |
michael@0 | 488 | entryCount[poolId]++; |
michael@0 | 489 | } |
michael@0 | 490 | // Now inst is a valid thing to insert into the instruction stream |
michael@0 | 491 | if (pe != nullptr) |
michael@0 | 492 | *pe = retPE; |
michael@0 | 493 | return this->putBlob(instSize, inst); |
michael@0 | 494 | } |
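
    // Typical call pattern (sketch; kInt32Pool, instBits and imm are hypothetical names):
    // an assembler emitting a pool-referencing load builds the instruction bits, then does
    //     PoolEntry pe;
    //     BufferOffset off = buffer.insertEntry(sizeof(instBits), (uint8_t *)&instBits,
    //                                           buffer.getPool(kInt32Pool), (uint8_t *)&imm, &pe);
    // and keeps pe so the pool slot can be located or rewritten after finalization.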
michael@0 | 495 | |
michael@0 | 496 | uint32_t insertEntryBackwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) { |
michael@0 | 497 | // Unlike the forward case, if we insert an instruction without inserting |
michael@0 | 498 | // anything into a pool after a pool has been placed, we don't affect |
michael@0 | 499 | // anything relevant, so we can skip this check entirely! |
michael@0 | 500 | |
michael@0 | 501 | if (p == nullptr) |
michael@0 | 502 | return INT_MIN; |
michael@0 | 503 | // TODO: calculating offsets for the alignment requirements is *hard* |
michael@0 | 504 | // Instead, assume that we always add the maximum. |
michael@0 | 505 | int poolOffset = footerSize; |
michael@0 | 506 | Pool *cur, *tmp; |
michael@0 | 507 | // NOTE: we want to process the pools from last to first. |
michael@0 | 508 | // Since the last pool is pools[0].other, and the first pool |
michael@0 | 509 | // is pools[numPoolKinds-1], we actually want to process this |
michael@0 | 510 | // forwards. |
michael@0 | 511 | for (cur = pools; cur < &pools[numPoolKinds]; cur++) { |
michael@0 | 512 | // fetch the pool for the backwards half. |
michael@0 | 513 | tmp = cur->other; |
michael@0 | 514 | if (p == cur) |
michael@0 | 515 | tmp->updateLimiter(this->nextOffset()); |
michael@0 | 516 | |
michael@0 | 517 | if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) { |
michael@0 | 518 | // uh-oh, the backwards pool is full. Time to finalize it, and |
michael@0 | 519 | // switch to a new forward pool. |
michael@0 | 520 | if (p != nullptr) |
michael@0 | 521 | IonSpew(IonSpew_Pools, "[%d]Inserting pool entry caused a spill", id); |
michael@0 | 522 | else |
michael@0 | 523 | IonSpew(IonSpew_Pools, "[%d]Inserting instruction(%d) caused a spill", id, size()); |
michael@0 | 524 | |
michael@0 | 525 | this->finishPool(); |
michael@0 | 526 | if (this->oom()) |
michael@0 | 527 | return uint32_t(-1); |
michael@0 | 528 | return this->insertEntryForwards(instSize, inst, p, data); |
michael@0 | 529 | } |
michael@0 | 530 | // when moving back to front, calculating the alignment is hard, just be |
michael@0 | 531 | // conservative with it. |
michael@0 | 532 | poolOffset += tmp->immSize * tmp->numEntries + tmp->getAlignment(); |
michael@0 | 533 | if (p == tmp) { |
michael@0 | 534 | poolOffset += tmp->immSize; |
michael@0 | 535 | } |
michael@0 | 536 | } |
michael@0 | 537 | return p->numEntries + p->other->insertEntry(data, this->nextOffset(), this->LifoAlloc_); |
michael@0 | 538 | } |
michael@0 | 539 | |
michael@0 | 540 | // Simultaneously insert an instSized instruction into the stream, |
michael@0 | 541 | // and an entry into the pool. There are many things that can happen. |
michael@0 | 542 | // 1) the insertion goes as planned |
michael@0 | 543 | // 2) inserting an instruction pushes a previous pool-reference out of range, forcing a dump |
michael@0 | 544 | // 2a) there isn't a reasonable save point in the instruction stream. We need to save room for |
michael@0 | 545 | // a guard instruction to branch over the pool. |
michael@0 | 546 | int insertEntryForwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) { |
michael@0 | 547 | // Advance the "current offset" by an inst, so everyone knows what their offset should be. |
michael@0 | 548 | uint32_t nextOffset = this->size() + instSize; |
michael@0 | 549 | uint32_t poolOffset = nextOffset; |
michael@0 | 550 | Pool *tmp; |
michael@0 | 551 | // If we need a guard instruction, reserve space for that. |
michael@0 | 552 | if (!perforatedNode) |
michael@0 | 553 | poolOffset += guardSize; |
michael@0 | 554 | // Also, take into account the size of the header that will be placed *after* |
michael@0 | 555 | // the guard instruction |
michael@0 | 556 | poolOffset += headerSize; |
michael@0 | 557 | |
michael@0 | 558 | // Perform the necessary range checks. |
michael@0 | 559 | for (tmp = pools; tmp < &pools[numPoolKinds]; tmp++) { |
michael@0 | 560 | // The pool may wish for a particular alignment; let's give it one. |
michael@0 | 561 | JS_ASSERT((tmp->getAlignment() & (tmp->getAlignment() - 1)) == 0); |
michael@0 | 562 | // The pool only needs said alignment *if* there are any entries in the pool. |
michael@0 | 563 | // WARNING: the pool also needs said alignment if there are going to be entries in |
michael@0 | 564 | // the pool after this entry has been inserted. |
michael@0 | 565 | if (p == tmp) |
michael@0 | 566 | poolOffset = tmp->forceAlign(poolOffset); |
michael@0 | 567 | else |
michael@0 | 568 | poolOffset = tmp->align(poolOffset); |
michael@0 | 569 | |
michael@0 | 570 | // If we're at the pool we want to insert into, find a new limiter |
michael@0 | 571 | // before we do the range check. |
michael@0 | 572 | if (p == tmp) { |
michael@0 | 573 | p->updateLimiter(BufferOffset(nextOffset)); |
michael@0 | 574 | } |
michael@0 | 575 | if (tmp->checkFull(poolOffset)) { |
michael@0 | 576 | // uh-oh. DUMP DUMP DUMP |
michael@0 | 577 | if (p != nullptr) |
michael@0 | 578 | IonSpew(IonSpew_Pools, "[%d] Inserting pool entry caused a spill", id); |
michael@0 | 579 | else |
michael@0 | 580 | IonSpew(IonSpew_Pools, "[%d] Inserting instruction(%d) caused a spill", id, size()); |
michael@0 | 581 | |
michael@0 | 582 | this->dumpPool(); |
michael@0 | 583 | return this->insertEntryBackwards(instSize, inst, p, data); |
michael@0 | 584 | } |
michael@0 | 585 | // include the size of this pool in the running total |
michael@0 | 586 | if (p == tmp) { |
michael@0 | 587 | nextOffset += tmp->immSize; |
michael@0 | 588 | } |
michael@0 | 589 | nextOffset += tmp->immSize * tmp->numEntries; |
michael@0 | 590 | } |
michael@0 | 591 | if (p == nullptr) { |
michael@0 | 592 | return INT_MIN; |
michael@0 | 593 | } |
michael@0 | 594 | return p->insertEntry(data, this->nextOffset(), this->LifoAlloc_); |
michael@0 | 595 | } |
michael@0 | 596 | BufferOffset putInt(uint32_t value) { |
michael@0 | 597 | return insertEntry(sizeof(uint32_t) / sizeof(uint8_t), (uint8_t*)&value, nullptr, nullptr); |
michael@0 | 598 | } |
michael@0 | 599 | // Mark the current section as an area where we can |
michael@0 | 600 | // later go to dump a pool |
michael@0 | 601 | void perforate() { |
michael@0 | 602 | // If we're filling the backreferences, we don't want to start looking for a new dumpsite. |
michael@0 | 603 | if (inBackref) |
michael@0 | 604 | return; |
michael@0 | 605 | if (canNotPlacePool) |
michael@0 | 606 | return; |
michael@0 | 607 | // If there is nothing in the pool, then it is strictly disadvantageous |
michael@0 | 608 | // to attempt to place a pool here |
michael@0 | 609 | bool empty = true; |
michael@0 | 610 | for (int i = 0; i < numPoolKinds; i++) { |
michael@0 | 611 | if (pools[i].numEntries != 0) { |
michael@0 | 612 | empty = false; |
michael@0 | 613 | break; |
michael@0 | 614 | } |
michael@0 | 615 | } |
michael@0 | 616 | if (empty) |
michael@0 | 617 | return; |
michael@0 | 618 | perforatedNode = *getTail(); |
michael@0 | 619 | perforation = this->nextOffset(); |
michael@0 | 620 | Parent::perforate(); |
michael@0 | 621 | IonSpew(IonSpew_Pools, "[%d] Adding a perforation at offset %d", id, perforation.getOffset()); |
michael@0 | 622 | } |
michael@0 | 623 | |
michael@0 | 624 | // After a pool is finished, no more elements may be added to it. During this phase, we |
michael@0 | 625 | // will know the exact offsets to the pool entries, and those values should be written into |
michael@0 | 626 | // the given instructions. |
michael@0 | 627 | PoolInfo getPoolData() const { |
michael@0 | 628 | int prevOffset = getInfo(numDumps-1).offset; |
michael@0 | 629 | int prevEnd = getInfo(numDumps-1).finalPos; |
michael@0 | 630 | // calculate the offset of the start of this pool; |
michael@0 | 631 | int perfOffset = perforation.assigned() ? |
michael@0 | 632 | perforation.getOffset() : |
michael@0 | 633 | this->nextOffset().getOffset() + this->guardSize; |
michael@0 | 634 | int initOffset = prevEnd + (perfOffset - prevOffset); |
michael@0 | 635 | int finOffset = initOffset; |
michael@0 | 636 | bool poolIsEmpty = true; |
michael@0 | 637 | for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { |
michael@0 | 638 | if (pools[poolIdx].numEntries != 0) { |
michael@0 | 639 | poolIsEmpty = false; |
michael@0 | 640 | break; |
michael@0 | 641 | } |
michael@0 | 642 | if (pools[poolIdx].other != nullptr && pools[poolIdx].other->numEntries != 0) { |
michael@0 | 643 | poolIsEmpty = false; |
michael@0 | 644 | break; |
michael@0 | 645 | } |
michael@0 | 646 | } |
michael@0 | 647 | if (!poolIsEmpty) { |
michael@0 | 648 | finOffset += headerSize; |
michael@0 | 649 | for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { |
michael@0 | 650 | finOffset=pools[poolIdx].align(finOffset); |
michael@0 | 651 | finOffset+=pools[poolIdx].numEntries * pools[poolIdx].immSize; |
michael@0 | 652 | } |
michael@0 | 653 | // And compute the necessary adjustments for the second half of the pool. |
michael@0 | 654 | for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) { |
michael@0 | 655 | finOffset=pools[poolIdx].other->align(finOffset); |
michael@0 | 656 | finOffset+=pools[poolIdx].other->numEntries * pools[poolIdx].other->immSize; |
michael@0 | 657 | } |
michael@0 | 658 | finOffset += footerSize; |
michael@0 | 659 | } |
michael@0 | 660 | |
michael@0 | 661 | PoolInfo ret; |
michael@0 | 662 | ret.offset = perfOffset; |
michael@0 | 663 | ret.size = finOffset - initOffset; |
michael@0 | 664 | ret.finalPos = finOffset; |
michael@0 | 665 | ret.slice = perforatedNode; |
michael@0 | 666 | return ret; |
michael@0 | 667 | } |
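
    // Worked example of the size computation in getPoolData() above (illustrative
    // numbers): with a 4-byte header and footer, one forward pool holding two 8-byte
    // entries aligned to 8, an empty backward half, and no alignment padding needed,
    // the dump occupies 4 + 2 * 8 + 4 == 24 bytes, so ret.size == 24 and
    // ret.finalPos == initOffset + 24.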
michael@0 | 668 | void finishPool() { |
michael@0 | 669 | // This function should only be called while the backwards half of the pool |
michael@0 | 670 | // is being filled in. The backwards half of the pool is always in a state |
michael@0 | 671 | // where it is sane. Everything that needs to be done here is for "sanity's sake". |
michael@0 | 672 | // The per-buffer pools need to be reset, and we need to record the size of the pool. |
michael@0 | 673 | IonSpew(IonSpew_Pools, "[%d] Finishing pool %d", id, numDumps); |
michael@0 | 674 | JS_ASSERT(inBackref); |
michael@0 | 675 | PoolInfo newPoolInfo = getPoolData(); |
michael@0 | 676 | if (newPoolInfo.size == 0) { |
michael@0 | 677 | // The code below also creates a new pool, but that is not necessary, since |
michael@0 | 678 | // the pools have not been modified at all. |
michael@0 | 679 | new (&perforation) BufferOffset(); |
michael@0 | 680 | perforatedNode = nullptr; |
michael@0 | 681 | inBackref = false; |
michael@0 | 682 | IonSpew(IonSpew_Pools, "[%d] Aborting because the pool is empty", id); |
michael@0 | 683 | // Bail out early, since we don't want to even pretend these pools exist. |
michael@0 | 684 | return; |
michael@0 | 685 | } |
michael@0 | 686 | JS_ASSERT(perforatedNode != nullptr); |
michael@0 | 687 | if (numDumps >= (1<<logBasePoolInfo) && (numDumps & (numDumps-1)) == 0) { |
michael@0 | 688 | // need to resize. |
michael@0 | 689 | PoolInfo *tmp = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * numDumps * 2)); |
michael@0 | 690 | if (tmp == nullptr) { |
michael@0 | 691 | this->fail_oom(); |
michael@0 | 692 | return; |
michael@0 | 693 | } |
michael@0 | 694 | memcpy(tmp, poolInfo, sizeof(PoolInfo) * numDumps); |
michael@0 | 695 | poolInfo = tmp; |
michael@0 | 696 | |
michael@0 | 697 | } |
michael@0 | 698 | |
michael@0 | 699 | // In order to figure out how to fix up the loads for the second half of the pool |
michael@0 | 700 | // we need to find where the bits of the pool that have been implemented end. |
michael@0 | 701 | int poolOffset = perforation.getOffset(); |
michael@0 | 702 | int magicAlign = getInfo(numDumps-1).finalPos - getInfo(numDumps-1).offset; |
michael@0 | 703 | poolOffset += magicAlign; |
michael@0 | 704 | poolOffset += headerSize; |
michael@0 | 705 | for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { |
michael@0 | 706 | poolOffset=pools[poolIdx].align(poolOffset); |
michael@0 | 707 | poolOffset+=pools[poolIdx].numEntries * pools[poolIdx].immSize; |
michael@0 | 708 | } |
michael@0 | 709 | mozilla::Array<LoadOffsets, 1 << poolKindBits> outcasts; |
michael@0 | 710 | mozilla::Array<uint8_t *, 1 << poolKindBits> outcastEntries; |
michael@0 | 711 | // All of the pool loads referred to by this code are going to |
michael@0 | 712 | // need fixing up here. |
michael@0 | 713 | int skippedBytes = 0; |
michael@0 | 714 | for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) { |
michael@0 | 715 | Pool *p = pools[poolIdx].other; |
michael@0 | 716 | JS_ASSERT(p != nullptr); |
michael@0 | 717 | unsigned int idx = p->numEntries-1; |
michael@0 | 718 | // Allocate space for tracking information that needs to be propagated to the next pool |
michael@0 | 719 | // as well as space for quickly updating the pool entries in the current pool to remove |
michael@0 | 720 | // the entries that don't actually fit. I probably should change this over to a vector |
michael@0 | 721 | outcastEntries[poolIdx] = new uint8_t[p->getPoolSize()]; |
michael@0 | 722 | bool *preservedEntries = new bool[p->numEntries]; |
michael@0 | 723 | // Hacks on top of Hacks! |
michael@0 | 724 | // the patching code takes in the address of the instruction to be patched, |
michael@0 | 725 | // and the "address" of the element in the pool that we want to load. |
michael@0 | 726 | // However, since the code isn't actually in an array, we need to lie about |
michael@0 | 727 | // the address that the pool is in. Furthermore, since the offsets are |
michael@0 | 728 | // technically from the beginning of the FORWARD reference section, we have |
michael@0 | 729 | // to lie to ourselves about where this pool starts in order to make sure |
michael@0 | 730 | // the distance into the pool is interpreted correctly. |
michael@0 | 731 | // There is a more elegant way to fix this that will need to be implemented |
michael@0 | 732 | // eventually. We will want to provide the fixup function with a method to |
michael@0 | 733 | // convert from a 'token' into a pool offset. |
michael@0 | 734 | poolOffset = p->align(poolOffset); |
michael@0 | 735 | int numSkips = 0; |
michael@0 | 736 | int fakePoolOffset = poolOffset - pools[poolIdx].numEntries * pools[poolIdx].immSize; |
michael@0 | 737 | for (BufferOffset *iter = p->loadOffsets.end()-1; |
michael@0 | 738 | iter != p->loadOffsets.begin()-1; --iter, --idx) |
michael@0 | 739 | { |
michael@0 | 740 | |
michael@0 | 741 | IonSpew(IonSpew_Pools, "[%d] Linking entry %d in pool %d", id, idx+ pools[poolIdx].numEntries, poolIdx); |
michael@0 | 742 | JS_ASSERT(iter->getOffset() >= perforation.getOffset()); |
michael@0 | 743 | // Everything here is known, we can safely do the necessary substitutions |
michael@0 | 744 | Inst * inst = this->getInst(*iter); |
michael@0 | 745 | // Manually compute the offset, including a possible bias. |
michael@0 | 746 | // Also take into account the whole size of the pool that is being placed. |
michael@0 | 747 | int codeOffset = fakePoolOffset - iter->getOffset() - newPoolInfo.size + numSkips * p->immSize - skippedBytes; |
michael@0 | 748 | // That is, patchConstantPoolLoad wants to be handed the address of the |
michael@0 | 749 | // pool entry that is being loaded. We need to do a non-trivial amount |
michael@0 | 750 | // of math here, since the pool that we've made does not actually reside there |
michael@0 | 751 | // in memory. |
michael@0 | 752 | IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign); |
michael@0 | 753 | if (!Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign)) { |
michael@0 | 754 | // NOTE: if removing this entry happens to change the alignment of the next |
michael@0 | 755 | // block, chances are you will have a bad time. |
michael@0 | 756 | // ADDENDUM: this CANNOT happen on ARM, because the only elements that |
michael@0 | 757 | // fall into this case are doubles loaded via vfp, but they will also be |
michael@0 | 758 | // the last pool, which means it cannot affect the alignment of any other |
michael@0 | 759 | // Sub Pools. |
michael@0 | 760 | IonSpew(IonSpew_Pools, "[%d] *** Offset %d was still out of range! ***", id, codeOffset - magicAlign); |
michael@0 | 761 | IonSpew(IonSpew_Pools, "[%d] Too complicated; bailing", id); |
michael@0 | 762 | this->fail_bail(); |
michael@0 | 763 | // only free up to the current offset |
michael@0 | 764 | for (int pi = poolIdx; pi < numPoolKinds; pi++) |
michael@0 | 765 | delete[] outcastEntries[pi]; |
michael@0 | 766 | delete[] preservedEntries; |
michael@0 | 767 | return; |
michael@0 | 768 | } else { |
michael@0 | 769 | preservedEntries[idx] = true; |
michael@0 | 770 | } |
michael@0 | 771 | } |
michael@0 | 772 | // remove the elements of the pool that should not be there (YAY, MEMCPY) |
michael@0 | 773 | unsigned int idxDest = 0; |
michael@0 | 774 | // If no elements were skipped, no expensive copy is necessary. |
michael@0 | 775 | if (numSkips != 0) { |
michael@0 | 776 | for (idx = 0; idx < p->numEntries; idx++) { |
michael@0 | 777 | if (preservedEntries[idx]) { |
michael@0 | 778 | if (idx != idxDest) { |
michael@0 | 779 | memcpy(&p->poolData[idxDest * p->immSize], |
michael@0 | 780 | &p->poolData[idx * p->immSize], |
michael@0 | 781 | p->immSize); |
michael@0 | 782 | } |
michael@0 | 783 | idxDest++; |
michael@0 | 784 | } |
michael@0 | 785 | } |
michael@0 | 786 | p->numEntries -= numSkips; |
michael@0 | 787 | } |
michael@0 | 788 | poolOffset += p->numEntries * p->immSize; |
michael@0 | 789 | delete[] preservedEntries; |
michael@0 | 790 | preservedEntries = nullptr; |
michael@0 | 791 | } |
michael@0 | 792 | // bind the current pool to the perforation point. |
michael@0 | 793 | Pool **tmp = &perforatedNode->data; |
michael@0 | 794 | *tmp = static_cast<Pool*>(this->LifoAlloc_.alloc(sizeof(Pool) * numPoolKinds)); |
michael@0 | 795 | if (*tmp == nullptr) { |
michael@0 | 796 | this->fail_oom(); |
michael@0 | 797 | for (int pi = 0; pi < numPoolKinds; pi++) |
michael@0 | 798 | delete[] outcastEntries[pi]; |
michael@0 | 799 | return; |
michael@0 | 800 | } |
michael@0 | 801 | // The above operations may have changed the size of pools! |
michael@0 | 802 | // recalibrate the size of the pool. |
michael@0 | 803 | newPoolInfo = getPoolData(); |
michael@0 | 804 | poolInfo[numDumps] = newPoolInfo; |
michael@0 | 805 | poolSize += poolInfo[numDumps].size; |
michael@0 | 806 | numDumps++; |
michael@0 | 807 | |
michael@0 | 808 | memcpy(*tmp, pools, sizeof(Pool) * numPoolKinds); |
michael@0 | 809 | |
michael@0 | 810 | // reset everything to the state that it was in when we started |
michael@0 | 811 | for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { |
michael@0 | 812 | if (!pools[poolIdx].reset(this->LifoAlloc_)) { |
michael@0 | 813 | this->fail_oom(); |
michael@0 | 814 | for (int pi = 0; pi < numPoolKinds; pi++) |
michael@0 | 815 | delete[] outcastEntries[pi]; |
michael@0 | 816 | return; |
michael@0 | 817 | } |
michael@0 | 818 | } |
michael@0 | 819 | new (&perforation) BufferOffset(); |
michael@0 | 820 | perforatedNode = nullptr; |
michael@0 | 821 | inBackref = false; |
michael@0 | 822 | |
michael@0 | 823 | // Now that the backwards pool has been emptied, and a new forward pool |
michael@0 | 824 | // has been allocated, it is time to populate the new forward pool with |
michael@0 | 825 | // any entries that couldn't fit in the backwards pool. |
michael@0 | 826 | for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { |
michael@0 | 827 | // Technically, the innermost pool will never have this issue, but it is easier |
michael@0 | 828 | // to just handle this case. |
michael@0 | 829 | // Since the pool entry was filled back-to-front, and in the next buffer, the elements |
michael@0 | 830 | // should be front-to-back, this insertion also needs to proceed backwards |
michael@0 | 831 | int idx = outcasts[poolIdx].length(); |
michael@0 | 832 | for (BufferOffset *iter = outcasts[poolIdx].end()-1; |
michael@0 | 833 | iter != outcasts[poolIdx].begin()-1; |
michael@0 | 834 | --iter, --idx) { |
michael@0 | 835 | pools[poolIdx].updateLimiter(*iter); |
michael@0 | 836 | Inst *inst = this->getInst(*iter); |
michael@0 | 837 | Asm::insertTokenIntoTag(pools[poolIdx].instSize, (uint8_t*)inst, outcasts[poolIdx].end()-1-iter); |
michael@0 | 838 | pools[poolIdx].insertEntry(&outcastEntries[poolIdx][idx*pools[poolIdx].immSize], *iter, this->LifoAlloc_); |
michael@0 | 839 | } |
michael@0 | 840 | delete[] outcastEntries[poolIdx]; |
michael@0 | 841 | } |
michael@0 | 842 | // this (*2) is not technically kosher, but I want to get this bug fixed. |
michael@0 | 843 | // It should actually be guardSize + the size of the instruction that we're attempting |
michael@0 | 844 | // to insert. Unfortunately that value is never passed in. On ARM, these instructions |
michael@0 | 845 | // are always 4 bytes, so guardSize is legit to use. |
michael@0 | 846 | poolOffset = this->size() + guardSize * 2; |
michael@0 | 847 | poolOffset += headerSize; |
michael@0 | 848 | for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { |
michael@0 | 849 | // There can still be an awkward situation where the element that triggered the |
michael@0 | 850 | // initial dump didn't fit into the pool backwards, and now, still does not fit into |
michael@0 | 851 | // this pool. Now it is necessary to go and dump this pool (note: this is almost |
michael@0 | 852 | // certainly being called from dumpPool()). |
michael@0 | 853 | poolOffset = pools[poolIdx].align(poolOffset); |
michael@0 | 854 | if (pools[poolIdx].checkFull(poolOffset)) { |
michael@0 | 855 | // ONCE AGAIN, UH-OH, TIME TO BAIL |
michael@0 | 856 | dumpPool(); |
michael@0 | 857 | break; |
michael@0 | 858 | } |
michael@0 | 859 | poolOffset += pools[poolIdx].getPoolSize(); |
michael@0 | 860 | } |
michael@0 | 861 | } |
michael@0 | 862 | |
michael@0 | 863 | void dumpPool() { |
michael@0 | 864 | JS_ASSERT(!inBackref); |
michael@0 | 865 | IonSpew(IonSpew_Pools, "[%d] Attempting to dump the pool", id); |
michael@0 | 866 | PoolInfo newPoolInfo = getPoolData(); |
michael@0 | 867 | if (newPoolInfo.size == 0) { |
michael@0 | 868 | // If there is no data in the pool being dumped, don't dump anything. |
michael@0 | 869 | inBackref = true; |
michael@0 | 870 | IonSpew(IonSpew_Pools, "[%d]Abort, no pool data", id); |
michael@0 | 871 | return; |
michael@0 | 872 | } |
michael@0 | 873 | |
michael@0 | 874 | IonSpew(IonSpew_Pools, "[%d] Dumping %d bytes", id, newPoolInfo.size); |
michael@0 | 875 | if (!perforation.assigned()) { |
michael@0 | 876 | IonSpew(IonSpew_Pools, "[%d] No Perforation point selected, generating a new one", id); |
michael@0 | 877 | // There isn't a perforation here, we need to dump the pool with a guard. |
michael@0 | 878 | BufferOffset branch = this->nextOffset(); |
michael@0 | 879 | bool shouldMarkAsBranch = this->isNextBranch(); |
michael@0 | 880 | this->markNextAsBranch(); |
michael@0 | 881 | this->putBlob(guardSize, nullptr); |
michael@0 | 882 | BufferOffset afterPool = this->nextOffset(); |
michael@0 | 883 | Asm::writePoolGuard(branch, this->getInst(branch), afterPool); |
michael@0 | 884 | markGuard(); |
michael@0 | 885 | perforatedNode->isNatural = false; |
michael@0 | 886 | if (shouldMarkAsBranch) |
michael@0 | 887 | this->markNextAsBranch(); |
michael@0 | 888 | } |
michael@0 | 889 | |
michael@0 | 890 | // We have a perforation. Time to cut the instruction stream, patch in the pool |
michael@0 | 891 | // and possibly re-arrange the pool to accommodate its new location. |
michael@0 | 892 | int poolOffset = perforation.getOffset(); |
michael@0 | 893 | int magicAlign = getInfo(numDumps-1).finalPos - getInfo(numDumps-1).offset; |
michael@0 | 894 | poolOffset += magicAlign; |
michael@0 | 895 | poolOffset += headerSize; |
michael@0 | 896 | for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { |
michael@0 | 897 | mozilla::DebugOnly<bool> beforePool = true; |
michael@0 | 898 | Pool *p = &pools[poolIdx]; |
michael@0 | 899 | // Any entries that happened to be after the place we put our pool will need to be |
michael@0 | 900 | // switched from the forward-referenced pool to the backward-referenced pool. |
michael@0 | 901 | int idx = 0; |
michael@0 | 902 | for (BufferOffset *iter = p->loadOffsets.begin(); |
michael@0 | 903 | iter != p->loadOffsets.end(); ++iter, ++idx) |
michael@0 | 904 | { |
michael@0 | 905 | if (iter->getOffset() >= perforation.getOffset()) { |
michael@0 | 906 | IonSpew(IonSpew_Pools, "[%d] Pushing entry %d in pool %d into the backwards section.", id, idx, poolIdx); |
michael@0 | 907 | // insert this into the rear part of the pool. |
michael@0 | 908 | int offset = idx * p->immSize; |
michael@0 | 909 | p->other->insertEntry(&p->poolData[offset], BufferOffset(*iter), this->LifoAlloc_); |
michael@0 | 910 | // update the limiting entry for this pool. |
michael@0 | 911 | p->other->updateLimiter(*iter); |
michael@0 | 912 | |
michael@0 | 913 | // Update the current pool to report fewer entries. They are now in the |
michael@0 | 914 | // backwards section. |
michael@0 | 915 | p->numEntries--; |
michael@0 | 916 | beforePool = false; |
michael@0 | 917 | } else { |
michael@0 | 918 | JS_ASSERT(beforePool); |
michael@0 | 919 | // align the pool offset to the alignment of this pool |
michael@0 | 920 | // it already only aligns when the pool has data in it, but we want to not |
michael@0 | 921 | // align when all entries will end up in the backwards half of the pool |
michael@0 | 922 | poolOffset = p->align(poolOffset); |
michael@0 | 923 | IonSpew(IonSpew_Pools, "[%d] Entry %d in pool %d is before the pool.", id, idx, poolIdx); |
michael@0 | 924 | // Everything here is known, we can safely do the necessary substitutions |
michael@0 | 925 | Inst * inst = this->getInst(*iter); |
michael@0 | 926 | // We need to manually compute the offset, including a possible bias. |
michael@0 | 927 | int codeOffset = poolOffset - iter->getOffset(); |
michael@0 | 928 | // That is, patchConstantPoolLoad wants to be handed the address of the |
michael@0 | 929 | // pool entry that is being loaded. We need to do a non-trivial amount |
michael@0 | 930 | // of math here, since the pool that we've made does not actually reside there |
michael@0 | 931 | // in memory. |
michael@0 | 932 | IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign); |
michael@0 | 933 | Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign); |
michael@0 | 934 | } |
michael@0 | 935 | } |
michael@0 | 936 | // Some number of entries have been positively identified as being |
michael@0 | 937 | // in this section of the pool. Before processing the next pool, |
michael@0 | 938 | // update the offset from the beginning of the buffer |
michael@0 | 939 | poolOffset += p->numEntries * p->immSize; |
michael@0 | 940 | } |
michael@0 | 941 | poolOffset = footerSize; |
michael@0 | 942 | inBackref = true; |
michael@0 | 943 | for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) { |
michael@0 | 944 | Pool *tmp = pools[poolIdx].other; |
michael@0 | 945 | if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) { |
michael@0 | 946 | // GNAAAH. While we rotated elements into the back half, one of them filled up. |
michael@0 | 947 | // Now, dumping the back half is necessary... |
michael@0 | 948 | finishPool(); |
michael@0 | 949 | break; |
michael@0 | 950 | } |
michael@0 | 951 | } |
michael@0 | 952 | } |
michael@0 | 953 | |
michael@0 | 954 | void flushPool() { |
michael@0 | 955 | if (this->oom()) |
michael@0 | 956 | return; |
michael@0 | 957 | IonSpew(IonSpew_Pools, "[%d] Requesting a pool flush", id); |
michael@0 | 958 | if (!inBackref) |
michael@0 | 959 | dumpPool(); |
michael@0 | 960 | finishPool(); |
michael@0 | 961 | } |
michael@0 | 962 | void patchBranch(Inst *i, int curpool, BufferOffset branch) { |
michael@0 | 963 | const Inst *ci = i; |
michael@0 | 964 | ptrdiff_t offset = Asm::getBranchOffset(ci); |
michael@0 | 965 | // If the offset is 0, then there is nothing to do. |
michael@0 | 966 | if (offset == 0) |
michael@0 | 967 | return; |
michael@0 | 968 | int destOffset = branch.getOffset() + offset; |
michael@0 | 969 | if (offset > 0) { |
michael@0 | 970 | |
michael@0 | 971 | while (curpool < numDumps && poolInfo[curpool].offset <= destOffset) { |
michael@0 | 972 | offset += poolInfo[curpool].size; |
michael@0 | 973 | curpool++; |
michael@0 | 974 | } |
michael@0 | 975 | } else { |
michael@0 | 976 | // Ignore the pool that comes next, since this is a backwards branch |
michael@0 | 977 | curpool--; |
michael@0 | 978 | while (curpool >= 0 && poolInfo[curpool].offset > destOffset) { |
michael@0 | 979 | offset -= poolInfo[curpool].size; |
michael@0 | 980 | curpool--; |
michael@0 | 981 | } |
michael@0 | 982 | // Can't assert anything here, since the first pool may be after the target. |
michael@0 | 983 | } |
michael@0 | 984 | Asm::retargetNearBranch(i, offset, false); |
michael@0 | 985 | } |
michael@0 | 986 | |
michael@0 | 987 | // Mark the next instruction as a valid guard. This means we can place a pool here. |
michael@0 | 988 | void markGuard() { |
michael@0 | 989 | // If we are in a no pool zone then there is no point in dogearing |
michael@0 | 990 | // this branch as a place to go back to |
michael@0 | 991 | if (canNotPlacePool) |
michael@0 | 992 | return; |
michael@0 | 993 | // There is no point in trying to grab a new slot if we've already |
michael@0 | 994 | // found one and are in the process of filling it in. |
michael@0 | 995 | if (inBackref) |
michael@0 | 996 | return; |
michael@0 | 997 | perforate(); |
michael@0 | 998 | } |
michael@0 | 999 | void enterNoPool() { |
michael@0 | 1000 | if (!canNotPlacePool && !perforation.assigned()) { |
michael@0 | 1001 | // Embarrassing mode: The Assembler requests the start of a no-pool section |
michael@0 | 1002 | // and there have been no valid places that a pool could be dumped thus far. |
michael@0 | 1003 | // If a pool were to fill up before this no-pool section ends, we need to go back |
michael@0 | 1004 | // in the stream and enter a pool guard after the fact. This is feasible, but |
michael@0 | 1005 | // for now, it is easier to just allocate a junk instruction, default it to a nop, and |
michael@0 | 1006 | // finally, if the pool *is* needed, patch the nop into a pool guard. |
michael@0 | 1007 | // What the assembler requests: |
michael@0 | 1008 | |
michael@0 | 1009 | // #request no-pool zone |
michael@0 | 1010 | // push pc |
michael@0 | 1011 | // blx r12 |
michael@0 | 1012 | // #end no-pool zone |
michael@0 | 1013 | |
michael@0 | 1014 |             // However, if a pool needs to be inserted and there is no perforation point yet, |
michael@0 | 1015 |             // the code that is actually generated is: |
michael@0 | 1016 | |
michael@0 | 1017 | // b next; <= perforation point |
michael@0 | 1018 | // next: |
michael@0 | 1019 | // #beginning of no pool zone |
michael@0 | 1020 | // push pc |
michael@0 | 1021 | // blx r12 |
michael@0 | 1022 | |
michael@0 | 1023 | BufferOffset branch = this->nextOffset(); |
michael@0 | 1024 | this->markNextAsBranch(); |
michael@0 | 1025 | this->putBlob(guardSize, nullptr); |
michael@0 | 1026 | BufferOffset afterPool = this->nextOffset(); |
michael@0 | 1027 | Asm::writePoolGuard(branch, this->getInst(branch), afterPool); |
michael@0 | 1028 | markGuard(); |
michael@0 | 1029 | if (perforatedNode != nullptr) |
michael@0 | 1030 | perforatedNode->isNatural = false; |
michael@0 | 1031 | } |
michael@0 | 1032 | canNotPlacePool++; |
michael@0 | 1033 | } |
michael@0 | 1034 | void leaveNoPool() { |
michael@0 | 1035 | canNotPlacePool--; |
michael@0 | 1036 | } |
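    // A minimal usage sketch for the pair above, bracketing a pc-relative
    // sequence that must not be split by a pool dump. The masm/as_push/as_blx
    // names are hypothetical and only illustrate the shape of a caller:
    //
    //   masm.enterNoPool();
    //   masm.as_push(pc);   // pc-relative pair that must stay contiguous
    //   masm.as_blx(r12);
    //   masm.leaveNoPool();
    //
    // Since canNotPlacePool is a counter rather than a flag, such regions can
    // nest; pools become placeable again only once every enterNoPool() has been
    // matched by a leaveNoPool().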
michael@0 | 1037 | int size() const { |
michael@0 | 1038 | return uncheckedSize(); |
michael@0 | 1039 | } |
michael@0 | 1040 | Pool *getPool(int idx) { |
michael@0 | 1041 | return &pools[idx]; |
michael@0 | 1042 | } |
michael@0 | 1043 | void markNextAsBranch() { |
michael@0 | 1044 |         // If the previously inserted instruction was the last one in this node, |
michael@0 | 1045 |         // then the instruction we actually want to mark is the first instruction |
michael@0 | 1046 |         // of the next node, so make sure that node exists. |
michael@0 | 1047 | this->ensureSpace(InstBaseSize); |
michael@0 | 1048 | JS_ASSERT(*this->getTail() != nullptr); |
michael@0 | 1049 | (*this->getTail())->markNextAsBranch(); |
michael@0 | 1050 | } |
michael@0 | 1051 | bool isNextBranch() { |
michael@0 | 1052 | JS_ASSERT(*this->getTail() != nullptr); |
michael@0 | 1053 | return (*this->getTail())->isNextBranch(); |
michael@0 | 1054 | } |
michael@0 | 1055 | |
michael@0 | 1056 | int uncheckedSize() const { |
michael@0 | 1057 | PoolInfo pi = getPoolData(); |
michael@0 | 1058 | int codeEnd = this->nextOffset().getOffset(); |
michael@0 | 1059 | return (codeEnd - pi.offset) + pi.finalPos; |
michael@0 | 1060 | } |
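    // An illustrative reading of uncheckedSize(), assuming getPoolData()
    // summarizes the pools dumped so far and using made-up numbers: if the last
    // dumped pool was placed at code offset pi.offset == 0x200, ends at final
    // (pool-adjusted) position pi.finalPos == 0x280, and nextOffset() currently
    // returns 0x240, then 0x40 bytes of code follow the last pool and the total
    // size is (0x240 - 0x200) + 0x280 == 0x2c0 bytes.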
michael@0 | 1061 | ptrdiff_t curDumpsite; |
michael@0 | 1062 | void resetCounter() { |
michael@0 | 1063 | curDumpsite = 0; |
michael@0 | 1064 | } |
michael@0 | 1065 | ptrdiff_t poolSizeBefore(ptrdiff_t offset) const { |
michael@0 | 1066 | int cur = 0; |
michael@0 | 1067 |         while (cur < numDumps && poolInfo[cur].offset <= offset) |
michael@0 | 1068 |             cur++; |
michael@0 | 1069 |         // poolInfo[cur].offset is now larger than the requested offset; either this |
michael@0 | 1070 |         // is the first pool, or the previous one is the last one we care about. |
michael@0 | 1071 | if (cur == 0) |
michael@0 | 1072 | return 0; |
michael@0 | 1073 | return poolInfo[cur-1].finalPos - poolInfo[cur-1].offset; |
michael@0 | 1074 | } |
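    // A worked example of poolSizeBefore() with made-up numbers. With two dumped
    // pools recorded as
    //   poolInfo[0] = { offset: 0x40,  finalPos: 0x60  }   // 0x20 pool bytes so far
    //   poolInfo[1] = { offset: 0x100, finalPos: 0x140 }   // 0x40 pool bytes so far
    // a query at offset 0x120 walks past both entries and returns
    // poolInfo[1].finalPos - poolInfo[1].offset == 0x40, i.e. 0x40 bytes of pool
    // data have been inserted before code offset 0x120.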
michael@0 | 1075 | |
michael@0 | 1076 | private: |
michael@0 | 1077 |     void getPEPool(PoolEntry pe, Pool **retP, int32_t *retOffset, int32_t *poolNum) const { |
michael@0 | 1078 | int poolKind = pe.poolKind(); |
michael@0 | 1079 | Pool *p = nullptr; |
michael@0 | 1080 | uint32_t offset = pe.offset() * pools[poolKind].immSize; |
michael@0 | 1081 | int idx; |
michael@0 | 1082 | for (idx = 0; idx < numDumps; idx++) { |
michael@0 | 1083 | p = &poolInfo[idx].slice->data[poolKind]; |
michael@0 | 1084 | if (p->getPoolSize() > offset) |
michael@0 | 1085 | break; |
michael@0 | 1086 | offset -= p->getPoolSize(); |
michael@0 | 1087 | p = p->other; |
michael@0 | 1088 | if (p->getPoolSize() > offset) |
michael@0 | 1089 | break; |
michael@0 | 1090 | offset -= p->getPoolSize(); |
michael@0 | 1091 | p = nullptr; |
michael@0 | 1092 | } |
michael@0 | 1093 | if (poolNum != nullptr) |
michael@0 | 1094 | *poolNum = idx; |
michael@0 | 1095 |         // If this offset is contained in any finished pool, forward or backwards, p now |
michael@0 | 1096 |         // points to that pool. If it is not in any finished pool (it should then be in |
michael@0 | 1097 |         // the pool currently being built), p is nullptr. |
michael@0 | 1098 | if (p == nullptr) { |
michael@0 | 1099 | p = &pools[poolKind]; |
michael@0 | 1100 | if (offset >= p->getPoolSize()) { |
michael@0 | 1101 | p = p->other; |
michael@0 | 1102 | offset -= p->getPoolSize(); |
michael@0 | 1103 | } |
michael@0 | 1104 | } |
michael@0 | 1105 | JS_ASSERT(p != nullptr); |
michael@0 | 1106 | JS_ASSERT(offset < p->getPoolSize()); |
michael@0 | 1107 | *retP = p; |
michael@0 | 1108 | *retOffset = offset; |
michael@0 | 1109 | } |
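    // An illustrative trace of the search above, with made-up pool sizes, for an
    // entry whose byte offset within its pool kind is 20: if slice 0's forward
    // pool holds 12 bytes (12 <= 20, offset becomes 8) and its backref half holds
    // 4 bytes (4 <= 8, offset becomes 4), the walk moves on to slice 1; if that
    // slice's forward pool holds 8 bytes (8 > 4), the loop breaks with p pointing
    // at it and offset == 4 bytes into its poolData.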
michael@0 | 1110 | uint8_t *getPoolEntry(PoolEntry pe) { |
michael@0 | 1111 | Pool *p; |
michael@0 | 1112 | int32_t offset; |
michael@0 | 1113 | getPEPool(pe, &p, &offset, nullptr); |
michael@0 | 1114 | return &p->poolData[offset]; |
michael@0 | 1115 | } |
michael@0 | 1116 | size_t getPoolEntrySize(PoolEntry pe) { |
michael@0 | 1117 | int idx = pe.poolKind(); |
michael@0 | 1118 | return pools[idx].immSize; |
michael@0 | 1119 | } |
michael@0 | 1120 | |
michael@0 | 1121 | public: |
michael@0 | 1122 | uint32_t poolEntryOffset(PoolEntry pe) const { |
michael@0 | 1123 | Pool *realPool; |
michael@0 | 1124 | // offset is in bytes, not entries. |
michael@0 | 1125 | int32_t offset; |
michael@0 | 1126 | int32_t poolNum; |
michael@0 | 1127 | getPEPool(pe, &realPool, &offset, &poolNum); |
michael@0 | 1128 | PoolInfo *pi = &poolInfo[poolNum]; |
michael@0 | 1129 | Pool *poolGroup = pi->slice->data; |
michael@0 | 1130 | uint32_t start = pi->finalPos - pi->size + headerSize; |
michael@0 | 1131 |         // The order of the pools is: |
michael@0 | 1132 | // A B C C_Rev B_Rev A_Rev, so in the initial pass, |
michael@0 | 1133 | // go through the pools forwards, and in the second pass |
michael@0 | 1134 | // go through them in reverse order. |
michael@0 | 1135 | for (int idx = 0; idx < numPoolKinds; idx++) { |
michael@0 | 1136 | if (&poolGroup[idx] == realPool) { |
michael@0 | 1137 | return start + offset; |
michael@0 | 1138 | } |
michael@0 | 1139 | start = poolGroup[idx].addPoolSize(start); |
michael@0 | 1140 | } |
michael@0 | 1141 | for (int idx = numPoolKinds-1; idx >= 0; idx--) { |
michael@0 | 1142 | if (poolGroup[idx].other == realPool) { |
michael@0 | 1143 | return start + offset; |
michael@0 | 1144 | } |
michael@0 | 1145 | start = poolGroup[idx].other->addPoolSize(start); |
michael@0 | 1146 | } |
michael@0 | 1147 | MOZ_ASSUME_UNREACHABLE("Entry is not in a pool"); |
michael@0 | 1148 | } |
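    // A sketch of the traversal above for two pool kinds, A and B, whose layout
    // within one dump is A B B_Rev A_Rev. Starting from
    //   start = pi->finalPos - pi->size + headerSize,
    // the forward pass returns start + offset if the entry lives in A, otherwise
    // advances start past A and checks B; failing that, the reverse pass walks
    // B_Rev and then A_Rev the same way, so whichever pool actually holds the
    // entry yields its final position as start + offset.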
michael@0 | 1149 | void writePoolEntry(PoolEntry pe, uint8_t *buff) { |
michael@0 | 1150 | size_t size = getPoolEntrySize(pe); |
michael@0 | 1151 | uint8_t *entry = getPoolEntry(pe); |
michael@0 | 1152 | memcpy(entry, buff, size); |
michael@0 | 1153 | } |
michael@0 | 1154 | void readPoolEntry(PoolEntry pe, uint8_t *buff) { |
michael@0 | 1155 | size_t size = getPoolEntrySize(pe); |
michael@0 | 1156 | uint8_t *entry = getPoolEntry(pe); |
michael@0 | 1157 | memcpy(buff, entry, size); |
michael@0 | 1158 | } |
michael@0 | 1159 | |
michael@0 | 1160 | }; |
michael@0 | 1161 | } // jit |
michael@0 | 1162 | } // js |
michael@0 | 1163 | #endif /* jit_shared_IonAssemblerBufferWithConstantPools_h */ |