Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_shared_IonAssemblerBufferWithConstantPools_h
#define jit_shared_IonAssemblerBufferWithConstantPools_h

#include "mozilla/DebugOnly.h"

#include "assembler/wtf/SegmentedVector.h"
#include "jit/IonSpewer.h"
#include "jit/shared/IonAssemblerBuffer.h"

namespace js {
namespace jit {
typedef Vector<BufferOffset, 512, OldIonAllocPolicy> LoadOffsets;

struct Pool
  : public OldIonAllocPolicy
{
    const int maxOffset;
    const int immSize;
    const int instSize;
    const int bias;

  private:
    const int alignment;

  public:
    const bool isBackref;
    const bool canDedup;
    // "other" is the backwards half of this pool; it is held in another Pool structure.
    Pool *other;
    uint8_t *poolData;
    uint32_t numEntries;
    uint32_t buffSize;
    LoadOffsets loadOffsets;
    // When filling pools where the size of an immediate is larger
    // than the size of an instruction, we find ourselves in a case where the distance between the
    // next instruction and the next pool slot is increasing!
    // Moreover, if we want to do fancy things like deduplicate pool entries at
    // dump time, we may not know the location in a pool (and thus the limiting load)
    // until very late.
    // Lastly, it may be beneficial to interleave the pools. I have absolutely no idea
    // how that will work, but my suspicion is that it will be difficult.

    BufferOffset limitingUser;
    int limitingUsee;

    Pool(int maxOffset_, int immSize_, int instSize_, int bias_, int alignment_, LifoAlloc &LifoAlloc_,
         bool isBackref_ = false, bool canDedup_ = false, Pool *other_ = nullptr)
      : maxOffset(maxOffset_), immSize(immSize_), instSize(instSize_),
        bias(bias_), alignment(alignment_),
        isBackref(isBackref_), canDedup(canDedup_), other(other_),
        poolData(static_cast<uint8_t *>(LifoAlloc_.alloc(8 * immSize))), numEntries(0),
        buffSize(8), loadOffsets(), limitingUser(), limitingUsee(INT_MIN)
    {
    }
    static const int garbage = 0xa5a5a5a5;
    Pool() : maxOffset(garbage), immSize(garbage), instSize(garbage), bias(garbage),
             alignment(garbage), isBackref(garbage), canDedup(garbage), other((Pool*)garbage)
    {
    }
    // Sometimes, when we are adding large values to a pool, the limiting use may change.
    // Handle this case. nextInst is the offset of the instruction that is being added.
    void updateLimiter(BufferOffset nextInst) {
        int oldRange, newRange;
        if (isBackref) {
            // common expressions that are not subtracted: the location of the pool, ...
            oldRange = limitingUser.getOffset() - ((numEntries - limitingUsee) * immSize);
            newRange = nextInst.getOffset();
        } else {
            oldRange = (limitingUsee * immSize) - limitingUser.getOffset();
            newRange = (numEntries * immSize) - nextInst.getOffset();
        }
        if (!limitingUser.assigned() || newRange > oldRange) {
            // We have a new largest range!
            limitingUser = nextInst;
            limitingUsee = numEntries;
        }
    }
    // checkFull is called before any modifications have been made.
    // It asks: "if we were to add this instruction and pool entry,
    // would we be in an invalid state?". If it returns true, then it is in fact
    // time for a "pool dump".

    // poolOffset is the distance from the end of the current section to the end of the pool.
    // For the last section of the pool, this will be the size of the footer.
    // For the first section of the pool, it will be the size of every other
    // section and the footer.
    // codeOffset is the instruction-distance from the pool to the beginning of the buffer.
    // Since codeOffset only includes instructions, the number is the same for
    // the beginning and end of the pool.
    // instOffset is the offset from the beginning of the buffer to the instruction that
    // is about to be placed.
    bool checkFullBackref(int poolOffset, int codeOffset) {
        if (!limitingUser.assigned())
            return false;
        signed int distance =
            limitingUser.getOffset() + bias
            - codeOffset + poolOffset +
            (numEntries - limitingUsee + 1) * immSize;
        if (distance >= maxOffset)
            return true;
        return false;
    }

    // checkFull answers the question "If a pool were placed at poolOffset, would
    // any reference into the pool be out of range?". It is meant to be used as instructions
    // and elements are inserted, to determine if a saved perforation point needs to be used.

    bool checkFull(int poolOffset) {
        // Inserting an instruction into the stream can
        // push any of the pools out of range.
        // Similarly, inserting into a pool can push the pool entry out of range.
        JS_ASSERT(!isBackref);
        // Not full if there aren't any uses.
        if (!limitingUser.assigned()) {
            return false;
        }
        // We're considered "full" when:
        //     poolOffset + limitingUsee * immSize - (limitingUser + bias) >= maxOffset
        // (the size of any other pools is already folded into poolOffset by the caller).
        if (poolOffset + limitingUsee * immSize - (limitingUser.getOffset() + bias) >= maxOffset) {
            return true;
        }
        return false;
    }
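
    // A worked example of the range check above (illustrative numbers only,
    // not the values of any real target): suppose maxOffset = 4096, bias = 8,
    // immSize = 4, and the limiting use is an instruction at offset 100 that
    // refers to the first pool entry (limitingUsee = 0). Asking whether a pool
    // placed at poolOffset = 4200 would be in range:
    //     4200 + 0 * 4 - (100 + 8) = 4092 <  4096   => not full yet
    // whereas at poolOffset = 4204:
    //     4204 + 0 * 4 - (100 + 8) = 4096 >= 4096   => full; time to dump.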

    // By the time this function is called, we'd damn well better know that this is going to succeed.
    uint32_t insertEntry(uint8_t *data, BufferOffset off, LifoAlloc &LifoAlloc_) {
        if (numEntries == buffSize) {
            buffSize <<= 1;
            uint8_t *tmp = static_cast<uint8_t*>(LifoAlloc_.alloc(immSize * buffSize));
            // Check the new allocation *before* copying into it.
            if (tmp == nullptr) {
                buffSize = 0;
                return -1;
            }
            memcpy(tmp, poolData, immSize * numEntries);
            poolData = tmp;
        }
        memcpy(&poolData[numEntries * immSize], data, immSize);
        loadOffsets.append(off.getOffset());
        return numEntries++;
    }

    bool reset(LifoAlloc &a) {
        numEntries = 0;
        buffSize = 8;
        poolData = static_cast<uint8_t*>(a.alloc(buffSize * immSize));
        if (poolData == nullptr)
            return false;

        void *otherSpace = a.alloc(sizeof(Pool));
        if (otherSpace == nullptr)
            return false;

        other = new (otherSpace) Pool(other->maxOffset, other->immSize, other->instSize,
                                      other->bias, other->alignment, a, other->isBackref,
                                      other->canDedup);
        new (&loadOffsets) LoadOffsets;

        limitingUser = BufferOffset();
        limitingUsee = -1;
        return true;

    }
    // WARNING: This will not always align values. It will only
    // align to the requirement of the pool. If the pool is empty,
    // there is nothing to be aligned, so it will not perform any alignment.
    uint8_t* align(uint8_t *ptr) {
        return (uint8_t*)align((uint32_t)ptr);
    }
    uint32_t align(uint32_t ptr) {
        if (numEntries == 0)
            return ptr;
        return (ptr + alignment - 1) & ~(alignment - 1);
    }
    uint32_t forceAlign(uint32_t ptr) {
        return (ptr + alignment - 1) & ~(alignment - 1);
    }
    bool isAligned(uint32_t ptr) {
        return ptr == align(ptr);
    }
    int getAlignment() {
        return alignment;
    }

    uint32_t addPoolSize(uint32_t start) {
        start = align(start);
        start += immSize * numEntries;
        return start;
    }
    uint8_t *addPoolSize(uint8_t *start) {
        start = align(start);
        start += immSize * numEntries;
        return start;
    }
    uint32_t getPoolSize() {
        return immSize * numEntries;
    }
};
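
// A minimal construction sketch (the parameter values here are hypothetical,
// not those of any real backend): a forward pool of 4-byte immediates loaded
// by 4-byte instructions with a 4096-byte reach, no bias, and 4-byte
// alignment, paired with its backwards half through `other`.
#if 0
    LifoAlloc lifo(4096);
    Pool backHalf(4096, 4, 4, 0, 4, lifo, /* isBackref = */ true);
    Pool intPool(4096, 4, 4, 0, 4, lifo, /* isBackref = */ false,
                 /* canDedup = */ false, &backHalf);
    uint32_t imm = 0xdeadbeef;
    // insertEntry() copies the immediate in and returns its index in the pool.
    uint32_t index = intPool.insertEntry((uint8_t*)&imm, BufferOffset(0), lifo);
#endif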

template <int SliceSize, int InstBaseSize>
struct BufferSliceTail : public BufferSlice<SliceSize> {
    Pool *data;
    mozilla::Array<uint8_t, (SliceSize + (InstBaseSize * 8 - 1)) / (InstBaseSize * 8)> isBranch;
    bool isNatural : 1;
    BufferSliceTail *getNext() {
        return (BufferSliceTail *)this->next;
    }
    BufferSliceTail() : data(nullptr), isNatural(true) {
        memset(&isBranch[0], 0, sizeof(isBranch));
    }
    void markNextAsBranch() {
        int idx = this->nodeSize / InstBaseSize;
        isBranch[idx >> 3] |= 1 << (idx & 0x7);
    }
    bool isNextBranch() {
        unsigned int size = this->nodeSize;
        if (size >= SliceSize)
            return false;
        int idx = size / InstBaseSize;
        return (isBranch[idx >> 3] >> (idx & 0x7)) & 1;
    }
};

#if 0
static int getId() {
    if (MaybeGetIonContext())
        return MaybeGetIonContext()->getNextAssemblerId();
    return NULL_ID;
}
#endif
static inline void spewEntry(uint8_t *ptr, int length) {
#if IS_LITTLE_ENDIAN
    for (int idx = 0; idx < length; idx++) {
        IonSpewCont(IonSpew_Pools, "%02x", ptr[length - idx - 1]);
        if (((idx & 3) == 3) && (idx + 1 != length))
            IonSpewCont(IonSpew_Pools, "_");
    }
#else
    for (int idx = 0; idx < length; idx++) {
        IonSpewCont(IonSpew_Pools, "%02x", ptr[idx]);
        if (((idx & 3) == 3) && (idx + 1 != length))
            IonSpewCont(IonSpew_Pools, "_");
    }
#endif
}
// NOTE: Adding in the ability to retroactively insert a pool has consequences!
// Most notably, Labels stop working. Normally, we create a label and later bind it.
// When the label is bound, we back-patch all previous references to the label with
// the correct offset. However, since a pool may be retroactively inserted, we don't
// actually know what the final offset is going to be until much later. This will
// happen (in some instances) after the pools have been finalized. Attempting to compute
// the correct offsets for branches as the pools are finalized is quite infeasible.
// Instead, I write *just* the number of instructions that will be jumped over, then
// when we go to copy the instructions into the executable buffer, fix up all of the
// offsets to include the pools. Since we have about 32 megabytes worth of offset,
// I am not very worried about the pools moving it out of range.
// Now, how exactly do we generate these? The first step is to identify which
// instructions are actually branches that need to be fixed up. A single bit
// per instruction should be enough to determine which ones are branches, but
// we have no guarantee that all instructions are the same size, so the start of
// each branch instruction will be marked with a bit (1 bit per byte).
// Then we need to call up to the assembler to determine what the offset of the branch
// is. The offset will be the number of instructions that are being skipped over
// along with any processor bias. We then need to calculate the offset, including pools,
// and write that value into the buffer. At this point, we can write it into the
// executable buffer, or the AssemblerBuffer, and copy the data over later.
// Previously, this was all handled by the assembler, since the location
// and size of pools were always known as soon as their location had been reached.
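
// A concrete (made-up) example of the fixup described above: suppose a branch
// at instruction-offset 64 targets instruction-offset 256, and a 16-byte pool
// was dumped at offset 128. The buffer stores only the instruction-space
// offset, 256 - 64 = 192. At copy time, patchBranch() below notices that the
// pool's recorded offset (128) lies between the branch and its target, and
// widens the offset to 192 + 16 = 208 bytes in the executable image.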

// A class for indexing into constant pools.
// Each time a pool entry is added, one of these is generated.
// This can be supplied to read and write that entry after the fact.
// And it can be used to get the address of the entry once the buffer
// has been finalized, and an executable copy allocated.

template <int SliceSize, int InstBaseSize, class Inst, class Asm, int poolKindBits>
struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst> {
  private:
    mozilla::Array<int, 1 << poolKindBits> entryCount;
    static const int offsetBits = 32 - poolKindBits;
  public:

    class PoolEntry {
        template <int ss, int ibs, class i, class a, int pkb>
        friend struct AssemblerBufferWithConstantPool;
        uint32_t offset_ : offsetBits;
        uint32_t kind_ : poolKindBits;
        PoolEntry(int offset, int kind) : offset_(offset), kind_(kind) {
        }
      public:
        uint32_t encode() {
            uint32_t ret;
            memcpy(&ret, this, sizeof(uint32_t));
            return ret;
        }
        PoolEntry(uint32_t bits) : offset_(((1u << offsetBits) - 1) & bits),
                                   kind_(bits >> offsetBits) {
        }
        PoolEntry() : offset_((1u << offsetBits) - 1), kind_((1u << poolKindBits) - 1) {
        }

        uint32_t poolKind() const {
            return kind_;
        }
        uint32_t offset() const {
            return offset_;
        }
    };
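
    // PoolEntry is a bitfield packed into 32 bits: the low offsetBits hold the
    // entry's index among entries of the same kind, and the high poolKindBits
    // hold the pool kind. A round-trip sketch (hypothetical values; with
    // poolKindBits == 4, offsetBits is 28, and this assumes the usual layout
    // where the first-declared bitfield occupies the low bits):
#if 0
    PoolEntry pe(/* offset = */ 12, /* kind = */ 1); // private; the buffer does this internally
    uint32_t bits = pe.encode();                     // (1 << 28) | 12
    PoolEntry decoded(bits);
    JS_ASSERT(decoded.offset() == 12 && decoded.poolKind() == 1);
#endif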
  private:
    typedef BufferSliceTail<SliceSize, InstBaseSize> BufferSlice;
    typedef AssemblerBuffer<SliceSize, Inst> Parent;

    // The size of a guard instruction.
    const int guardSize;
    // The size of the header that is put at the beginning of a full pool.
    const int headerSize;
    // The size of a footer that is put in a pool after it is full.
    const int footerSize;
    // The number of sub-pools that we can allocate into.
    static const int numPoolKinds = 1 << poolKindBits;

    Pool *pools;

    // The buffer should be aligned to this boundary.
    const int instBufferAlign;

    // The number of times we've dumped the pool.
    int numDumps;
    struct PoolInfo {
        int offset;   // the number of instructions before the start of the pool
        int size;     // the size of the pool, including padding
        int finalPos; // the end of the buffer, in bytes from the beginning of the buffer
        BufferSlice *slice;
    };
    PoolInfo *poolInfo;
    // We need to keep track of how large the pools are, so we can allocate
    // enough space for them later. This should include any amount of padding
    // necessary to keep the pools aligned.
    int poolSize;
    // Incremented by enterNoPool() while the Assembler does not want us to
    // dump a pool here; pools may be placed only while this is zero.
    int canNotPlacePool;
    // Are we filling up the forwards or backwards pools?
    bool inBackref;
    // Cache the last place we saw an opportunity to dump the pool.
    BufferOffset perforation;
    BufferSlice *perforatedNode;
  public:
    int id;
  private:
    static const int logBasePoolInfo = 3;
    BufferSlice ** getHead() {
        return (BufferSlice**)&this->head;
    }
    BufferSlice ** getTail() {
        return (BufferSlice**)&this->tail;
    }

    virtual BufferSlice *newSlice(LifoAlloc &a) {
        BufferSlice *tmp = static_cast<BufferSlice*>(a.alloc(sizeof(BufferSlice)));
        if (!tmp) {
            this->m_oom = true;
            return nullptr;
        }
        new (tmp) BufferSlice;
        return tmp;
    }
  public:
    AssemblerBufferWithConstantPool(int guardSize_, int headerSize_, int footerSize_, Pool *pools_, int instBufferAlign_)
      : guardSize(guardSize_), headerSize(headerSize_),
        footerSize(footerSize_),
        pools(pools_),
        instBufferAlign(instBufferAlign_), numDumps(0),
        poolInfo(nullptr),
        poolSize(0), canNotPlacePool(0), inBackref(false),
        perforatedNode(nullptr), id(-1)
    {
        for (int idx = 0; idx < numPoolKinds; idx++) {
            entryCount[idx] = 0;
        }
    }

    // We need to wait until an AutoIonContextAlloc is created by the
    // IonMacroAssembler before allocating any space.
    void initWithAllocator() {
        poolInfo = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * (1 << logBasePoolInfo)));
    }

    const PoolInfo & getInfo(int x) const {
        static const PoolInfo nil = {0, 0, 0};
        if (x < 0 || x >= numDumps)
            return nil;
        return poolInfo[x];
    }
    void executableCopy(uint8_t *dest_) {
        if (this->oom())
            return;
        // TODO: only do this when the pool actually has a value in it.
        flushPool();
        for (int idx = 0; idx < numPoolKinds; idx++) {
            JS_ASSERT(pools[idx].numEntries == 0 && pools[idx].other->numEntries == 0);
        }
        typedef mozilla::Array<uint8_t, InstBaseSize> Chunk;
        mozilla::DebugOnly<Chunk *> start = (Chunk*)dest_;
        Chunk *dest = (Chunk*)(((uint32_t)dest_ + instBufferAlign - 1) & ~(instBufferAlign - 1));
        int curIndex = 0;
        int curInstOffset = 0;
        JS_ASSERT(start == dest);
        for (BufferSlice *cur = *getHead(); cur != nullptr; cur = cur->getNext()) {
            Chunk *src = (Chunk*)&cur->instructions;
            for (unsigned int idx = 0; idx < cur->size() / InstBaseSize;
                 idx++, curInstOffset += InstBaseSize) {
                // Is the current instruction a branch?
                if (cur->isBranch[idx >> 3] & (1 << (idx & 7))) {
                    // It's a branch. Fix up the branchiness!
                    patchBranch((Inst*)&src[idx], curIndex, BufferOffset(curInstOffset));
                }
                memcpy(&dest[idx], &src[idx], sizeof(Chunk));
            }
            dest += cur->size() / InstBaseSize;
            if (cur->data != nullptr) {
                // Have the repatcher move on to the next pool.
                curIndex++;
                // Loop over all of the pools, copying them into place.
                uint8_t *poolDest = (uint8_t*)dest;
                Asm::writePoolHeader(poolDest, cur->data, cur->isNatural);
                poolDest += headerSize;
                for (int idx = 0; idx < numPoolKinds; idx++) {
                    Pool *curPool = &cur->data[idx];
                    // Align the pool.
                    poolDest = curPool->align(poolDest);
                    memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries);
                    poolDest += curPool->immSize * curPool->numEntries;
                }
                // Now go over the whole list backwards, and copy in the reverse portions.
                for (int idx = numPoolKinds - 1; idx >= 0; idx--) {
                    Pool *curPool = cur->data[idx].other;
                    // Align the pool.
                    poolDest = curPool->align(poolDest);
                    memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries);
                    poolDest += curPool->immSize * curPool->numEntries;
                }
                // Write a footer in place.
                Asm::writePoolFooter(poolDest, cur->data, cur->isNatural);
                poolDest += footerSize;
                // At this point, poolDest had better still be aligned to a chunk boundary.
                dest = (Chunk*)poolDest;
            }
        }
    }

    BufferOffset insertEntry(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data, PoolEntry *pe = nullptr) {
        if (this->oom() && !this->bail())
            return BufferOffset();
        int token;
        if (p != nullptr) {
            int poolId = p - pools;
            const char sigil = inBackref ? 'B' : 'F';

            IonSpew(IonSpew_Pools, "[%d]{%c} Inserting entry into pool %d", id, sigil, poolId);
            IonSpewStart(IonSpew_Pools, "[%d] data is: 0x", id);
            spewEntry(data, p->immSize);
            IonSpewFin(IonSpew_Pools);
        }
        // Insert the pool value.
        if (inBackref)
            token = insertEntryBackwards(instSize, inst, p, data);
        else
            token = insertEntryForwards(instSize, inst, p, data);
        // Now to get an instruction to write.
        PoolEntry retPE;
        if (p != nullptr) {
            if (this->oom())
                return BufferOffset();
            int poolId = p - pools;
            IonSpew(IonSpew_Pools, "[%d] Entry has token %d, offset ~%d", id, token, size());
            Asm::insertTokenIntoTag(instSize, inst, token);
            JS_ASSERT(poolId < (1 << poolKindBits));
            JS_ASSERT(poolId >= 0);
            // Figure out the offset within like-kinded pool entries.
            retPE = PoolEntry(entryCount[poolId], poolId);
            entryCount[poolId]++;
        }
        // Now inst is a valid thing to insert into the instruction stream.
        if (pe != nullptr)
            *pe = retPE;
        return this->putBlob(instSize, inst);
    }

    uint32_t insertEntryBackwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) {
        // Unlike the forward case, inserting an instruction without inserting
        // anything into a pool after a pool has been placed does not affect
        // anything relevant, so we can skip this check entirely!

        if (p == nullptr)
            return INT_MIN;
        // TODO: calculating offsets for the alignment requirements is *hard*.
        // Instead, assume that we always add the maximum.
        int poolOffset = footerSize;
        Pool *cur, *tmp;
        // NOTE: we want to process the pools from last to first.
        // Since the last pool is pools[0].other, and the first pool
        // is pools[numPoolKinds-1], we actually want to process this
        // forwards.
        for (cur = pools; cur < &pools[numPoolKinds]; cur++) {
            // Fetch the pool for the backwards half.
            tmp = cur->other;
            if (p == cur)
                tmp->updateLimiter(this->nextOffset());

            if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) {
                // Uh-oh, the backwards pool is full. Time to finalize it, and
                // switch to a new forward pool.
                if (p != nullptr)
                    IonSpew(IonSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
                else
                    IonSpew(IonSpew_Pools, "[%d] Inserting instruction(%d) caused a spill", id, size());

                this->finishPool();
                if (this->oom())
                    return uint32_t(-1);
                return this->insertEntryForwards(instSize, inst, p, data);
            }
            // When moving back to front, calculating the alignment is hard, so just be
            // conservative with it.
            poolOffset += tmp->immSize * tmp->numEntries + tmp->getAlignment();
            if (p == tmp) {
                poolOffset += tmp->immSize;
            }
        }
        return p->numEntries + p->other->insertEntry(data, this->nextOffset(), this->LifoAlloc_);
    }

    // Simultaneously insert an instSized instruction into the stream,
    // and an entry into the pool. There are many things that can happen.
    // 1) the insertion goes as planned
    // 2) inserting an instruction pushes a previous pool-reference out of range, forcing a dump
    // 2a) there isn't a reasonable save point in the instruction stream. We need to save room for
    //     a guard instruction to branch over the pool.
    int insertEntryForwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) {
        // Advance the "current offset" by an inst, so everyone knows what their offset should be.
        uint32_t nextOffset = this->size() + instSize;
        uint32_t poolOffset = nextOffset;
        Pool *tmp;
        // If we need a guard instruction, reserve space for that.
        if (!perforatedNode)
            poolOffset += guardSize;
        // Also, take into account the size of the header that will be placed *after*
        // the guard instruction.
        poolOffset += headerSize;

        // Perform the necessary range checks.
        for (tmp = pools; tmp < &pools[numPoolKinds]; tmp++) {
            // The pool may wish for a particular alignment. Let's give it one.
            JS_ASSERT((tmp->getAlignment() & (tmp->getAlignment() - 1)) == 0);
            // The pool only needs said alignment *if* there are any entries in the pool.
            // WARNING: the pool needs said alignment if there are going to be entries in
            // the pool after this entry has been inserted.
            if (p == tmp)
                poolOffset = tmp->forceAlign(poolOffset);
            else
                poolOffset = tmp->align(poolOffset);

            // If we're at the pool we want to insert into, find a new limiter
            // before we do the range check.
            if (p == tmp) {
                p->updateLimiter(BufferOffset(nextOffset));
            }
            if (tmp->checkFull(poolOffset)) {
                // Uh-oh. DUMP DUMP DUMP.
                if (p != nullptr)
                    IonSpew(IonSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
                else
                    IonSpew(IonSpew_Pools, "[%d] Inserting instruction(%d) caused a spill", id, size());

                this->dumpPool();
                return this->insertEntryBackwards(instSize, inst, p, data);
            }
            // Include the size of this pool in the running total.
            if (p == tmp) {
                nextOffset += tmp->immSize;
            }
            nextOffset += tmp->immSize * tmp->numEntries;
        }
        if (p == nullptr) {
            return INT_MIN;
        }
        return p->insertEntry(data, this->nextOffset(), this->LifoAlloc_);
    }
    BufferOffset putInt(uint32_t value) {
        return insertEntry(sizeof(uint32_t) / sizeof(uint8_t), (uint8_t*)&value, nullptr, nullptr);
    }
    // Mark the current section as an area where we can
    // later go to dump a pool.
    void perforate() {
        // If we're filling the backreferences, we don't want to start looking for a new dump site.
        if (inBackref)
            return;
        if (canNotPlacePool)
            return;
        // If there is nothing in the pool, then it is strictly disadvantageous
        // to attempt to place a pool here.
        bool empty = true;
        for (int i = 0; i < numPoolKinds; i++) {
            if (pools[i].numEntries != 0) {
                empty = false;
                break;
            }
        }
        if (empty)
            return;
        perforatedNode = *getTail();
        perforation = this->nextOffset();
        Parent::perforate();
        IonSpew(IonSpew_Pools, "[%d] Adding a perforation at offset %d", id, perforation.getOffset());
    }

    // After a pool is finished, no more elements may be added to it. During this phase, we
    // will know the exact offsets to the pool entries, and those values should be written into
    // the given instructions.
    PoolInfo getPoolData() const {
        int prevOffset = getInfo(numDumps - 1).offset;
        int prevEnd = getInfo(numDumps - 1).finalPos;
        // Calculate the offset of the start of this pool.
        int perfOffset = perforation.assigned() ?
            perforation.getOffset() :
            this->nextOffset().getOffset() + this->guardSize;
        int initOffset = prevEnd + (perfOffset - prevOffset);
        int finOffset = initOffset;
        bool poolIsEmpty = true;
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            if (pools[poolIdx].numEntries != 0) {
                poolIsEmpty = false;
                break;
            }
            if (pools[poolIdx].other != nullptr && pools[poolIdx].other->numEntries != 0) {
                poolIsEmpty = false;
                break;
            }
        }
        if (!poolIsEmpty) {
            finOffset += headerSize;
            for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
                finOffset = pools[poolIdx].align(finOffset);
                finOffset += pools[poolIdx].numEntries * pools[poolIdx].immSize;
            }
            // And compute the necessary adjustments for the second half of the pool.
            for (int poolIdx = numPoolKinds - 1; poolIdx >= 0; poolIdx--) {
                finOffset = pools[poolIdx].other->align(finOffset);
                finOffset += pools[poolIdx].other->numEntries * pools[poolIdx].other->immSize;
            }
            finOffset += footerSize;
        }

        PoolInfo ret;
        ret.offset = perfOffset;
        ret.size = finOffset - initOffset;
        ret.finalPos = finOffset;
        ret.slice = perforatedNode;
        return ret;
    }
    void finishPool() {
        // This function should only be called while the backwards half of the pool
        // is being filled in. The backwards half of the pool is always in a state
        // where it is sane. Everything that needs to be done here is for "sanity's sake".
        // The per-buffer pools need to be reset, and we need to record the size of the pool.
        IonSpew(IonSpew_Pools, "[%d] Finishing pool %d", id, numDumps);
        JS_ASSERT(inBackref);
        PoolInfo newPoolInfo = getPoolData();
        if (newPoolInfo.size == 0) {
            // The code below also creates a new pool, but that is not necessary, since
            // the pools have not been modified at all.
            new (&perforation) BufferOffset();
            perforatedNode = nullptr;
            inBackref = false;
            IonSpew(IonSpew_Pools, "[%d] Aborting because the pool is empty", id);
            // Bail out early, since we don't want to even pretend these pools exist.
            return;
        }
        JS_ASSERT(perforatedNode != nullptr);
        if (numDumps >= (1 << logBasePoolInfo) && (numDumps & (numDumps - 1)) == 0) {
            // Need to resize.
            PoolInfo *tmp = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * numDumps * 2));
            if (tmp == nullptr) {
                this->fail_oom();
                return;
            }
            memcpy(tmp, poolInfo, sizeof(PoolInfo) * numDumps);
            poolInfo = tmp;

        }

        // In order to figure out how to fix up the loads for the second half of the pool,
        // we need to find where the bits of the pool that have been implemented end.
        int poolOffset = perforation.getOffset();
        int magicAlign = getInfo(numDumps - 1).finalPos - getInfo(numDumps - 1).offset;
        poolOffset += magicAlign;
        poolOffset += headerSize;
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            poolOffset = pools[poolIdx].align(poolOffset);
            poolOffset += pools[poolIdx].numEntries * pools[poolIdx].immSize;
        }
        mozilla::Array<LoadOffsets, 1 << poolKindBits> outcasts;
        mozilla::Array<uint8_t *, 1 << poolKindBits> outcastEntries;
        // All of the pool loads referred to by this code are going to
        // need fixing up here.
        int skippedBytes = 0;
        for (int poolIdx = numPoolKinds - 1; poolIdx >= 0; poolIdx--) {
            Pool *p = pools[poolIdx].other;
            JS_ASSERT(p != nullptr);
            unsigned int idx = p->numEntries - 1;
            // Allocate space for tracking information that needs to be propagated to the next pool,
            // as well as space for quickly updating the pool entries in the current pool to remove
            // the entries that don't actually fit. I probably should change this over to a vector.
            outcastEntries[poolIdx] = new uint8_t[p->getPoolSize()];
            bool *preservedEntries = new bool[p->numEntries];
            // Hacks on top of Hacks!
            // The patching code takes in the address of the instruction to be patched,
            // and the "address" of the element in the pool that we want to load.
            // However, since the code isn't actually in an array, we need to lie about
            // the address that the pool is in. Furthermore, since the offsets are
            // technically from the beginning of the FORWARD reference section, we have
            // to lie to ourselves about where this pool starts in order to make sure
            // the distance into the pool is interpreted correctly.
            // There is a more elegant way to fix this that will need to be implemented
            // eventually. We will want to provide the fixup function with a method to
            // convert from a 'token' into a pool offset.
            poolOffset = p->align(poolOffset);
            int numSkips = 0;
            int fakePoolOffset = poolOffset - pools[poolIdx].numEntries * pools[poolIdx].immSize;
            for (BufferOffset *iter = p->loadOffsets.end() - 1;
                 iter != p->loadOffsets.begin() - 1; --iter, --idx)
            {

                IonSpew(IonSpew_Pools, "[%d] Linking entry %d in pool %d", id, idx + pools[poolIdx].numEntries, poolIdx);
                JS_ASSERT(iter->getOffset() >= perforation.getOffset());
                // Everything here is known, so we can safely do the necessary substitutions.
                Inst *inst = this->getInst(*iter);
                // Manually compute the offset, including a possible bias.
                // Also take into account the whole size of the pool that is being placed.
                int codeOffset = fakePoolOffset - iter->getOffset() - newPoolInfo.size + numSkips * p->immSize - skippedBytes;
                // That is, patchConstantPoolLoad wants to be handed the address of the
                // pool entry that is being loaded. We need to do a non-trivial amount
                // of math here, since the pool that we've made does not actually reside there
                // in memory.
                IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign);
                if (!Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign)) {
                    // NOTE: if removing this entry happens to change the alignment of the next
                    // block, chances are you will have a bad time.
                    // ADDENDUM: this CANNOT happen on ARM, because the only elements that
                    // fall into this case are doubles loaded via vfp, but they will also be
                    // the last pool, which means it cannot affect the alignment of any other
                    // sub-pools.
                    IonSpew(IonSpew_Pools, "[%d] *** Offset (%d) was still out of range! ***", id, codeOffset - magicAlign);
                    IonSpew(IonSpew_Pools, "[%d] Too complicated; bailing", id);
                    this->fail_bail();
                    // Only free up to the current offset.
                    for (int pi = poolIdx; pi < numPoolKinds; pi++)
                        delete[] outcastEntries[pi];
                    delete[] preservedEntries;
                    return;
                } else {
                    preservedEntries[idx] = true;
                }
            }
            // Remove the elements of the pool that should not be there (YAY, MEMCPY).
            unsigned int idxDest = 0;
            // If no elements were skipped, no expensive copy is necessary.
            if (numSkips != 0) {
                for (idx = 0; idx < p->numEntries; idx++) {
                    if (preservedEntries[idx]) {
                        if (idx != idxDest) {
                            memcpy(&p->poolData[idxDest * p->immSize],
                                   &p->poolData[idx * p->immSize],
                                   p->immSize);
                        }
                        idxDest++;
                    }
                }
                p->numEntries -= numSkips;
            }
            poolOffset += p->numEntries * p->immSize;
            delete[] preservedEntries;
            preservedEntries = nullptr;
        }
        // Bind the current pool to the perforation point.
        Pool **tmp = &perforatedNode->data;
        *tmp = static_cast<Pool*>(this->LifoAlloc_.alloc(sizeof(Pool) * numPoolKinds));
        // Check the allocation itself, not the address of the slot it was stored in.
        if (*tmp == nullptr) {
            this->fail_oom();
            for (int pi = 0; pi < numPoolKinds; pi++)
                delete[] outcastEntries[pi];
            return;
        }
        // The above operations may have changed the size of pools!
        // Recalibrate the size of the pool.
        newPoolInfo = getPoolData();
        poolInfo[numDumps] = newPoolInfo;
        poolSize += poolInfo[numDumps].size;
        numDumps++;

        memcpy(*tmp, pools, sizeof(Pool) * numPoolKinds);

        // Reset everything to the state that it was in when we started.
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            if (!pools[poolIdx].reset(this->LifoAlloc_)) {
                this->fail_oom();
                for (int pi = 0; pi < numPoolKinds; pi++)
                    delete[] outcastEntries[pi];
                return;
            }
        }
        new (&perforation) BufferOffset();
        perforatedNode = nullptr;
        inBackref = false;

        // Now that the backwards pool has been emptied, and a new forward pool
        // has been allocated, it is time to populate the new forward pool with
        // any entries that couldn't fit in the backwards pool.
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            // Technically, the innermost pool will never have this issue, but it is easier
            // to just handle this case.
            // Since the pool entry was filled back-to-front, and in the next buffer the elements
            // should be front-to-back, this insertion also needs to proceed backwards.
            // idx tracks the element that iter points at, so it starts at the last entry.
            int idx = outcasts[poolIdx].length() - 1;
            for (BufferOffset *iter = outcasts[poolIdx].end() - 1;
                 iter != outcasts[poolIdx].begin() - 1;
                 --iter, --idx) {
                pools[poolIdx].updateLimiter(*iter);
                Inst *inst = this->getInst(*iter);
                Asm::insertTokenIntoTag(pools[poolIdx].instSize, (uint8_t*)inst, outcasts[poolIdx].end() - 1 - iter);
                pools[poolIdx].insertEntry(&outcastEntries[poolIdx][idx * pools[poolIdx].immSize], *iter, this->LifoAlloc_);
            }
            delete[] outcastEntries[poolIdx];
        }
        // This (*2) is not technically kosher, but I want to get this bug fixed.
        // It should actually be guardSize + the size of the instruction that we're attempting
        // to insert. Unfortunately that value is never passed in. On ARM, these instructions
        // are always 4 bytes, so guardSize is legit to use.
        poolOffset = this->size() + guardSize * 2;
        poolOffset += headerSize;
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            // There can still be an awkward situation where the element that triggered the
            // initial dump didn't fit into the pool backwards, and now still does not fit into
            // this pool. Then it is necessary to go and dump this pool (note: this is almost
            // certainly being called from dumpPool()).
            poolOffset = pools[poolIdx].align(poolOffset);
            if (pools[poolIdx].checkFull(poolOffset)) {
                // ONCE AGAIN, UH-OH, TIME TO BAIL.
                dumpPool();
                break;
            }
            poolOffset += pools[poolIdx].getPoolSize();
        }
    }

    void dumpPool() {
        JS_ASSERT(!inBackref);
        IonSpew(IonSpew_Pools, "[%d] Attempting to dump the pool", id);
        PoolInfo newPoolInfo = getPoolData();
        if (newPoolInfo.size == 0) {
            // If there is no data in the pool being dumped, don't dump anything.
            inBackref = true;
            IonSpew(IonSpew_Pools, "[%d] Abort, no pool data", id);
            return;
        }

        IonSpew(IonSpew_Pools, "[%d] Dumping %d bytes", id, newPoolInfo.size);
        if (!perforation.assigned()) {
            IonSpew(IonSpew_Pools, "[%d] No perforation point selected, generating a new one", id);
            // There isn't a perforation here; we need to dump the pool with a guard.
            BufferOffset branch = this->nextOffset();
            bool shouldMarkAsBranch = this->isNextBranch();
            this->markNextAsBranch();
            this->putBlob(guardSize, nullptr);
            BufferOffset afterPool = this->nextOffset();
            Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
            markGuard();
            perforatedNode->isNatural = false;
            if (shouldMarkAsBranch)
                this->markNextAsBranch();
        }

        // We have a perforation. Time to cut the instruction stream, patch in the pool,
        // and possibly re-arrange the pool to accommodate its new location.
        int poolOffset = perforation.getOffset();
        int magicAlign = getInfo(numDumps - 1).finalPos - getInfo(numDumps - 1).offset;
        poolOffset += magicAlign;
        poolOffset += headerSize;
        for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
            mozilla::DebugOnly<bool> beforePool = true;
            Pool *p = &pools[poolIdx];
            // Any entries that happened to be after the place we put our pool will need to be
            // switched from the forward-referenced pool to the backward-referenced pool.
            int idx = 0;
            for (BufferOffset *iter = p->loadOffsets.begin();
                 iter != p->loadOffsets.end(); ++iter, ++idx)
            {
                if (iter->getOffset() >= perforation.getOffset()) {
                    IonSpew(IonSpew_Pools, "[%d] Pushing entry %d in pool %d into the backwards section.", id, idx, poolIdx);
                    // Insert this into the rear part of the pool.
                    int offset = idx * p->immSize;
                    p->other->insertEntry(&p->poolData[offset], BufferOffset(*iter), this->LifoAlloc_);
                    // Update the limiting entry for this pool.
                    p->other->updateLimiter(*iter);

                    // Update the current pool to report fewer entries. They are now in the
                    // backwards section.
                    p->numEntries--;
                    beforePool = false;
                } else {
                    JS_ASSERT(beforePool);
                    // Align the pool offset to the alignment of this pool.
                    // It already only aligns when the pool has data in it, but we want to not
                    // align when all entries will end up in the backwards half of the pool.
                    poolOffset = p->align(poolOffset);
                    IonSpew(IonSpew_Pools, "[%d] Entry %d in pool %d is before the pool.", id, idx, poolIdx);
                    // Everything here is known, so we can safely do the necessary substitutions.
                    Inst *inst = this->getInst(*iter);
                    // We need to manually compute the offset, including a possible bias.
                    int codeOffset = poolOffset - iter->getOffset();
                    // That is, patchConstantPoolLoad wants to be handed the address of the
                    // pool entry that is being loaded. We need to do a non-trivial amount
                    // of math here, since the pool that we've made does not actually reside there
                    // in memory.
                    IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign);
                    Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign);
                }
            }
            // Some number of entries have been positively identified as being
            // in this section of the pool. Before processing the next pool,
            // update the offset from the beginning of the buffer.
            poolOffset += p->numEntries * p->immSize;
        }
        poolOffset = footerSize;
        inBackref = true;
        for (int poolIdx = numPoolKinds - 1; poolIdx >= 0; poolIdx--) {
            Pool *tmp = pools[poolIdx].other;
            if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) {
                // GNAAAH. While we rotated elements into the back half, one of them filled up.
                // Now, dumping the back half is necessary...
                finishPool();
                break;
            }
        }
    }

    void flushPool() {
        if (this->oom())
            return;
        IonSpew(IonSpew_Pools, "[%d] Requesting a pool flush", id);
        if (!inBackref)
            dumpPool();
        finishPool();
    }
    void patchBranch(Inst *i, int curpool, BufferOffset branch) {
        const Inst *ci = i;
        ptrdiff_t offset = Asm::getBranchOffset(ci);
        // If the offset is 0, then there is nothing to do.
        if (offset == 0)
            return;
        int destOffset = branch.getOffset() + offset;
        if (offset > 0) {

            while (curpool < numDumps && poolInfo[curpool].offset <= destOffset) {
                offset += poolInfo[curpool].size;
                curpool++;
            }
        } else {
            // Ignore the pool that comes next, since this is a backwards branch.
            curpool--;
            while (curpool >= 0 && poolInfo[curpool].offset > destOffset) {
                offset -= poolInfo[curpool].size;
                curpool--;
            }
            // Can't assert anything here, since the first pool may be after the target.
        }
        Asm::retargetNearBranch(i, offset, false);
    }

    // Mark the next instruction as a valid guard. This means we can place a pool here.
    void markGuard() {
        // If we are in a no-pool zone, then there is no point in dog-earing
        // this branch as a place to go back to.
        if (canNotPlacePool)
            return;
        // There is no point in trying to grab a new slot if we've already
        // found one and are in the process of filling it in.
        if (inBackref)
            return;
        perforate();
    }
    void enterNoPool() {
        if (!canNotPlacePool && !perforation.assigned()) {
            // Embarrassing mode: the Assembler requests the start of a no-pool section
            // and there have been no valid places that a pool could be dumped thus far.
            // If a pool were to fill up before this no-pool section ends, we would need to go back
            // in the stream and insert a pool guard after the fact. This is feasible, but
            // for now, it is easier to just allocate a junk instruction, default it to a nop, and
            // finally, if the pool *is* needed, patch the nop into a pool guard.
            // What the assembler requests:

            // #request no-pool zone
            // push pc
            // blx r12
            // #end no-pool zone

            // However, if we would need to insert a pool, and there is no perforation point...
            // So, actual generated code:

            // b next; <= perforation point
            // next:
            // #beginning of no-pool zone
            // push pc
            // blx r12

            BufferOffset branch = this->nextOffset();
            this->markNextAsBranch();
            this->putBlob(guardSize, nullptr);
            BufferOffset afterPool = this->nextOffset();
            Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
            markGuard();
            if (perforatedNode != nullptr)
                perforatedNode->isNatural = false;
        }
        canNotPlacePool++;
    }
    void leaveNoPool() {
        canNotPlacePool--;
    }
    int size() const {
        return uncheckedSize();
    }
    Pool *getPool(int idx) {
        return &pools[idx];
    }
    void markNextAsBranch() {
        // If the previous thing inserted was the last instruction of
        // the node, then whoops, we want to mark the first instruction of
        // the next node.
        this->ensureSpace(InstBaseSize);
        JS_ASSERT(*this->getTail() != nullptr);
        (*this->getTail())->markNextAsBranch();
    }
    bool isNextBranch() {
        JS_ASSERT(*this->getTail() != nullptr);
        return (*this->getTail())->isNextBranch();
    }

    int uncheckedSize() const {
        PoolInfo pi = getPoolData();
        int codeEnd = this->nextOffset().getOffset();
        return (codeEnd - pi.offset) + pi.finalPos;
    }
    ptrdiff_t curDumpsite;
    void resetCounter() {
        curDumpsite = 0;
    }
    ptrdiff_t poolSizeBefore(ptrdiff_t offset) const {
        int cur = 0;
        while (cur < numDumps && poolInfo[cur].offset <= offset)
            cur++;
        // poolInfo[cur].offset is now larger than the given offset; either this is the
        // first pool, or the previous one is the last one we care about.
        if (cur == 0)
            return 0;
        return poolInfo[cur - 1].finalPos - poolInfo[cur - 1].offset;
    }
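
    // Example with illustrative numbers: if a single 24-byte pool was dumped
    // and recorded as poolInfo[0] = { offset: 100, size: 24, finalPos: 124 },
    // then poolSizeBefore(50) == 0 (no pool precedes instruction-offset 50),
    // while poolSizeBefore(200) == 124 - 100 == 24.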

  private:
    void getPEPool(PoolEntry pe, Pool **retP, int32_t *retOffset, int32_t *poolNum) const {
        int poolKind = pe.poolKind();
        Pool *p = nullptr;
        uint32_t offset = pe.offset() * pools[poolKind].immSize;
        int idx;
        for (idx = 0; idx < numDumps; idx++) {
            p = &poolInfo[idx].slice->data[poolKind];
            if (p->getPoolSize() > offset)
                break;
            offset -= p->getPoolSize();
            p = p->other;
            if (p->getPoolSize() > offset)
                break;
            offset -= p->getPoolSize();
            p = nullptr;
        }
        if (poolNum != nullptr)
            *poolNum = idx;
        // If this offset is contained in any finished pool, forward or backwards, p now
        // points to that pool. If it is not in any pool (it should then be in the currently
        // building pool), p is nullptr.
        if (p == nullptr) {
            p = &pools[poolKind];
            if (offset >= p->getPoolSize()) {
                // Subtract the forward half's size *before* switching to the
                // backward half, so the remaining offset indexes into it.
                offset -= p->getPoolSize();
                p = p->other;
            }
        }
        JS_ASSERT(p != nullptr);
        JS_ASSERT(offset < p->getPoolSize());
        *retP = p;
        *retOffset = offset;
    }
    uint8_t *getPoolEntry(PoolEntry pe) {
        Pool *p;
        int32_t offset;
        getPEPool(pe, &p, &offset, nullptr);
        return &p->poolData[offset];
    }
    size_t getPoolEntrySize(PoolEntry pe) {
        int idx = pe.poolKind();
        return pools[idx].immSize;
    }

  public:
    uint32_t poolEntryOffset(PoolEntry pe) const {
        Pool *realPool;
        // offset is in bytes, not entries.
        int32_t offset;
        int32_t poolNum;
        getPEPool(pe, &realPool, &offset, &poolNum);
        PoolInfo *pi = &poolInfo[poolNum];
        Pool *poolGroup = pi->slice->data;
        uint32_t start = pi->finalPos - pi->size + headerSize;
        // The order of the pools is:
        // A B C C_Rev B_Rev A_Rev, so in the initial pass,
        // go through the pools forwards, and in the second pass
        // go through them in reverse order.
        for (int idx = 0; idx < numPoolKinds; idx++) {
            if (&poolGroup[idx] == realPool) {
                return start + offset;
            }
            start = poolGroup[idx].addPoolSize(start);
        }
        for (int idx = numPoolKinds - 1; idx >= 0; idx--) {
            if (poolGroup[idx].other == realPool) {
                return start + offset;
            }
            start = poolGroup[idx].other->addPoolSize(start);
        }
        MOZ_ASSUME_UNREACHABLE("Entry is not in a pool");
    }
    void writePoolEntry(PoolEntry pe, uint8_t *buff) {
        size_t size = getPoolEntrySize(pe);
        uint8_t *entry = getPoolEntry(pe);
        memcpy(entry, buff, size);
    }
    void readPoolEntry(PoolEntry pe, uint8_t *buff) {
        size_t size = getPoolEntrySize(pe);
        uint8_t *entry = getPoolEntry(pe);
        memcpy(buff, entry, size);
    }

};
} // namespace jit
} // namespace js
#endif /* jit_shared_IonAssemblerBufferWithConstantPools_h */