/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef vm_ObjectImpl_h
#define vm_ObjectImpl_h

#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"

#include <stdint.h>

#include "jsfriendapi.h"
#include "jsinfer.h"
#include "NamespaceImports.h"

#include "gc/Barrier.h"
#include "gc/Heap.h"
#include "gc/Marking.h"
#include "js/Value.h"
#include "vm/NumericConversions.h"
#include "vm/Shape.h"
#include "vm/String.h"

namespace js {

class ObjectImpl;
class Nursery;
class Shape;

/*
 * To really poison a set of values, using 'magic' or 'undefined' isn't good
 * enough: buggy code will often just ignore such values in debug builds and
 * crash on them in release builds (see bug 629974). Instead, we use a
 * safe-for-crash pointer that faults the moment it is touched.
 */
static MOZ_ALWAYS_INLINE void
Debug_SetValueRangeToCrashOnTouch(Value *beg, Value *end)
{
#ifdef DEBUG
    for (Value *v = beg; v != end; ++v)
        v->setObject(*reinterpret_cast<JSObject *>(0x42));
#endif
}

static MOZ_ALWAYS_INLINE void
Debug_SetValueRangeToCrashOnTouch(Value *vec, size_t len)
{
#ifdef DEBUG
    Debug_SetValueRangeToCrashOnTouch(vec, vec + len);
#endif
}

static MOZ_ALWAYS_INLINE void
Debug_SetValueRangeToCrashOnTouch(HeapValue *vec, size_t len)
{
#ifdef DEBUG
    Debug_SetValueRangeToCrashOnTouch((Value *) vec, len);
#endif
}

static MOZ_ALWAYS_INLINE void
Debug_SetSlotRangeToCrashOnTouch(HeapSlot *vec, uint32_t len)
{
#ifdef DEBUG
    Debug_SetValueRangeToCrashOnTouch((Value *) vec, len);
#endif
}

static MOZ_ALWAYS_INLINE void
Debug_SetSlotRangeToCrashOnTouch(HeapSlot *begin, HeapSlot *end)
{
#ifdef DEBUG
    Debug_SetValueRangeToCrashOnTouch((Value *) begin, end - begin);
#endif
}
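
/*
 * Illustrative sketch (not part of the API): in a debug build, a caller that
 * abandons a range of slots can poison it so that any later, buggy read
 * dereferences the bogus 0x42 "object" above and crashes immediately instead
 * of silently reading stale data.  With hypothetical values:
 *
 *   HeapSlot *oldSlots = ...;   // storage about to be released
 *   uint32_t oldCount = ...;
 *   Debug_SetSlotRangeToCrashOnTouch(oldSlots, oldCount);
 *
 * ObjectImpl::invalidateSlotRange() below uses these helpers in just this way;
 * in release builds every overload compiles away to nothing.
 */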

class ArrayObject;

/*
 * ES6 20130308 draft 8.4.2.4 ArraySetLength.
 *
 * |id| must be "length", |attrs| are the attributes to be used for the newly-
 * changed length property, |value| is the value for the new length, and
 * |setterIsStrict| indicates whether invalid changes will cause a TypeError
 * to be thrown.
 */
template <ExecutionMode mode>
extern bool
ArraySetLength(typename ExecutionModeTraits<mode>::ContextType cx,
               Handle<ArrayObject*> obj, HandleId id,
               unsigned attrs, HandleValue value, bool setterIsStrict);

/*
 * Elements header used for all objects. The elements component of such objects
 * offers an efficient representation for all or some of the indexed properties
 * of the object, using a flat array of Values rather than a shape hierarchy
 * stored in the object's slots. This structure is immediately followed by an
 * array of elements, with the elements member in an object pointing to the
 * beginning of that array (the end of this structure). See below for usage of
 * this structure.
 *
 * The sets of properties represented by an object's elements and slots
 * are disjoint. The elements contain only indexed properties, while the slots
 * can contain both named and indexed properties; any indexes in the slots are
 * distinct from those in the elements. If isIndexed() is false for an object,
 * all indexed properties (if any) are stored in the dense elements.
 *
 * Indexes will be stored in the object's slots instead of its elements in
 * the following cases:
 *  - there are more than MIN_SPARSE_INDEX slots total and the load factor
 *    (COUNT / capacity) is less than 0.25
 *  - a property is defined that has non-default property attributes.
 *
 * We track these pieces of metadata for dense elements:
 *  - The length property as a uint32_t, accessible for array objects with
 *    ArrayObject::{length,setLength}(). This is unused for non-arrays.
 *  - The number of element slots (capacity), gettable with getDenseCapacity().
 *  - The array's initialized length, accessible with
 *    getDenseInitializedLength().
 *
 * Holes in the array are represented by MagicValue(JS_ELEMENTS_HOLE) values.
 * These indicate indexes which are not dense properties of the array. The
 * property may, however, be held by the object's properties.
 *
 * The capacity and length of an object's elements are almost entirely
 * unrelated! In general the length may be greater than, less than, or equal
 * to the capacity. The first case occurs with |new Array(100)|: the length
 * is 100, but the capacity remains 0 (indices below length and above capacity
 * must be treated as holes) until elements between capacity and length are
 * set. The other two cases are common, depending upon the number of elements
 * in an array and the underlying allocator used for element storage.
 *
 * The only case in which the capacity and length of an object's elements are
 * related is when the object is an array with non-writable length. In this
 * case the capacity is always less than or equal to the length. This permits
 * JIT code to optimize away the check for non-writable length when assigning
 * to possibly out-of-range elements: such code already has to check for
 * |index < capacity|, and fallback code checks for non-writable length.
 *
 * The initialized length of an object specifies the number of elements that
 * have been initialized. All elements above the initialized length are
 * holes in the object, and the memory for all elements between the initialized
 * length and capacity is left uninitialized. The initialized length is some
 * value less than or equal to both the object's length and the object's
 * capacity.
 *
 * There is some flexibility in the exact value the initialized length may
 * hold: e.g. if an array has length 5 and capacity 10 but is completely empty,
 * it is valid for the initialized length to be any value between zero and 5,
 * as long as the in-memory values below the initialized length have been
 * initialized with a hole value. However, in such cases we want to keep the
 * initialized length as small as possible: if the object is known to have no
 * hole values below its initialized length, then it is "packed" and can be
 * accessed much faster by JIT code.
 *
 * Elements do not track property creation order, so enumerating the elements
 * of an object does not necessarily visit indexes in the order they were
 * created.
 */
class ObjectElements
{
  public:
    enum Flags {
        CONVERT_DOUBLE_ELEMENTS = 0x1,

        // Present only if these elements correspond to an array with
        // non-writable length; never present for non-arrays.
        NONWRITABLE_ARRAY_LENGTH = 0x2
    };

  private:
    friend class ::JSObject;
    friend class ObjectImpl;
    friend class ArrayObject;
    friend class Nursery;

    template <ExecutionMode mode>
    friend bool
    ArraySetLength(typename ExecutionModeTraits<mode>::ContextType cx,
                   Handle<ArrayObject*> obj, HandleId id,
                   unsigned attrs, HandleValue value, bool setterIsStrict);

    /* See Flags enum above. */
    uint32_t flags;

    /*
     * Number of initialized elements. This is <= the capacity, and for arrays
     * is <= the length. Memory for elements above the initialized length is
     * uninitialized, but values between the initialized length and the proper
     * length are conceptually holes.
     */
    uint32_t initializedLength;

    /* Number of allocated slots. */
    uint32_t capacity;

    /* 'length' property of array objects, unused for other objects. */
    uint32_t length;

    void staticAsserts() {
        static_assert(sizeof(ObjectElements) == VALUES_PER_HEADER * sizeof(Value),
                      "Elements size and values-per-Elements mismatch");
    }

    bool shouldConvertDoubleElements() const {
        return flags & CONVERT_DOUBLE_ELEMENTS;
    }
    void setShouldConvertDoubleElements() {
        flags |= CONVERT_DOUBLE_ELEMENTS;
    }
    void clearShouldConvertDoubleElements() {
        flags &= ~CONVERT_DOUBLE_ELEMENTS;
    }
    bool hasNonwritableArrayLength() const {
        return flags & NONWRITABLE_ARRAY_LENGTH;
    }
    void setNonwritableArrayLength() {
        flags |= NONWRITABLE_ARRAY_LENGTH;
    }

  public:
    MOZ_CONSTEXPR ObjectElements(uint32_t capacity, uint32_t length)
      : flags(0), initializedLength(0), capacity(capacity), length(length)
    {}

    HeapSlot *elements() {
        return reinterpret_cast<HeapSlot*>(uintptr_t(this) + sizeof(ObjectElements));
    }
    static ObjectElements * fromElements(HeapSlot *elems) {
        return reinterpret_cast<ObjectElements*>(uintptr_t(elems) - sizeof(ObjectElements));
    }

    static int offsetOfFlags() {
        return int(offsetof(ObjectElements, flags)) - int(sizeof(ObjectElements));
    }
    static int offsetOfInitializedLength() {
        return int(offsetof(ObjectElements, initializedLength)) - int(sizeof(ObjectElements));
    }
    static int offsetOfCapacity() {
        return int(offsetof(ObjectElements, capacity)) - int(sizeof(ObjectElements));
    }
    static int offsetOfLength() {
        return int(offsetof(ObjectElements, length)) - int(sizeof(ObjectElements));
    }

    static bool ConvertElementsToDoubles(JSContext *cx, uintptr_t elements);

    static const size_t VALUES_PER_HEADER = 2;
};
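
/*
 * Illustrative memory layout for the header above: its four uint32_t fields
 * occupy VALUES_PER_HEADER (two) Values and are immediately followed by the
 * element storage itself.
 *
 *   +-------+-------------------+----------+--------+-----------+-----------+--
 *   | flags | initializedLength | capacity | length | element 0 | element 1 | ...
 *   +-------+-------------------+----------+--------+-----------+-----------+--
 *                                                    ^
 *                                                    obj->elements
 *
 * An object's |elements| pointer refers to element 0, so elements() adds
 * sizeof(ObjectElements) to |this| and fromElements() subtracts it to recover
 * the header; the offsetOf*() helpers likewise return negative values because
 * they are offsets from the |elements| pointer rather than from the header.
 */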

/* Shared singleton for objects with no elements. */
extern HeapSlot *const emptyObjectElements;

struct Class;
struct GCMarker;
struct ObjectOps;
class Shape;

class NewObjectCache;
class TaggedProto;

inline Value
ObjectValue(ObjectImpl &obj);

#ifdef DEBUG
static inline bool
IsObjectValueInCompartment(js::Value v, JSCompartment *comp);
#endif

/*
 * ObjectImpl specifies the internal implementation of an object. (In contrast
 * JSObject specifies an "external" interface, at the conceptual level of that
 * exposed in ECMAScript.)
 *
 * The |shape_| member stores the shape of the object, which includes the
 * object's class and the layout of all its properties.
 *
 * The |type_| member stores the type of the object, which contains its
 * prototype object and the possible types of its properties.
 *
 * The rest of the object stores its named properties and indexed elements.
 * These are stored separately from one another. Objects are followed by a
 * variable-sized array of values for inline storage, which may be used by
 * properties of native objects (fixed slots), by elements (fixed elements),
 * or by other data for certain kinds of objects, such as ArrayBufferObjects
 * and TypedArrayObjects.
 *
 * Two native objects with the same shape are guaranteed to have the same
 * number of fixed slots.
 *
 * Named property storage can be split between fixed slots and a dynamically
 * allocated array (the slots member). For an object with N fixed slots, shapes
 * with slots [0..N-1] are stored in the fixed slots, and the remainder are
 * stored in the dynamic array. If all properties fit in the fixed slots, the
 * 'slots' member is nullptr.
 *
 * Elements are indexed via the 'elements' member. This member can point to
 * the shared emptyObjectElements singleton, into the inline value array (the
 * address of the third value, to leave room for an ObjectElements header; in
 * this case numFixedSlots() is zero), or to a dynamically allocated array.
 *
 * Only certain combinations of slots and elements storage are possible.
 *
 * - For native objects, slots and elements may both be non-empty. The
 *   slots may be either names or indexes; no indexed property will be in both
 *   the slots and elements.
 *
 * - For non-native objects, slots and elements are both empty.
 *
 * The members of this class are currently protected; in the long run this
 * will change so that some members are private, and only certain methods that
 * act upon them will be protected.
 */
class ObjectImpl : public gc::BarrieredCell<ObjectImpl>
{
    friend Zone *js::gc::BarrieredCell<ObjectImpl>::zone() const;
    friend Zone *js::gc::BarrieredCell<ObjectImpl>::zoneFromAnyThread() const;

  protected:
    /*
     * Shape of the object, encodes the layout of the object's properties and
     * all other information about its structure. See vm/Shape.h.
     */
    HeapPtrShape shape_;

    /*
     * The object's type and prototype. For objects with the LAZY_TYPE flag
     * set, this is the prototype's default 'new' type and can only be used
     * to get that prototype.
     */
    HeapPtrTypeObject type_;

    HeapSlot *slots;     /* Slots for object properties. */
    HeapSlot *elements;  /* Slots for object elements. */

    friend bool
    ArraySetLength(JSContext *cx, Handle<ArrayObject*> obj, HandleId id, unsigned attrs,
                   HandleValue value, bool setterIsStrict);

  private:
    static void staticAsserts() {
        static_assert(sizeof(ObjectImpl) == sizeof(shadow::Object),
                      "shadow interface must match actual implementation");
        static_assert(sizeof(ObjectImpl) % sizeof(Value) == 0,
                      "fixed slots after an object must be aligned");

        static_assert(offsetof(ObjectImpl, shape_) == offsetof(shadow::Object, shape),
                      "shadow shape must match actual shape");
        static_assert(offsetof(ObjectImpl, type_) == offsetof(shadow::Object, type),
                      "shadow type must match actual type");
        static_assert(offsetof(ObjectImpl, slots) == offsetof(shadow::Object, slots),
                      "shadow slots must match actual slots");
        static_assert(offsetof(ObjectImpl, elements) == offsetof(shadow::Object, _1),
                      "shadow placeholder must match actual elements");
    }

    JSObject * asObjectPtr() { return reinterpret_cast<JSObject *>(this); }
    const JSObject * asObjectPtr() const { return reinterpret_cast<const JSObject *>(this); }

    friend inline Value ObjectValue(ObjectImpl &obj);

    /* These functions are public, and they should remain public. */

  public:
    TaggedProto getTaggedProto() const {
        return type_->proto();
    }

    bool hasTenuredProto() const;

    const Class *getClass() const {
        return type_->clasp();
    }

    static inline bool
    isExtensible(ExclusiveContext *cx, Handle<ObjectImpl*> obj, bool *extensible);

    // Indicates whether a non-proxy is extensible. Don't call on proxies!
    // This method really shouldn't exist -- but there are a few internal
    // places that want it (JITs and the like), and it'd be a pain to mark them
    // all as friends.
    bool nonProxyIsExtensible() const {
        MOZ_ASSERT(!isProxy());

        // [[Extensible]] for ordinary non-proxy objects is an object flag.
        return !lastProperty()->hasObjectFlag(BaseShape::NOT_EXTENSIBLE);
    }

#ifdef DEBUG
    bool isProxy() const;
#endif

    // Attempt to change the [[Extensible]] bit on |obj| to false. Callers
    // must ensure that |obj| is currently extensible before calling this!
    static bool
    preventExtensions(JSContext *cx, Handle<ObjectImpl*> obj);

    HeapSlotArray getDenseElements() {
        JS_ASSERT(isNative());
        return HeapSlotArray(elements);
    }
    const Value &getDenseElement(uint32_t idx) {
        JS_ASSERT(isNative());
        MOZ_ASSERT(idx < getDenseInitializedLength());
        return elements[idx];
    }
    bool containsDenseElement(uint32_t idx) {
        JS_ASSERT(isNative());
        return idx < getDenseInitializedLength() && !elements[idx].isMagic(JS_ELEMENTS_HOLE);
    }
    uint32_t getDenseInitializedLength() {
        JS_ASSERT(getClass()->isNative());
        return getElementsHeader()->initializedLength;
    }
    uint32_t getDenseCapacity() {
        JS_ASSERT(getClass()->isNative());
        return getElementsHeader()->capacity;
    }

  protected:
#ifdef DEBUG
    void checkShapeConsistency();
#else
    void checkShapeConsistency() { }
#endif

    Shape *
    replaceWithNewEquivalentShape(ThreadSafeContext *cx,
                                  Shape *existingShape, Shape *newShape = nullptr);

    enum GenerateShape {
        GENERATE_NONE,
        GENERATE_SHAPE
    };

    bool setFlag(ExclusiveContext *cx, /*BaseShape::Flag*/ uint32_t flag,
                 GenerateShape generateShape = GENERATE_NONE);
    bool clearFlag(ExclusiveContext *cx, /*BaseShape::Flag*/ uint32_t flag);

    bool toDictionaryMode(ThreadSafeContext *cx);

  private:
    friend class Nursery;

    /*
     * Get internal pointers to the range of values starting at start and
     * running for length.
     */
    void getSlotRangeUnchecked(uint32_t start, uint32_t length,
                               HeapSlot **fixedStart, HeapSlot **fixedEnd,
                               HeapSlot **slotsStart, HeapSlot **slotsEnd)
    {
        MOZ_ASSERT(start + length >= start);

        uint32_t fixed = numFixedSlots();
        if (start < fixed) {
            if (start + length < fixed) {
                *fixedStart = &fixedSlots()[start];
                *fixedEnd = &fixedSlots()[start + length];
                *slotsStart = *slotsEnd = nullptr;
            } else {
                uint32_t localCopy = fixed - start;
                *fixedStart = &fixedSlots()[start];
                *fixedEnd = &fixedSlots()[start + localCopy];
                *slotsStart = &slots[0];
                *slotsEnd = &slots[length - localCopy];
            }
        } else {
            *fixedStart = *fixedEnd = nullptr;
            *slotsStart = &slots[start - fixed];
            *slotsEnd = &slots[start - fixed + length];
        }
    }
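
    /*
     * Worked example with hypothetical numbers: for an object whose
     * numFixedSlots() is 4, getSlotRangeUnchecked(2, 5, ...) straddles both
     * kinds of storage and yields
     *
     *   fixedStart = &fixedSlots()[2], fixedEnd = &fixedSlots()[4]  (slots 2..3)
     *   slotsStart = &slots[0],        slotsEnd = &slots[3]         (slots 4..6)
     *
     * i.e. the first localCopy = 2 values live in the inline fixed slots and
     * the remaining 3 in the dynamically allocated |slots| array.
     */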

    void getSlotRange(uint32_t start, uint32_t length,
                      HeapSlot **fixedStart, HeapSlot **fixedEnd,
                      HeapSlot **slotsStart, HeapSlot **slotsEnd)
    {
        MOZ_ASSERT(slotInRange(start + length, SENTINEL_ALLOWED));
        getSlotRangeUnchecked(start, length, fixedStart, fixedEnd, slotsStart, slotsEnd);
    }

  protected:
    friend struct GCMarker;
    friend class Shape;
    friend class NewObjectCache;

    void invalidateSlotRange(uint32_t start, uint32_t length) {
#ifdef DEBUG
        HeapSlot *fixedStart, *fixedEnd, *slotsStart, *slotsEnd;
        getSlotRange(start, length, &fixedStart, &fixedEnd, &slotsStart, &slotsEnd);
        Debug_SetSlotRangeToCrashOnTouch(fixedStart, fixedEnd);
        Debug_SetSlotRangeToCrashOnTouch(slotsStart, slotsEnd);
#endif /* DEBUG */
    }

    void initializeSlotRange(uint32_t start, uint32_t count);

    /*
     * Initialize a flat array of slots to this object at a start slot. The
     * caller must ensure there are enough slots.
     */
    void initSlotRange(uint32_t start, const Value *vector, uint32_t length);

    /*
     * Copy a flat array of slots to this object at a start slot. Caller must
     * ensure there are enough slots in this object.
     */
    void copySlotRange(uint32_t start, const Value *vector, uint32_t length);

#ifdef DEBUG
    enum SentinelAllowed {
        SENTINEL_NOT_ALLOWED,
        SENTINEL_ALLOWED
    };

    /*
     * Check that slot is in range for the object's allocated slots.
     * If sentinelAllowed then slot may equal the slot capacity.
     */
    bool slotInRange(uint32_t slot, SentinelAllowed sentinel = SENTINEL_NOT_ALLOWED) const;
#endif

    /*
     * Minimum size for dynamically allocated slots in normal Objects.
     * ArrayObjects don't use this limit and can have a lower slot capacity,
     * since they normally don't have a lot of slots.
     */
    static const uint32_t SLOT_CAPACITY_MIN = 8;

    HeapSlot *fixedSlots() const {
        return reinterpret_cast<HeapSlot *>(uintptr_t(this) + sizeof(ObjectImpl));
    }

    /*
     * These functions are currently public for simplicity; in the long run
     * it may make sense to make at least some of them private.
     */

  public:
    Shape * lastProperty() const {
        MOZ_ASSERT(shape_);
        return shape_;
    }

    bool generateOwnShape(ThreadSafeContext *cx, js::Shape *newShape = nullptr) {
        return replaceWithNewEquivalentShape(cx, lastProperty(), newShape);
    }

    JSCompartment *compartment() const {
        return lastProperty()->base()->compartment();
    }

    bool isNative() const {
        return lastProperty()->isNative();
    }

    types::TypeObject *type() const {
        MOZ_ASSERT(!hasLazyType());
        return typeRaw();
    }

    types::TypeObject *typeRaw() const {
        return type_;
    }

    uint32_t numFixedSlots() const {
        return reinterpret_cast<const shadow::Object *>(this)->numFixedSlots();
    }

    /*
     * Whether this is the only object which has its specified type. This
     * object will have its type constructed lazily as needed by analysis.
     */
    bool hasSingletonType() const {
        return !!type_->singleton();
    }

    /*
     * Whether the object's type has not been constructed yet. If an object
     * might have a lazy type, use getType() below, otherwise type().
     */
    bool hasLazyType() const {
        return type_->lazy();
    }

    uint32_t slotSpan() const {
        if (inDictionaryMode())
            return lastProperty()->base()->slotSpan();
        return lastProperty()->slotSpan();
    }

    /* Compute dynamicSlotsCount() for this object. */
    uint32_t numDynamicSlots() const {
        return dynamicSlotsCount(numFixedSlots(), slotSpan(), getClass());
    }

    Shape *nativeLookup(ExclusiveContext *cx, jsid id);
    Shape *nativeLookup(ExclusiveContext *cx, PropertyName *name) {
        return nativeLookup(cx, NameToId(name));
    }

    bool nativeContains(ExclusiveContext *cx, jsid id) {
        return nativeLookup(cx, id) != nullptr;
    }
    bool nativeContains(ExclusiveContext *cx, PropertyName* name) {
        return nativeLookup(cx, name) != nullptr;
    }
    bool nativeContains(ExclusiveContext *cx, Shape* shape) {
        return nativeLookup(cx, shape->propid()) == shape;
    }

    /* Contextless; can be called from parallel code. */
    Shape *nativeLookupPure(jsid id);
    Shape *nativeLookupPure(PropertyName *name) {
        return nativeLookupPure(NameToId(name));
    }

    bool nativeContainsPure(jsid id) {
        return nativeLookupPure(id) != nullptr;
    }
    bool nativeContainsPure(PropertyName* name) {
        return nativeContainsPure(NameToId(name));
    }
    bool nativeContainsPure(Shape* shape) {
        return nativeLookupPure(shape->propid()) == shape;
    }

    const JSClass *getJSClass() const {
        return Jsvalify(getClass());
    }
    bool hasClass(const Class *c) const {
        return getClass() == c;
    }
    const ObjectOps *getOps() const {
        return &getClass()->ops;
    }

    /*
     * An object is a delegate if it is on another object's prototype or scope
     * chain, and therefore the delegate might be asked implicitly to get or
     * set a property on behalf of another object. Delegates may be accessed
     * directly too, as may any object, but only those objects linked after the
     * head of any prototype or scope chain are flagged as delegates. This
     * definition helps to optimize shape-based property cache invalidation
     * (see Purge{Scope,Proto}Chain in jsobj.cpp).
     */
    bool isDelegate() const {
        return lastProperty()->hasObjectFlag(BaseShape::DELEGATE);
    }

    /*
     * Return true if this object is a native one that has been converted from
     * shared-immutable prototype-rooted shape storage to dictionary-shapes in
     * a doubly-linked list.
     */
    bool inDictionaryMode() const {
        return lastProperty()->inDictionary();
    }

    const Value &getSlot(uint32_t slot) const {
        MOZ_ASSERT(slotInRange(slot));
        uint32_t fixed = numFixedSlots();
        if (slot < fixed)
            return fixedSlots()[slot];
        return slots[slot - fixed];
    }
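
    /*
     * Example with hypothetical numbers: if numFixedSlots() is 2, getSlot(1)
     * reads fixedSlots()[1] inside the object itself, while getSlot(5) reads
     * slots[3] from the dynamically allocated array.
     */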

    const HeapSlot *getSlotAddressUnchecked(uint32_t slot) const {
        uint32_t fixed = numFixedSlots();
        if (slot < fixed)
            return fixedSlots() + slot;
        return slots + (slot - fixed);
    }

    HeapSlot *getSlotAddressUnchecked(uint32_t slot) {
        const ObjectImpl *obj = static_cast<const ObjectImpl*>(this);
        return const_cast<HeapSlot*>(obj->getSlotAddressUnchecked(slot));
    }

    HeapSlot *getSlotAddress(uint32_t slot) {
        /*
         * This can be used to get the address of the end of the slots for the
         * object, which may be necessary when fetching zero-length arrays of
         * slots (e.g. for callObjVarArray).
         */
        MOZ_ASSERT(slotInRange(slot, SENTINEL_ALLOWED));
        return getSlotAddressUnchecked(slot);
    }

    const HeapSlot *getSlotAddress(uint32_t slot) const {
        /*
         * This can be used to get the address of the end of the slots for the
         * object, which may be necessary when fetching zero-length arrays of
         * slots (e.g. for callObjVarArray).
         */
        MOZ_ASSERT(slotInRange(slot, SENTINEL_ALLOWED));
        return getSlotAddressUnchecked(slot);
    }

    HeapSlot &getSlotRef(uint32_t slot) {
        MOZ_ASSERT(slotInRange(slot));
        return *getSlotAddress(slot);
    }

    const HeapSlot &getSlotRef(uint32_t slot) const {
        MOZ_ASSERT(slotInRange(slot));
        return *getSlotAddress(slot);
    }

    HeapSlot &nativeGetSlotRef(uint32_t slot) {
        JS_ASSERT(isNative() && slot < slotSpan());
        return getSlotRef(slot);
    }
    const Value &nativeGetSlot(uint32_t slot) const {
        JS_ASSERT(isNative() && slot < slotSpan());
        return getSlot(slot);
    }

    void setSlot(uint32_t slot, const Value &value) {
        MOZ_ASSERT(slotInRange(slot));
        MOZ_ASSERT(IsObjectValueInCompartment(value, compartment()));
        getSlotRef(slot).set(this->asObjectPtr(), HeapSlot::Slot, slot, value);
    }

    inline void setCrossCompartmentSlot(uint32_t slot, const Value &value) {
        MOZ_ASSERT(slotInRange(slot));
        getSlotRef(slot).set(this->asObjectPtr(), HeapSlot::Slot, slot, value);
    }

    void initSlot(uint32_t slot, const Value &value) {
        MOZ_ASSERT(getSlot(slot).isUndefined());
        MOZ_ASSERT(slotInRange(slot));
        MOZ_ASSERT(IsObjectValueInCompartment(value, compartment()));
        initSlotUnchecked(slot, value);
    }

    void initCrossCompartmentSlot(uint32_t slot, const Value &value) {
        MOZ_ASSERT(getSlot(slot).isUndefined());
        MOZ_ASSERT(slotInRange(slot));
        initSlotUnchecked(slot, value);
    }

    void initSlotUnchecked(uint32_t slot, const Value &value) {
        getSlotAddressUnchecked(slot)->init(this->asObjectPtr(), HeapSlot::Slot, slot, value);
    }

    /* For slots which are known to always be fixed, due to the way they are allocated. */

    HeapSlot &getFixedSlotRef(uint32_t slot) {
        MOZ_ASSERT(slot < numFixedSlots());
        return fixedSlots()[slot];
    }

    const Value &getFixedSlot(uint32_t slot) const {
        MOZ_ASSERT(slot < numFixedSlots());
        return fixedSlots()[slot];
    }

    void setFixedSlot(uint32_t slot, const Value &value) {
        MOZ_ASSERT(slot < numFixedSlots());
        fixedSlots()[slot].set(this->asObjectPtr(), HeapSlot::Slot, slot, value);
    }

    void initFixedSlot(uint32_t slot, const Value &value) {
        MOZ_ASSERT(slot < numFixedSlots());
        fixedSlots()[slot].init(this->asObjectPtr(), HeapSlot::Slot, slot, value);
    }

    /*
     * Get the number of dynamic slots to allocate to cover the properties in
     * an object with the given number of fixed slots and slot span. The slot
     * capacity is not stored explicitly, and the allocated size of the slot
     * array is kept in sync with this count.
     */
    static uint32_t dynamicSlotsCount(uint32_t nfixed, uint32_t span, const Class *clasp);

    /* Memory usage functions. */
    size_t tenuredSizeOfThis() const {
        return js::gc::Arena::thingSize(tenuredGetAllocKind());
    }

    /* Elements accessors. */

    ObjectElements * getElementsHeader() const {
        return ObjectElements::fromElements(elements);
    }

    inline HeapSlot *fixedElements() const {
        static_assert(2 * sizeof(Value) == sizeof(ObjectElements),
                      "when elements are stored inline, the first two "
                      "slots will hold the ObjectElements header");
        return &fixedSlots()[2];
    }

#ifdef DEBUG
    bool canHaveNonEmptyElements();
#endif

    void setFixedElements() {
        JS_ASSERT(canHaveNonEmptyElements());
        this->elements = fixedElements();
    }

    inline bool hasDynamicElements() const {
        /*
         * Note: for objects with zero fixed slots this could potentially give
         * a spurious 'true' result, if the end of this object is exactly
         * aligned with the end of its arena and dynamic slots are allocated
         * immediately afterwards. Such cases cannot occur for dense arrays
         * (which have at least two fixed slots) and can only result in a leak.
         */
        return !hasEmptyElements() && elements != fixedElements();
    }

    inline bool hasFixedElements() const {
        return elements == fixedElements();
    }

    inline bool hasEmptyElements() const {
        return elements == emptyObjectElements;
    }
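
    /*
     * Summary of the three states the |elements| pointer can be in (see the
     * class comment above):
     *
     *   hasEmptyElements()   - elements == emptyObjectElements, the shared
     *                          singleton for objects with no elements.
     *   hasFixedElements()   - elements points just past an ObjectElements
     *                          header laid out in the object's own fixed slots.
     *   hasDynamicElements() - elements points into a separately allocated
     *                          header-plus-array.
     */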

    /*
     * Get a pointer to the unused data in the object's allocation immediately
     * following this object, for use with objects which allocate a larger size
     * class than they need and store non-elements data inline.
     */
    inline void *fixedData(size_t nslots) const;

    /* GC support. */
    static ThingRootKind rootKind() { return THING_ROOT_OBJECT; }

    inline void privateWriteBarrierPre(void **oldval);

    void privateWriteBarrierPost(void **pprivate) {
#ifdef JSGC_GENERATIONAL
        shadowRuntimeFromAnyThread()->gcStoreBufferPtr()->putCell(reinterpret_cast<js::gc::Cell **>(pprivate));
#endif
    }

    void markChildren(JSTracer *trc);

    /* Private data accessors. */

    inline void *&privateRef(uint32_t nfixed) const { /* XXX should be private, not protected! */
        /*
         * The private pointer of an object can hold any word sized value.
         * Private pointers are stored immediately after the last fixed slot of
         * the object.
         */
        MOZ_ASSERT(nfixed == numFixedSlots());
        MOZ_ASSERT(hasPrivate());
        HeapSlot *end = &fixedSlots()[nfixed];
        return *reinterpret_cast<void**>(end);
    }

    bool hasPrivate() const {
        return getClass()->hasPrivate();
    }
    void *getPrivate() const {
        return privateRef(numFixedSlots());
    }
    void setPrivate(void *data) {
        void **pprivate = &privateRef(numFixedSlots());
        privateWriteBarrierPre(pprivate);
        *pprivate = data;
    }

    void setPrivateGCThing(gc::Cell *cell) {
        void **pprivate = &privateRef(numFixedSlots());
        privateWriteBarrierPre(pprivate);
        *pprivate = reinterpret_cast<void *>(cell);
        privateWriteBarrierPost(pprivate);
    }

    void setPrivateUnbarriered(void *data) {
        void **pprivate = &privateRef(numFixedSlots());
        *pprivate = data;
    }
    void initPrivate(void *data) {
        privateRef(numFixedSlots()) = data;
    }

    /* Access private data for an object with a known number of fixed slots. */
    inline void *getPrivate(uint32_t nfixed) const {
        return privateRef(nfixed);
    }

    /* GC Accessors */
    void setInitialSlots(HeapSlot *newSlots) { slots = newSlots; }

    /* JIT Accessors */
    static size_t offsetOfShape() { return offsetof(ObjectImpl, shape_); }
    HeapPtrShape *addressOfShape() { return &shape_; }

    static size_t offsetOfType() { return offsetof(ObjectImpl, type_); }
    HeapPtrTypeObject *addressOfType() { return &type_; }

    static size_t offsetOfElements() { return offsetof(ObjectImpl, elements); }
    static size_t offsetOfFixedElements() {
        return sizeof(ObjectImpl) + sizeof(ObjectElements);
    }

    static size_t getFixedSlotOffset(size_t slot) {
        return sizeof(ObjectImpl) + slot * sizeof(Value);
    }
    static size_t getPrivateDataOffset(size_t nfixed) { return getFixedSlotOffset(nfixed); }
    static size_t offsetOfSlots() { return offsetof(ObjectImpl, slots); }
};
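
/*
 * Rough picture of an ObjectImpl's allocation, tying the JIT offsets above to
 * the layout described in the class comment (illustrative only):
 *
 *   +--------+-------+-------+----------+--------------+--------------+-- ...
 *   | shape_ | type_ | slots | elements | fixed slot 0 | fixed slot 1 |
 *   +--------+-------+-------+----------+--------------+--------------+-- ...
 *   ^                                   ^
 *   obj                                 obj + sizeof(ObjectImpl)
 *                                           == obj + getFixedSlotOffset(0)
 *
 * When elements are stored inline, the first two fixed slots hold the
 * ObjectElements header, so offsetOfFixedElements() == sizeof(ObjectImpl) +
 * sizeof(ObjectElements) is the address of element 0 (cf. fixedElements()).
 * For a class with a private pointer, the word at getPrivateDataOffset(nfixed)
 * sits immediately after the last fixed slot (cf. privateRef()).
 */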

namespace gc {

template <>
MOZ_ALWAYS_INLINE Zone *
BarrieredCell<ObjectImpl>::zone() const
{
    const ObjectImpl* obj = static_cast<const ObjectImpl*>(this);
    JS::Zone *zone = obj->shape_->zone();
    JS_ASSERT(CurrentThreadCanAccessZone(zone));
    return zone;
}

template <>
MOZ_ALWAYS_INLINE Zone *
BarrieredCell<ObjectImpl>::zoneFromAnyThread() const
{
    const ObjectImpl* obj = static_cast<const ObjectImpl*>(this);
    return obj->shape_->zoneFromAnyThread();
}

// TypeScript::global uses 0x1 as a special value.
template<>
/* static */ inline bool
BarrieredCell<ObjectImpl>::isNullLike(ObjectImpl *obj)
{
    return IsNullTaggedPointer(obj);
}

template<>
/* static */ inline void
BarrieredCell<ObjectImpl>::writeBarrierPost(ObjectImpl *obj, void *addr)
{
#ifdef JSGC_GENERATIONAL
    if (IsNullTaggedPointer(obj))
        return;
    obj->shadowRuntimeFromAnyThread()->gcStoreBufferPtr()->putCell((Cell **)addr);
#endif
}

template<>
/* static */ inline void
BarrieredCell<ObjectImpl>::writeBarrierPostRelocate(ObjectImpl *obj, void *addr)
{
#ifdef JSGC_GENERATIONAL
    obj->shadowRuntimeFromAnyThread()->gcStoreBufferPtr()->putRelocatableCell((Cell **)addr);
#endif
}

template<>
/* static */ inline void
BarrieredCell<ObjectImpl>::writeBarrierPostRemove(ObjectImpl *obj, void *addr)
{
#ifdef JSGC_GENERATIONAL
    obj->shadowRuntimeFromAnyThread()->gcStoreBufferPtr()->removeRelocatableCell((Cell **)addr);
#endif
}

} // namespace gc

inline void
ObjectImpl::privateWriteBarrierPre(void **oldval)
{
#ifdef JSGC_INCREMENTAL
    JS::shadow::Zone *shadowZone = this->shadowZoneFromAnyThread();
    if (shadowZone->needsBarrier()) {
        if (*oldval && getClass()->trace)
            getClass()->trace(shadowZone->barrierTracer(), this->asObjectPtr());
    }
#endif
}

inline Value
ObjectValue(ObjectImpl &obj)
{
    Value v;
    v.setObject(*obj.asObjectPtr());
    return v;
}

inline Handle<JSObject*>
Downcast(Handle<ObjectImpl*> obj)
{
    return Handle<JSObject*>::fromMarkedLocation(reinterpret_cast<JSObject* const*>(obj.address()));
}

#ifdef DEBUG
static inline bool
IsObjectValueInCompartment(js::Value v, JSCompartment *comp)
{
    if (!v.isObject())
        return true;
    return reinterpret_cast<ObjectImpl*>(&v.toObject())->compartment() == comp;
}
#endif

} /* namespace js */

#endif /* vm_ObjectImpl_h */