Thu, 15 Jan 2015 21:03:48 +0100
Integrate friendly tips from Tor colleagues to make (or not) 4.5 alpha 3.
This includes removal of overloaded (but unused) methods, and addition of
an overlooked call to DataStruct::SetData(nsISupports, uint32_t, bool).
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Storage of the children and attributes of a DOM node; storage for
 * the two is unified to minimize footprint.
 */

#include "nsAttrAndChildArray.h"

#include "mozilla/MathAlgorithms.h"
#include "mozilla/MemoryReporting.h"

#include "nsMappedAttributeElement.h"
#include "nsString.h"
#include "nsHTMLStyleSheet.h"
#include "nsRuleWalker.h"
#include "nsMappedAttributes.h"
#include "nsUnicharUtils.h"
#include "nsAutoPtr.h"
#include "nsContentUtils.h" // nsAutoScriptBlocker

/*
CACHE_POINTER_SHIFT indicates how many steps to downshift the |this| pointer.
It should be small enough to not cause collisions between adjacent arrays, and
large enough to make sure that all indexes are used. The size below is based
on the size of the smallest possible element (currently 24[*] bytes), which is
the smallest distance between two nsAttrAndChildArrays. 24/(2^5) is 0.75.
This means that two adjacent nsAttrAndChildArrays will collide in the cache
about one time in four. However, not all elements will have enough children to
get cached. And any allocator that doesn't return addresses aligned to 64
bytes will ensure that any index will get used.

[*] sizeof(Element) + 4 bytes for nsIDOMElement vtable pointer.
*/

#define CACHE_POINTER_SHIFT 5
#define CACHE_NUM_SLOTS 128
#define CACHE_CHILD_LIMIT 10

#define CACHE_GET_INDEX(_array) \
  ((NS_PTR_TO_INT32(_array) >> CACHE_POINTER_SHIFT) & \
   (CACHE_NUM_SLOTS - 1))
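
// Illustrative example (the address below is made up): for an
// nsAttrAndChildArray at 0x7f32a4c8e160,
// (0x7f32a4c8e160 >> 5) & (128 - 1) == 11, so lookups for that array use
// indexCache[11].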

struct IndexCacheSlot
{
  const nsAttrAndChildArray* array;
  int32_t index;
};

// This is initialized to all zeroes since it's static. Even if it weren't,
// the worst that could happen is a small inefficiency from a false-positive
// cache hit.
static IndexCacheSlot indexCache[CACHE_NUM_SLOTS];

static
inline
void
AddIndexToCache(const nsAttrAndChildArray* aArray, int32_t aIndex)
{
  uint32_t ix = CACHE_GET_INDEX(aArray);
  indexCache[ix].array = aArray;
  indexCache[ix].index = aIndex;
}

static
inline
int32_t
GetIndexFromCache(const nsAttrAndChildArray* aArray)
{
  uint32_t ix = CACHE_GET_INDEX(aArray);
  return indexCache[ix].array == aArray ? indexCache[ix].index : -1;
}

/**
 * Due to a compiler bug in VisualAge C++ for AIX, we need to return the
 * address of the first index into mBuffer here, instead of simply returning
 * mBuffer itself.
 *
 * See Bug 231104 for more information.
 */
#define ATTRS(_impl) \
  reinterpret_cast<InternalAttr*>(&((_impl)->mBuffer[0]))

#define NS_IMPL_EXTRA_SIZE \
  ((sizeof(Impl) - sizeof(mImpl->mBuffer)) / sizeof(void*))
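
// Buffer layout, as used throughout this file: mImpl->mBuffer first holds
// AttrSlotCount() attribute slots, each ATTRSIZE void*-sized words wide
// (ATTRSIZE is defined in the header) and interpreted as an InternalAttr
// (name + value), followed by ChildCount() nsIContent* pointers.
// NS_IMPL_EXTRA_SIZE is the size of the Impl header that precedes mBuffer,
// measured in void*-sized words.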

nsAttrAndChildArray::nsAttrAndChildArray()
  : mImpl(nullptr)
{
}

nsAttrAndChildArray::~nsAttrAndChildArray()
{
  if (!mImpl) {
    return;
  }

  Clear();

  moz_free(mImpl);
}

nsIContent*
nsAttrAndChildArray::GetSafeChildAt(uint32_t aPos) const
{
  if (aPos < ChildCount()) {
    return ChildAt(aPos);
  }

  return nullptr;
}

nsIContent * const *
nsAttrAndChildArray::GetChildArray(uint32_t* aChildCount) const
{
  *aChildCount = ChildCount();

  if (!*aChildCount) {
    return nullptr;
  }

  return reinterpret_cast<nsIContent**>(mImpl->mBuffer + AttrSlotsSize());
}
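
// InsertChildAt tries three strategies in order: use free space already
// present in the buffer, reclaim trailing empty attribute slots to make room,
// and only then grow the buffer.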
nsresult
nsAttrAndChildArray::InsertChildAt(nsIContent* aChild, uint32_t aPos)
{
  NS_ASSERTION(aChild, "nullchild");
  NS_ASSERTION(aPos <= ChildCount(), "out-of-bounds");

  uint32_t offset = AttrSlotsSize();
  uint32_t childCount = ChildCount();

  NS_ENSURE_TRUE(childCount < ATTRCHILD_ARRAY_MAX_CHILD_COUNT,
                 NS_ERROR_FAILURE);

  // First try to fit the new child in the existing child list
  if (mImpl && offset + childCount < mImpl->mBufferSize) {
    void** pos = mImpl->mBuffer + offset + aPos;
    if (childCount != aPos) {
      memmove(pos + 1, pos, (childCount - aPos) * sizeof(nsIContent*));
    }
    SetChildAtPos(pos, aChild, aPos, childCount);

    SetChildCount(childCount + 1);

    return NS_OK;
  }

  // Try to fit the new child in the existing buffer by compressing attr slots
  if (offset && !mImpl->mBuffer[offset - ATTRSIZE]) {
    // Compress away all empty slots while we're at it. This might not be the
    // optimal thing to do.
    uint32_t attrCount = NonMappedAttrCount();
    void** newStart = mImpl->mBuffer + attrCount * ATTRSIZE;
    void** oldStart = mImpl->mBuffer + offset;
    memmove(newStart, oldStart, aPos * sizeof(nsIContent*));
    memmove(&newStart[aPos + 1], &oldStart[aPos],
            (childCount - aPos) * sizeof(nsIContent*));
    SetChildAtPos(newStart + aPos, aChild, aPos, childCount);

    SetAttrSlotAndChildCount(attrCount, childCount + 1);

    return NS_OK;
  }

  // We can't fit in the current buffer; realloc time!
  if (!GrowBy(1)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  void** pos = mImpl->mBuffer + offset + aPos;
  if (childCount != aPos) {
    memmove(pos + 1, pos, (childCount - aPos) * sizeof(nsIContent*));
  }
  SetChildAtPos(pos, aChild, aPos, childCount);

  SetChildCount(childCount + 1);

  return NS_OK;
}

void
nsAttrAndChildArray::RemoveChildAt(uint32_t aPos)
{
  // Just store the return value of TakeChildAt in an nsCOMPtr to
  // trigger a release.
  nsCOMPtr<nsIContent> child = TakeChildAt(aPos);
}

already_AddRefed<nsIContent>
nsAttrAndChildArray::TakeChildAt(uint32_t aPos)
{
  NS_ASSERTION(aPos < ChildCount(), "out-of-bounds");

  uint32_t childCount = ChildCount();
  void** pos = mImpl->mBuffer + AttrSlotsSize() + aPos;
  nsIContent* child = static_cast<nsIContent*>(*pos);
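  // Unlink the departing child from the sibling chain before shifting the
  // remaining children down.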
  if (child->mPreviousSibling) {
    child->mPreviousSibling->mNextSibling = child->mNextSibling;
  }
  if (child->mNextSibling) {
    child->mNextSibling->mPreviousSibling = child->mPreviousSibling;
  }
  child->mPreviousSibling = child->mNextSibling = nullptr;

  memmove(pos, pos + 1, (childCount - aPos - 1) * sizeof(nsIContent*));
  SetChildCount(childCount - 1);

  return dont_AddRef(child);
}

int32_t
nsAttrAndChildArray::IndexOfChild(const nsINode* aPossibleChild) const
{
  if (!mImpl) {
    return -1;
  }
  void** children = mImpl->mBuffer + AttrSlotsSize();
  // Use signed here since we compare count to cursor, which has to be signed
  int32_t i, count = ChildCount();

  if (count >= CACHE_CHILD_LIMIT) {
    int32_t cursor = GetIndexFromCache(this);
    // Need to compare to count here since we may have removed children since
    // the index was added to the cache.
    // We're also relying on GetIndexFromCache returning -1 if no cached
    // index was found.
    if (cursor >= count) {
      cursor = -1;
    }

    // Seek outward from the last found index. |inc| will change sign every
    // run through the loop. |sign| just exists to make sure the absolute
    // value of |inc| increases each time through.
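    // For example, starting from a cached index c the loop below probes
    // c, c+1, c-1, c+2, c-2, ... until it runs off either end of the array.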
    int32_t inc = 1, sign = 1;
    while (cursor >= 0 && cursor < count) {
      if (children[cursor] == aPossibleChild) {
        AddIndexToCache(this, cursor);

        return cursor;
      }

      cursor += inc;
      inc = -inc - sign;
      sign = -sign;
    }

    // We ran into one 'edge'. Add inc to cursor once more to get back to
    // the 'side' where we still need to search, then step in the |sign|
    // direction.
    cursor += inc;

    if (sign > 0) {
      for (; cursor < count; ++cursor) {
        if (children[cursor] == aPossibleChild) {
          AddIndexToCache(this, cursor);

          return static_cast<int32_t>(cursor);
        }
      }
    }
    else {
      for (; cursor >= 0; --cursor) {
        if (children[cursor] == aPossibleChild) {
          AddIndexToCache(this, cursor);

          return static_cast<int32_t>(cursor);
        }
      }
    }

    // The child wasn't even in the remaining children
    return -1;
  }

  for (i = 0; i < count; ++i) {
    if (children[i] == aPossibleChild) {
      return static_cast<int32_t>(i);
    }
  }

  return -1;
}

uint32_t
nsAttrAndChildArray::AttrCount() const
{
  return NonMappedAttrCount() + MappedAttrCount();
}

const nsAttrValue*
nsAttrAndChildArray::GetAttr(nsIAtom* aLocalName, int32_t aNamespaceID) const
{
  uint32_t i, slotCount = AttrSlotCount();
  if (aNamespaceID == kNameSpaceID_None) {
    // This should be the common case so let's make an optimized loop
    for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
      if (ATTRS(mImpl)[i].mName.Equals(aLocalName)) {
        return &ATTRS(mImpl)[i].mValue;
      }
    }

    if (mImpl && mImpl->mMappedAttrs) {
      return mImpl->mMappedAttrs->GetAttr(aLocalName);
    }
  }
  else {
    for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
      if (ATTRS(mImpl)[i].mName.Equals(aLocalName, aNamespaceID)) {
        return &ATTRS(mImpl)[i].mValue;
      }
    }
  }

  return nullptr;
}

const nsAttrValue*
nsAttrAndChildArray::GetAttr(const nsAString& aLocalName) const
{
  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    if (ATTRS(mImpl)[i].mName.Equals(aLocalName)) {
      return &ATTRS(mImpl)[i].mValue;
    }
  }

  if (mImpl && mImpl->mMappedAttrs) {
    return mImpl->mMappedAttrs->GetAttr(aLocalName);
  }

  return nullptr;
}

const nsAttrValue*
nsAttrAndChildArray::GetAttr(const nsAString& aName,
                             nsCaseTreatment aCaseSensitive) const
{
  // Check whether someone is being silly and passing non-lowercase
  // attr names.
  if (aCaseSensitive == eIgnoreCase &&
      nsContentUtils::StringContainsASCIIUpper(aName)) {
    // Try again with a lowercased name, but make sure we can't reenter this
    // block by passing eCaseMatters for aCaseSensitive.
    nsAutoString lowercase;
    nsContentUtils::ASCIIToLower(aName, lowercase);
    return GetAttr(lowercase, eCaseMatters);
  }

  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    if (ATTRS(mImpl)[i].mName.QualifiedNameEquals(aName)) {
      return &ATTRS(mImpl)[i].mValue;
    }
  }

  if (mImpl && mImpl->mMappedAttrs) {
    const nsAttrValue* val =
      mImpl->mMappedAttrs->GetAttr(aName);
    if (val) {
      return val;
    }
  }

  return nullptr;
}

const nsAttrValue*
nsAttrAndChildArray::AttrAt(uint32_t aPos) const
{
  NS_ASSERTION(aPos < AttrCount(),
               "out-of-bounds access in nsAttrAndChildArray");

  uint32_t mapped = MappedAttrCount();
  if (aPos < mapped) {
    return mImpl->mMappedAttrs->AttrAt(aPos);
  }

  return &ATTRS(mImpl)[aPos - mapped].mValue;
}

nsresult
nsAttrAndChildArray::SetAndTakeAttr(nsIAtom* aLocalName, nsAttrValue& aValue)
{
  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    if (ATTRS(mImpl)[i].mName.Equals(aLocalName)) {
      ATTRS(mImpl)[i].mValue.Reset();
      ATTRS(mImpl)[i].mValue.SwapValueWith(aValue);

      return NS_OK;
    }
  }

  NS_ENSURE_TRUE(i < ATTRCHILD_ARRAY_MAX_ATTR_COUNT,
                 NS_ERROR_FAILURE);

  if (i == slotCount && !AddAttrSlot()) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  new (&ATTRS(mImpl)[i].mName) nsAttrName(aLocalName);
  new (&ATTRS(mImpl)[i].mValue) nsAttrValue();
  ATTRS(mImpl)[i].mValue.SwapValueWith(aValue);

  return NS_OK;
}

nsresult
nsAttrAndChildArray::SetAndTakeAttr(nsINodeInfo* aName, nsAttrValue& aValue)
{
  int32_t namespaceID = aName->NamespaceID();
  nsIAtom* localName = aName->NameAtom();
  if (namespaceID == kNameSpaceID_None) {
    return SetAndTakeAttr(localName, aValue);
  }

  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    if (ATTRS(mImpl)[i].mName.Equals(localName, namespaceID)) {
      ATTRS(mImpl)[i].mName.SetTo(aName);
      ATTRS(mImpl)[i].mValue.Reset();
      ATTRS(mImpl)[i].mValue.SwapValueWith(aValue);

      return NS_OK;
    }
  }

  NS_ENSURE_TRUE(i < ATTRCHILD_ARRAY_MAX_ATTR_COUNT,
                 NS_ERROR_FAILURE);

  if (i == slotCount && !AddAttrSlot()) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  new (&ATTRS(mImpl)[i].mName) nsAttrName(aName);
  new (&ATTRS(mImpl)[i].mValue) nsAttrValue();
  ATTRS(mImpl)[i].mValue.SwapValueWith(aValue);

  return NS_OK;
}

nsresult
nsAttrAndChildArray::RemoveAttrAt(uint32_t aPos, nsAttrValue& aValue)
{
  NS_ASSERTION(aPos < AttrCount(), "out-of-bounds");

  uint32_t mapped = MappedAttrCount();
  if (aPos < mapped) {
    if (mapped == 1) {
      // We're removing the last mapped attribute. Can't swap in this
      // case; have to copy.
      aValue.SetTo(*mImpl->mMappedAttrs->AttrAt(0));
      NS_RELEASE(mImpl->mMappedAttrs);

      return NS_OK;
    }

    nsRefPtr<nsMappedAttributes> mapped =
      GetModifiableMapped(nullptr, nullptr, false);

    mapped->RemoveAttrAt(aPos, aValue);

    return MakeMappedUnique(mapped);
  }

  aPos -= mapped;
  ATTRS(mImpl)[aPos].mValue.SwapValueWith(aValue);
  ATTRS(mImpl)[aPos].~InternalAttr();
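
  // Shift the remaining non-mapped attributes down one slot and zero out the
  // now-unused last slot.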
  uint32_t slotCount = AttrSlotCount();
  memmove(&ATTRS(mImpl)[aPos],
          &ATTRS(mImpl)[aPos + 1],
          (slotCount - aPos - 1) * sizeof(InternalAttr));
  memset(&ATTRS(mImpl)[slotCount - 1], 0, sizeof(InternalAttr));

  return NS_OK;
}

const nsAttrName*
nsAttrAndChildArray::AttrNameAt(uint32_t aPos) const
{
  NS_ASSERTION(aPos < AttrCount(),
               "out-of-bounds access in nsAttrAndChildArray");

  uint32_t mapped = MappedAttrCount();
  if (aPos < mapped) {
    return mImpl->mMappedAttrs->NameAt(aPos);
  }

  return &ATTRS(mImpl)[aPos - mapped].mName;
}

const nsAttrName*
nsAttrAndChildArray::GetSafeAttrNameAt(uint32_t aPos) const
{
  uint32_t mapped = MappedAttrCount();
  if (aPos < mapped) {
    return mImpl->mMappedAttrs->NameAt(aPos);
  }

  aPos -= mapped;
  if (aPos >= AttrSlotCount()) {
    return nullptr;
  }

  void** pos = mImpl->mBuffer + aPos * ATTRSIZE;
  if (!*pos) {
    return nullptr;
  }

  return &reinterpret_cast<InternalAttr*>(pos)->mName;
}

const nsAttrName*
nsAttrAndChildArray::GetExistingAttrNameFromQName(const nsAString& aName) const
{
  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    if (ATTRS(mImpl)[i].mName.QualifiedNameEquals(aName)) {
      return &ATTRS(mImpl)[i].mName;
    }
  }

  if (mImpl && mImpl->mMappedAttrs) {
    return mImpl->mMappedAttrs->GetExistingAttrNameFromQName(aName);
  }

  return nullptr;
}

int32_t
nsAttrAndChildArray::IndexOfAttr(nsIAtom* aLocalName, int32_t aNamespaceID) const
{
  int32_t idx;
  if (mImpl && mImpl->mMappedAttrs && aNamespaceID == kNameSpaceID_None) {
    idx = mImpl->mMappedAttrs->IndexOfAttr(aLocalName);
    if (idx >= 0) {
      return idx;
    }
  }

  uint32_t i;
  uint32_t mapped = MappedAttrCount();
  uint32_t slotCount = AttrSlotCount();
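  // Non-mapped attribute indices are offset by the mapped-attribute count,
  // since mapped attributes come first in the AttrAt()/AttrNameAt() ordering.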
  if (aNamespaceID == kNameSpaceID_None) {
    // This should be the common case so let's make an optimized loop
    for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
      if (ATTRS(mImpl)[i].mName.Equals(aLocalName)) {
        return i + mapped;
      }
    }
  }
  else {
    for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
      if (ATTRS(mImpl)[i].mName.Equals(aLocalName, aNamespaceID)) {
        return i + mapped;
      }
    }
  }

  return -1;
}

nsresult
nsAttrAndChildArray::SetAndTakeMappedAttr(nsIAtom* aLocalName,
                                          nsAttrValue& aValue,
                                          nsMappedAttributeElement* aContent,
                                          nsHTMLStyleSheet* aSheet)
{
  bool willAdd = true;
  if (mImpl && mImpl->mMappedAttrs) {
    willAdd = !mImpl->mMappedAttrs->GetAttr(aLocalName);
  }

  nsRefPtr<nsMappedAttributes> mapped =
    GetModifiableMapped(aContent, aSheet, willAdd);

  mapped->SetAndTakeAttr(aLocalName, aValue);

  return MakeMappedUnique(mapped);
}

nsresult
nsAttrAndChildArray::DoSetMappedAttrStyleSheet(nsHTMLStyleSheet* aSheet)
{
  NS_PRECONDITION(mImpl && mImpl->mMappedAttrs,
                  "Should have mapped attrs here!");
  if (aSheet == mImpl->mMappedAttrs->GetStyleSheet()) {
    return NS_OK;
  }

  nsRefPtr<nsMappedAttributes> mapped =
    GetModifiableMapped(nullptr, nullptr, false);

  mapped->SetStyleSheet(aSheet);

  return MakeMappedUnique(mapped);
}

void
nsAttrAndChildArray::WalkMappedAttributeStyleRules(nsRuleWalker* aRuleWalker)
{
  if (mImpl && mImpl->mMappedAttrs) {
    aRuleWalker->Forward(mImpl->mMappedAttrs);
  }
}

void
nsAttrAndChildArray::Compact()
{
  if (!mImpl) {
    return;
  }

  // First compress away empty attr slots
  uint32_t slotCount = AttrSlotCount();
  uint32_t attrCount = NonMappedAttrCount();
  uint32_t childCount = ChildCount();

  if (attrCount < slotCount) {
    memmove(mImpl->mBuffer + attrCount * ATTRSIZE,
            mImpl->mBuffer + slotCount * ATTRSIZE,
            childCount * sizeof(nsIContent*));
    SetAttrSlotCount(attrCount);
  }

  // Then resize or free the buffer
  uint32_t newSize = attrCount * ATTRSIZE + childCount;
  if (!newSize && !mImpl->mMappedAttrs) {
    moz_free(mImpl);
    mImpl = nullptr;
  }
  else if (newSize < mImpl->mBufferSize) {
    mImpl = static_cast<Impl*>(moz_realloc(mImpl, (newSize + NS_IMPL_EXTRA_SIZE) * sizeof(nsIContent*)));
    NS_ASSERTION(mImpl, "failed to reallocate to smaller buffer");

    mImpl->mBufferSize = newSize;
  }
}

void
nsAttrAndChildArray::Clear()
{
  if (!mImpl) {
    return;
  }

  if (mImpl->mMappedAttrs) {
    NS_RELEASE(mImpl->mMappedAttrs);
  }

  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    ATTRS(mImpl)[i].~InternalAttr();
  }

  nsAutoScriptBlocker scriptBlocker;
  uint32_t end = slotCount * ATTRSIZE + ChildCount();
  for (i = slotCount * ATTRSIZE; i < end; ++i) {
    nsIContent* child = static_cast<nsIContent*>(mImpl->mBuffer[i]);
    // Making this false so tree teardown doesn't end up being
    // O(N*D) (number of nodes times average depth of tree).
    child->UnbindFromTree(false); // XXX is it better to let the owner do this?
    // Make sure to unlink our kids from each other, since someone
    // else could still be holding references to some of them.

    // XXXbz We probably can't push this assignment down into the |aNullParent|
    // case of UnbindFromTree because we still need the assignment in
    // RemoveChildAt. In particular, ContentRemoved fires between
    // RemoveChildAt and UnbindFromTree, and in ContentRemoved the sibling
    // chain needs to be correct. Though maybe we could set the prev and next
    // to point to each other but keep the kid being removed pointing to them
    // through ContentRemoved so consumers can find where it used to be in the
    // list?
    child->mPreviousSibling = child->mNextSibling = nullptr;
    NS_RELEASE(child);
  }

  SetAttrSlotAndChildCount(0, 0);
}

uint32_t
nsAttrAndChildArray::NonMappedAttrCount() const
{
  if (!mImpl) {
    return 0;
  }

  uint32_t count = AttrSlotCount();
  while (count > 0 && !mImpl->mBuffer[(count - 1) * ATTRSIZE]) {
    --count;
  }

  return count;
}

uint32_t
nsAttrAndChildArray::MappedAttrCount() const
{
  return mImpl && mImpl->mMappedAttrs ? (uint32_t)mImpl->mMappedAttrs->Count() : 0;
}

nsMappedAttributes*
nsAttrAndChildArray::GetModifiableMapped(nsMappedAttributeElement* aContent,
                                         nsHTMLStyleSheet* aSheet,
                                         bool aWillAddAttr)
{
  if (mImpl && mImpl->mMappedAttrs) {
    return mImpl->mMappedAttrs->Clone(aWillAddAttr);
  }

  MOZ_ASSERT(aContent, "Trying to create modifiable without content");

  nsMapRuleToAttributesFunc mapRuleFunc =
    aContent->GetAttributeMappingFunction();
  return new nsMappedAttributes(aSheet, mapRuleFunc);
}
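
// MakeMappedUnique exchanges a freshly modified nsMappedAttributes (always a
// private copy obtained via GetModifiableMapped) for the canonical instance
// stored in the style sheet's unique-attributes table (see
// UniqueMappedAttributes below), so identical mapped attribute sets can be
// shared.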
nsresult
nsAttrAndChildArray::MakeMappedUnique(nsMappedAttributes* aAttributes)
{
  NS_ASSERTION(aAttributes, "missing attributes");

  if (!mImpl && !GrowBy(1)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (!aAttributes->GetStyleSheet()) {
    // This doesn't currently happen, but it could if we do loading right

    nsRefPtr<nsMappedAttributes> mapped(aAttributes);
    mapped.swap(mImpl->mMappedAttrs);

    return NS_OK;
  }

  nsRefPtr<nsMappedAttributes> mapped =
    aAttributes->GetStyleSheet()->UniqueMappedAttributes(aAttributes);
  NS_ENSURE_TRUE(mapped, NS_ERROR_OUT_OF_MEMORY);

  if (mapped != aAttributes) {
    // Reset the stylesheet of aAttributes so that it doesn't spend time
    // trying to remove itself from the hash. There is no risk that aAttributes
    // is in the hash since it will always have come from GetModifiableMapped,
    // which never returns maps that are in the hash (such hashes are by
    // nature not modifiable).
    aAttributes->DropStyleSheetReference();
  }
  mapped.swap(mImpl->mMappedAttrs);

  return NS_OK;
}

bool
nsAttrAndChildArray::GrowBy(uint32_t aGrowSize)
{
  uint32_t size = mImpl ? mImpl->mBufferSize + NS_IMPL_EXTRA_SIZE : 0;
  uint32_t minSize = size + aGrowSize;
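
  // Growth policy: while the buffer is small, grow linearly in
  // ATTRCHILD_ARRAY_GROWSIZE increments; past ATTRCHILD_ARRAY_LINEAR_THRESHOLD,
  // round the requested size up to the next power of two.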
  if (minSize <= ATTRCHILD_ARRAY_LINEAR_THRESHOLD) {
    do {
      size += ATTRCHILD_ARRAY_GROWSIZE;
    } while (size < minSize);
  }
  else {
    size = 1u << mozilla::CeilingLog2(minSize);
  }

  bool needToInitialize = !mImpl;
  Impl* newImpl = static_cast<Impl*>(moz_realloc(mImpl, size * sizeof(void*)));
  NS_ENSURE_TRUE(newImpl, false);

  mImpl = newImpl;

  // Set initial counts if we didn't have a buffer before
  if (needToInitialize) {
    mImpl->mMappedAttrs = nullptr;
    SetAttrSlotAndChildCount(0, 0);
  }

  mImpl->mBufferSize = size - NS_IMPL_EXTRA_SIZE;

  return true;
}

bool
nsAttrAndChildArray::AddAttrSlot()
{
  uint32_t slotCount = AttrSlotCount();
  uint32_t childCount = ChildCount();

  // Grow buffer if needed
  if (!(mImpl && mImpl->mBufferSize >= (slotCount + 1) * ATTRSIZE + childCount) &&
      !GrowBy(ATTRSIZE)) {
    return false;
  }
  void** offset = mImpl->mBuffer + slotCount * ATTRSIZE;

  if (childCount > 0) {
    memmove(&ATTRS(mImpl)[slotCount + 1], &ATTRS(mImpl)[slotCount],
            childCount * sizeof(nsIContent*));
  }

  SetAttrSlotCount(slotCount + 1);
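  // Clear both void*-sized words of the new slot so it reads as unused; a
  // null first word is what NonMappedAttrCount() above treats as an empty slot.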
  offset[0] = nullptr;
  offset[1] = nullptr;

  return true;
}

inline void
nsAttrAndChildArray::SetChildAtPos(void** aPos, nsIContent* aChild,
                                   uint32_t aIndex, uint32_t aChildCount)
{
  NS_PRECONDITION(!aChild->GetNextSibling(), "aChild with next sibling?");
  NS_PRECONDITION(!aChild->GetPreviousSibling(), "aChild with prev sibling?");

  *aPos = aChild;
  NS_ADDREF(aChild);
  if (aIndex != 0) {
    nsIContent* previous = static_cast<nsIContent*>(*(aPos - 1));
    aChild->mPreviousSibling = previous;
    previous->mNextSibling = aChild;
  }
  if (aIndex != aChildCount) {
    nsIContent* next = static_cast<nsIContent*>(*(aPos + 1));
    aChild->mNextSibling = next;
    next->mPreviousSibling = aChild;
  }
}

size_t
nsAttrAndChildArray::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
{
  size_t n = 0;
  if (mImpl) {
    // Don't add the size taken by *mMappedAttrs because it's shared.

    n += aMallocSizeOf(mImpl);

    uint32_t slotCount = AttrSlotCount();
    for (uint32_t i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
      nsAttrValue* value = &ATTRS(mImpl)[i].mValue;
      n += value->SizeOfExcludingThis(aMallocSizeOf);
    }
  }

  return n;
}