/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Storage of the children and attributes of a DOM node; storage for
 * the two is unified to minimize footprint.
 */

#include "nsAttrAndChildArray.h"

#include "mozilla/MathAlgorithms.h"
#include "mozilla/MemoryReporting.h"

#include "nsMappedAttributeElement.h"
#include "nsString.h"
#include "nsHTMLStyleSheet.h"
#include "nsRuleWalker.h"
#include "nsMappedAttributes.h"
#include "nsUnicharUtils.h"
#include "nsAutoPtr.h"
#include "nsContentUtils.h" // nsAutoScriptBlocker

/*
CACHE_POINTER_SHIFT indicates how many steps to downshift the |this| pointer.
It should be small enough not to cause collisions between adjacent arrays, and
large enough to make sure that all indexes are used. The size below is based
on the size of the smallest possible element (currently 24[*] bytes), which is
the smallest distance between two nsAttrAndChildArray objects. 24/(2^5) is
0.75, which means that two adjacent nsAttrAndChildArrays will map to the same
cache slot roughly one time in four. However, not all elements will have
enough children to get cached, and any allocator that doesn't return addresses
aligned to 64 bytes will ensure that all indexes get used.

[*] sizeof(Element) + 4 bytes for nsIDOMElement vtable pointer.
*/

#define CACHE_POINTER_SHIFT 5
#define CACHE_NUM_SLOTS 128
#define CACHE_CHILD_LIMIT 10

#define CACHE_GET_INDEX(_array) \
  ((NS_PTR_TO_INT32(_array) >> CACHE_POINTER_SHIFT) & \
   (CACHE_NUM_SLOTS - 1))

struct IndexCacheSlot
{
  const nsAttrAndChildArray* array;
  int32_t index;
};

// This is inited to all zeroes since it's static. Though even if it weren't,
// the worst thing that'd happen is a small inefficiency from a false
// positive cache hit.
static IndexCacheSlot indexCache[CACHE_NUM_SLOTS];

static
inline
void
AddIndexToCache(const nsAttrAndChildArray* aArray, int32_t aIndex)
{
  uint32_t ix = CACHE_GET_INDEX(aArray);
  indexCache[ix].array = aArray;
  indexCache[ix].index = aIndex;
}

static
inline
int32_t
GetIndexFromCache(const nsAttrAndChildArray* aArray)
{
  uint32_t ix = CACHE_GET_INDEX(aArray);
  return indexCache[ix].array == aArray ? indexCache[ix].index : -1;
}


/**
 * Due to a compiler bug in VisualAge C++ for AIX, we need to return the
 * address of the first index into mBuffer here, instead of simply returning
 * mBuffer itself.
 *
 * See Bug 231104 for more information.
 */
#define ATTRS(_impl) \
  reinterpret_cast<InternalAttr*>(&((_impl)->mBuffer[0]))


#define NS_IMPL_EXTRA_SIZE \
  ((sizeof(Impl) - sizeof(mImpl->mBuffer)) / sizeof(void*))

nsAttrAndChildArray::nsAttrAndChildArray()
  : mImpl(nullptr)
{
}

nsAttrAndChildArray::~nsAttrAndChildArray()
{
  if (!mImpl) {
    return;
  }

  Clear();

  moz_free(mImpl);
}

nsIContent*
nsAttrAndChildArray::GetSafeChildAt(uint32_t aPos) const
{
  if (aPos < ChildCount()) {
    return ChildAt(aPos);
  }

  return nullptr;
}

nsIContent * const *
nsAttrAndChildArray::GetChildArray(uint32_t* aChildCount) const
{
  *aChildCount = ChildCount();

  if (!*aChildCount) {
    return nullptr;
  }

  return reinterpret_cast<nsIContent* const *>(mImpl->mBuffer + AttrSlotsSize());
}

nsresult
nsAttrAndChildArray::InsertChildAt(nsIContent* aChild, uint32_t aPos)
{
  NS_ASSERTION(aChild, "nullchild");
  NS_ASSERTION(aPos <= ChildCount(), "out-of-bounds");

  uint32_t offset = AttrSlotsSize();
  uint32_t childCount = ChildCount();

  NS_ENSURE_TRUE(childCount < ATTRCHILD_ARRAY_MAX_CHILD_COUNT,
                 NS_ERROR_FAILURE);

  // First try to fit the new child in the existing child list.
  if (mImpl && offset + childCount < mImpl->mBufferSize) {
    void** pos = mImpl->mBuffer + offset + aPos;
    if (childCount != aPos) {
      memmove(pos + 1, pos, (childCount - aPos) * sizeof(nsIContent*));
    }
    SetChildAtPos(pos, aChild, aPos, childCount);

    SetChildCount(childCount + 1);

    return NS_OK;
  }

  // Try to fit the new child in the existing buffer by compressing attr slots.
  if (offset && !mImpl->mBuffer[offset - ATTRSIZE]) {
    // Compress away all empty slots while we're at it. This might not be the
    // optimal thing to do.
    uint32_t attrCount = NonMappedAttrCount();
    void** newStart = mImpl->mBuffer + attrCount * ATTRSIZE;
    void** oldStart = mImpl->mBuffer + offset;
    memmove(newStart, oldStart, aPos * sizeof(nsIContent*));
    memmove(&newStart[aPos + 1], &oldStart[aPos],
            (childCount - aPos) * sizeof(nsIContent*));
    SetChildAtPos(newStart + aPos, aChild, aPos, childCount);

    SetAttrSlotAndChildCount(attrCount, childCount + 1);

    return NS_OK;
  }

  // We can't fit in the current buffer; realloc time!
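  // Sketch of the (assumed) unified buffer layout that this function
  // maintains:
  //
  //   mBuffer: [attr slot 0]...[attr slot N-1][child 0]...[child M-1]
  //
  // where each attr slot is ATTRSIZE void*'s wide. GrowBy (defined further
  // down in this file) reallocates mImpl in ATTRCHILD_ARRAY_GROWSIZE steps
  // while the buffer is small and rounds the size up to the next power of
  // two once past ATTRCHILD_ARRAY_LINEAR_THRESHOLD; after the realloc the
  // same shift-and-insert as the in-place path above is redone against the
  // larger buffer.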
  if (!GrowBy(1)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  void** pos = mImpl->mBuffer + offset + aPos;
  if (childCount != aPos) {
    memmove(pos + 1, pos, (childCount - aPos) * sizeof(nsIContent*));
  }
  SetChildAtPos(pos, aChild, aPos, childCount);

  SetChildCount(childCount + 1);

  return NS_OK;
}

void
nsAttrAndChildArray::RemoveChildAt(uint32_t aPos)
{
  // Just store the return value of TakeChildAt in an nsCOMPtr to
  // trigger a release.
  nsCOMPtr<nsIContent> child = TakeChildAt(aPos);
}

already_AddRefed<nsIContent>
nsAttrAndChildArray::TakeChildAt(uint32_t aPos)
{
  NS_ASSERTION(aPos < ChildCount(), "out-of-bounds");

  uint32_t childCount = ChildCount();
  void** pos = mImpl->mBuffer + AttrSlotsSize() + aPos;
  nsIContent* child = static_cast<nsIContent*>(*pos);
  if (child->mPreviousSibling) {
    child->mPreviousSibling->mNextSibling = child->mNextSibling;
  }
  if (child->mNextSibling) {
    child->mNextSibling->mPreviousSibling = child->mPreviousSibling;
  }
  child->mPreviousSibling = child->mNextSibling = nullptr;

  memmove(pos, pos + 1, (childCount - aPos - 1) * sizeof(nsIContent*));
  SetChildCount(childCount - 1);

  return dont_AddRef(child);
}

int32_t
nsAttrAndChildArray::IndexOfChild(const nsINode* aPossibleChild) const
{
  if (!mImpl) {
    return -1;
  }
  void** children = mImpl->mBuffer + AttrSlotsSize();
  // Use signed here since we compare count to cursor, which has to be signed.
  int32_t i, count = ChildCount();

  if (count >= CACHE_CHILD_LIMIT) {
    int32_t cursor = GetIndexFromCache(this);
    // Need to compare to count here since we may have removed children since
    // the index was added to the cache.
    // We're also relying on GetIndexFromCache returning -1 if no cached
    // index was found.
    if (cursor >= count) {
      cursor = -1;
    }

    // Seek outward from the last found index. |inc| will change sign every
    // run through the loop. |sign| just exists to make sure the absolute
    // value of |inc| increases each time through.
    int32_t inc = 1, sign = 1;
    while (cursor >= 0 && cursor < count) {
      if (children[cursor] == aPossibleChild) {
        AddIndexToCache(this, cursor);

        return cursor;
      }

      cursor += inc;
      inc = -inc - sign;
      sign = -sign;
    }

    // We ran into one 'edge'. Add inc to cursor once more to get back to
    // the 'side' where we still need to search, then step in the |sign|
    // direction.
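    // Worked example (hypothetical numbers): with a cached cursor of 4 and
    // count == 6, the loop above probes indexes 4, 5, 3 and then steps to 6,
    // which is out of range. At that point inc == -4 and sign == -1, so
    // adding inc once more rewinds the cursor to 2, and the downward scan
    // below covers the still-unvisited indexes 2, 1 and 0.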
    cursor += inc;

    if (sign > 0) {
      for (; cursor < count; ++cursor) {
        if (children[cursor] == aPossibleChild) {
          AddIndexToCache(this, cursor);

          return static_cast<int32_t>(cursor);
        }
      }
    }
    else {
      for (; cursor >= 0; --cursor) {
        if (children[cursor] == aPossibleChild) {
          AddIndexToCache(this, cursor);

          return static_cast<int32_t>(cursor);
        }
      }
    }

    // The child wasn't even in the remaining children.
    return -1;
  }

  for (i = 0; i < count; ++i) {
    if (children[i] == aPossibleChild) {
      return static_cast<int32_t>(i);
    }
  }

  return -1;
}

uint32_t
nsAttrAndChildArray::AttrCount() const
{
  return NonMappedAttrCount() + MappedAttrCount();
}

const nsAttrValue*
nsAttrAndChildArray::GetAttr(nsIAtom* aLocalName, int32_t aNamespaceID) const
{
  uint32_t i, slotCount = AttrSlotCount();
  if (aNamespaceID == kNameSpaceID_None) {
    // This should be the common case, so let's make an optimized loop.
    for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
      if (ATTRS(mImpl)[i].mName.Equals(aLocalName)) {
        return &ATTRS(mImpl)[i].mValue;
      }
    }

    if (mImpl && mImpl->mMappedAttrs) {
      return mImpl->mMappedAttrs->GetAttr(aLocalName);
    }
  }
  else {
    for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
      if (ATTRS(mImpl)[i].mName.Equals(aLocalName, aNamespaceID)) {
        return &ATTRS(mImpl)[i].mValue;
      }
    }
  }

  return nullptr;
}

const nsAttrValue*
nsAttrAndChildArray::GetAttr(const nsAString& aLocalName) const
{
  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    if (ATTRS(mImpl)[i].mName.Equals(aLocalName)) {
      return &ATTRS(mImpl)[i].mValue;
    }
  }

  if (mImpl && mImpl->mMappedAttrs) {
    return mImpl->mMappedAttrs->GetAttr(aLocalName);
  }

  return nullptr;
}

const nsAttrValue*
nsAttrAndChildArray::GetAttr(const nsAString& aName,
                             nsCaseTreatment aCaseSensitive) const
{
  // Check whether someone is being silly and passing non-lowercase
  // attr names.
  if (aCaseSensitive == eIgnoreCase &&
      nsContentUtils::StringContainsASCIIUpper(aName)) {
    // Try again with a lowercased name, but make sure we can't reenter this
    // block by passing eCaseMatters for aCaseSensitive.
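    // For example (hypothetical input): a lookup for "onClick" with
    // eIgnoreCase is redone as a single case-sensitive lookup for
    // "onclick", so the per-attribute comparisons below never have to fold
    // case themselves.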
    nsAutoString lowercase;
    nsContentUtils::ASCIIToLower(aName, lowercase);
    return GetAttr(lowercase, eCaseMatters);
  }

  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    if (ATTRS(mImpl)[i].mName.QualifiedNameEquals(aName)) {
      return &ATTRS(mImpl)[i].mValue;
    }
  }

  if (mImpl && mImpl->mMappedAttrs) {
    const nsAttrValue* val =
      mImpl->mMappedAttrs->GetAttr(aName);
    if (val) {
      return val;
    }
  }

  return nullptr;
}

const nsAttrValue*
nsAttrAndChildArray::AttrAt(uint32_t aPos) const
{
  NS_ASSERTION(aPos < AttrCount(),
               "out-of-bounds access in nsAttrAndChildArray");

  uint32_t mapped = MappedAttrCount();
  if (aPos < mapped) {
    return mImpl->mMappedAttrs->AttrAt(aPos);
  }

  return &ATTRS(mImpl)[aPos - mapped].mValue;
}

nsresult
nsAttrAndChildArray::SetAndTakeAttr(nsIAtom* aLocalName, nsAttrValue& aValue)
{
  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    if (ATTRS(mImpl)[i].mName.Equals(aLocalName)) {
      ATTRS(mImpl)[i].mValue.Reset();
      ATTRS(mImpl)[i].mValue.SwapValueWith(aValue);

      return NS_OK;
    }
  }

  NS_ENSURE_TRUE(i < ATTRCHILD_ARRAY_MAX_ATTR_COUNT,
                 NS_ERROR_FAILURE);

  if (i == slotCount && !AddAttrSlot()) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  new (&ATTRS(mImpl)[i].mName) nsAttrName(aLocalName);
  new (&ATTRS(mImpl)[i].mValue) nsAttrValue();
  ATTRS(mImpl)[i].mValue.SwapValueWith(aValue);

  return NS_OK;
}

nsresult
nsAttrAndChildArray::SetAndTakeAttr(nsINodeInfo* aName, nsAttrValue& aValue)
{
  int32_t namespaceID = aName->NamespaceID();
  nsIAtom* localName = aName->NameAtom();
  if (namespaceID == kNameSpaceID_None) {
    return SetAndTakeAttr(localName, aValue);
  }

  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    if (ATTRS(mImpl)[i].mName.Equals(localName, namespaceID)) {
      ATTRS(mImpl)[i].mName.SetTo(aName);
      ATTRS(mImpl)[i].mValue.Reset();
      ATTRS(mImpl)[i].mValue.SwapValueWith(aValue);

      return NS_OK;
    }
  }

  NS_ENSURE_TRUE(i < ATTRCHILD_ARRAY_MAX_ATTR_COUNT,
                 NS_ERROR_FAILURE);

  if (i == slotCount && !AddAttrSlot()) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  new (&ATTRS(mImpl)[i].mName) nsAttrName(aName);
  new (&ATTRS(mImpl)[i].mValue) nsAttrValue();
  ATTRS(mImpl)[i].mValue.SwapValueWith(aValue);

  return NS_OK;
}


nsresult
nsAttrAndChildArray::RemoveAttrAt(uint32_t aPos, nsAttrValue& aValue)
{
  NS_ASSERTION(aPos < AttrCount(), "out-of-bounds");

  uint32_t mapped = MappedAttrCount();
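  // Index convention used throughout this class: positions [0, mapped)
  // address attributes stored in the shared mMappedAttrs object, while
  // positions [mapped, AttrCount()) address the in-buffer InternalAttr
  // slots, which is why aPos is rebased by |mapped| before mBuffer is
  // touched below.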
  if (aPos < mapped) {
    if (mapped == 1) {
      // We're removing the last mapped attribute. Can't swap in this
      // case; have to copy.
      aValue.SetTo(*mImpl->mMappedAttrs->AttrAt(0));
      NS_RELEASE(mImpl->mMappedAttrs);

      return NS_OK;
    }

    nsRefPtr<nsMappedAttributes> mapped =
      GetModifiableMapped(nullptr, nullptr, false);

    mapped->RemoveAttrAt(aPos, aValue);

    return MakeMappedUnique(mapped);
  }

  aPos -= mapped;
  ATTRS(mImpl)[aPos].mValue.SwapValueWith(aValue);
  ATTRS(mImpl)[aPos].~InternalAttr();

  uint32_t slotCount = AttrSlotCount();
  memmove(&ATTRS(mImpl)[aPos],
          &ATTRS(mImpl)[aPos + 1],
          (slotCount - aPos - 1) * sizeof(InternalAttr));
  memset(&ATTRS(mImpl)[slotCount - 1], 0, sizeof(InternalAttr));

  return NS_OK;
}

const nsAttrName*
nsAttrAndChildArray::AttrNameAt(uint32_t aPos) const
{
  NS_ASSERTION(aPos < AttrCount(),
               "out-of-bounds access in nsAttrAndChildArray");

  uint32_t mapped = MappedAttrCount();
  if (aPos < mapped) {
    return mImpl->mMappedAttrs->NameAt(aPos);
  }

  return &ATTRS(mImpl)[aPos - mapped].mName;
}

const nsAttrName*
nsAttrAndChildArray::GetSafeAttrNameAt(uint32_t aPos) const
{
  uint32_t mapped = MappedAttrCount();
  if (aPos < mapped) {
    return mImpl->mMappedAttrs->NameAt(aPos);
  }

  aPos -= mapped;
  if (aPos >= AttrSlotCount()) {
    return nullptr;
  }

  void** pos = mImpl->mBuffer + aPos * ATTRSIZE;
  if (!*pos) {
    return nullptr;
  }

  return &reinterpret_cast<InternalAttr*>(pos)->mName;
}

const nsAttrName*
nsAttrAndChildArray::GetExistingAttrNameFromQName(const nsAString& aName) const
{
  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    if (ATTRS(mImpl)[i].mName.QualifiedNameEquals(aName)) {
      return &ATTRS(mImpl)[i].mName;
    }
  }

  if (mImpl && mImpl->mMappedAttrs) {
    return mImpl->mMappedAttrs->GetExistingAttrNameFromQName(aName);
  }

  return nullptr;
}

int32_t
nsAttrAndChildArray::IndexOfAttr(nsIAtom* aLocalName, int32_t aNamespaceID) const
{
  int32_t idx;
  if (mImpl && mImpl->mMappedAttrs && aNamespaceID == kNameSpaceID_None) {
    idx = mImpl->mMappedAttrs->IndexOfAttr(aLocalName);
    if (idx >= 0) {
      return idx;
    }
  }

  uint32_t i;
  uint32_t mapped = MappedAttrCount();
  uint32_t slotCount = AttrSlotCount();
  if (aNamespaceID == kNameSpaceID_None) {
    // This should be the common case, so let's make an optimized loop.
    for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
      if (ATTRS(mImpl)[i].mName.Equals(aLocalName)) {
        return i + mapped;
      }
    }
  }
  else {
    for (i = 0; i < slotCount &&
                AttrSlotIsTaken(i); ++i) {
      if (ATTRS(mImpl)[i].mName.Equals(aLocalName, aNamespaceID)) {
        return i + mapped;
      }
    }
  }

  return -1;
}

nsresult
nsAttrAndChildArray::SetAndTakeMappedAttr(nsIAtom* aLocalName,
                                          nsAttrValue& aValue,
                                          nsMappedAttributeElement* aContent,
                                          nsHTMLStyleSheet* aSheet)
{
  bool willAdd = true;
  if (mImpl && mImpl->mMappedAttrs) {
    willAdd = !mImpl->mMappedAttrs->GetAttr(aLocalName);
  }

  nsRefPtr<nsMappedAttributes> mapped =
    GetModifiableMapped(aContent, aSheet, willAdd);

  mapped->SetAndTakeAttr(aLocalName, aValue);

  return MakeMappedUnique(mapped);
}

nsresult
nsAttrAndChildArray::DoSetMappedAttrStyleSheet(nsHTMLStyleSheet* aSheet)
{
  NS_PRECONDITION(mImpl && mImpl->mMappedAttrs,
                  "Should have mapped attrs here!");
  if (aSheet == mImpl->mMappedAttrs->GetStyleSheet()) {
    return NS_OK;
  }

  nsRefPtr<nsMappedAttributes> mapped =
    GetModifiableMapped(nullptr, nullptr, false);

  mapped->SetStyleSheet(aSheet);

  return MakeMappedUnique(mapped);
}

void
nsAttrAndChildArray::WalkMappedAttributeStyleRules(nsRuleWalker* aRuleWalker)
{
  if (mImpl && mImpl->mMappedAttrs) {
    aRuleWalker->Forward(mImpl->mMappedAttrs);
  }
}

void
nsAttrAndChildArray::Compact()
{
  if (!mImpl) {
    return;
  }

  // First compress away empty attr slots.
  uint32_t slotCount = AttrSlotCount();
  uint32_t attrCount = NonMappedAttrCount();
  uint32_t childCount = ChildCount();

  if (attrCount < slotCount) {
    memmove(mImpl->mBuffer + attrCount * ATTRSIZE,
            mImpl->mBuffer + slotCount * ATTRSIZE,
            childCount * sizeof(nsIContent*));
    SetAttrSlotCount(attrCount);
  }

  // Then resize or free the buffer.
  uint32_t newSize = attrCount * ATTRSIZE + childCount;
  if (!newSize && !mImpl->mMappedAttrs) {
    moz_free(mImpl);
    mImpl = nullptr;
  }
  else if (newSize < mImpl->mBufferSize) {
    mImpl = static_cast<Impl*>(moz_realloc(mImpl, (newSize + NS_IMPL_EXTRA_SIZE) * sizeof(nsIContent*)));
    NS_ASSERTION(mImpl, "failed to reallocate to smaller buffer");

    mImpl->mBufferSize = newSize;
  }
}

void
nsAttrAndChildArray::Clear()
{
  if (!mImpl) {
    return;
  }

  if (mImpl->mMappedAttrs) {
    NS_RELEASE(mImpl->mMappedAttrs);
  }

  uint32_t i, slotCount = AttrSlotCount();
  for (i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
    ATTRS(mImpl)[i].~InternalAttr();
  }

  nsAutoScriptBlocker scriptBlocker;
  uint32_t end = slotCount * ATTRSIZE + ChildCount();
  for (i = slotCount * ATTRSIZE; i < end; ++i) {
    nsIContent* child = static_cast<nsIContent*>(mImpl->mBuffer[i]);
    // Passing false here so tree teardown doesn't end up being
    // O(N*D) (number of nodes times average depth of the tree).
    child->UnbindFromTree(false); // XXX is it better to let the owner do this?
    // Make sure to unlink our kids from each other, since someone
    // else could still be holding references to some of them.

    // XXXbz We probably can't push this assignment down into the |aNullParent|
    // case of UnbindFromTree because we still need the assignment in
    // RemoveChildAt. In particular, ContentRemoved fires between
    // RemoveChildAt and UnbindFromTree, and in ContentRemoved the sibling
    // chain needs to be correct. Though maybe we could set the prev and next
    // to point to each other but keep the kid being removed pointing to them
    // through ContentRemoved so consumers can find where it used to be in the
    // list?
    child->mPreviousSibling = child->mNextSibling = nullptr;
    NS_RELEASE(child);
  }

  SetAttrSlotAndChildCount(0, 0);
}

uint32_t
nsAttrAndChildArray::NonMappedAttrCount() const
{
  if (!mImpl) {
    return 0;
  }

  uint32_t count = AttrSlotCount();
  while (count > 0 && !mImpl->mBuffer[(count - 1) * ATTRSIZE]) {
    --count;
  }

  return count;
}

uint32_t
nsAttrAndChildArray::MappedAttrCount() const
{
  return mImpl && mImpl->mMappedAttrs ? (uint32_t)mImpl->mMappedAttrs->Count() : 0;
}

nsMappedAttributes*
nsAttrAndChildArray::GetModifiableMapped(nsMappedAttributeElement* aContent,
                                         nsHTMLStyleSheet* aSheet,
                                         bool aWillAddAttr)
{
  if (mImpl && mImpl->mMappedAttrs) {
    return mImpl->mMappedAttrs->Clone(aWillAddAttr);
  }

  MOZ_ASSERT(aContent, "Trying to create modifiable without content");

  nsMapRuleToAttributesFunc mapRuleFunc =
    aContent->GetAttributeMappingFunction();
  return new nsMappedAttributes(aSheet, mapRuleFunc);
}

nsresult
nsAttrAndChildArray::MakeMappedUnique(nsMappedAttributes* aAttributes)
{
  NS_ASSERTION(aAttributes, "missing attributes");

  if (!mImpl && !GrowBy(1)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (!aAttributes->GetStyleSheet()) {
    // This doesn't currently happen, but it could if we do loading right.

    nsRefPtr<nsMappedAttributes> mapped(aAttributes);
    mapped.swap(mImpl->mMappedAttrs);

    return NS_OK;
  }

  nsRefPtr<nsMappedAttributes> mapped =
    aAttributes->GetStyleSheet()->UniqueMappedAttributes(aAttributes);
  NS_ENSURE_TRUE(mapped, NS_ERROR_OUT_OF_MEMORY);

  if (mapped != aAttributes) {
    // Reset the style sheet of aAttributes so that it doesn't spend time
    // trying to remove itself from the hash. There is no risk that aAttributes
    // is in the hash, since it will always have come from GetModifiableMapped,
    // which never returns maps that are in the hash (such maps are by
    // nature not modifiable).
    aAttributes->DropStyleSheetReference();
  }
  mapped.swap(mImpl->mMappedAttrs);

  return NS_OK;
}


bool
nsAttrAndChildArray::GrowBy(uint32_t aGrowSize)
{
  uint32_t size = mImpl ? mImpl->mBufferSize + NS_IMPL_EXTRA_SIZE : 0;
  uint32_t minSize = size + aGrowSize;

  if (minSize <= ATTRCHILD_ARRAY_LINEAR_THRESHOLD) {
    do {
      size += ATTRCHILD_ARRAY_GROWSIZE;
    } while (size < minSize);
  }
  else {
    size = 1u << mozilla::CeilingLog2(minSize);
  }

  bool needToInitialize = !mImpl;
  Impl* newImpl = static_cast<Impl*>(moz_realloc(mImpl, size * sizeof(void*)));
  NS_ENSURE_TRUE(newImpl, false);

  mImpl = newImpl;

  // Set initial counts if we didn't have a buffer before.
  if (needToInitialize) {
    mImpl->mMappedAttrs = nullptr;
    SetAttrSlotAndChildCount(0, 0);
  }

  mImpl->mBufferSize = size - NS_IMPL_EXTRA_SIZE;

  return true;
}

bool
nsAttrAndChildArray::AddAttrSlot()
{
  uint32_t slotCount = AttrSlotCount();
  uint32_t childCount = ChildCount();

  // Grow the buffer if needed.
  if (!(mImpl && mImpl->mBufferSize >= (slotCount + 1) * ATTRSIZE + childCount) &&
      !GrowBy(ATTRSIZE)) {
    return false;
  }
  void** offset = mImpl->mBuffer + slotCount * ATTRSIZE;

  if (childCount > 0) {
    memmove(&ATTRS(mImpl)[slotCount + 1], &ATTRS(mImpl)[slotCount],
            childCount * sizeof(nsIContent*));
  }

  SetAttrSlotCount(slotCount + 1);
  offset[0] = nullptr;
  offset[1] = nullptr;

  return true;
}

inline void
nsAttrAndChildArray::SetChildAtPos(void** aPos, nsIContent* aChild,
                                   uint32_t aIndex, uint32_t aChildCount)
{
  NS_PRECONDITION(!aChild->GetNextSibling(), "aChild with next sibling?");
  NS_PRECONDITION(!aChild->GetPreviousSibling(), "aChild with prev sibling?");

  *aPos = aChild;
  NS_ADDREF(aChild);
  if (aIndex != 0) {
    nsIContent* previous = static_cast<nsIContent*>(*(aPos - 1));
    aChild->mPreviousSibling = previous;
    previous->mNextSibling = aChild;
  }
  if (aIndex != aChildCount) {
    nsIContent* next = static_cast<nsIContent*>(*(aPos + 1));
    aChild->mNextSibling = next;
    next->mPreviousSibling = aChild;
  }
}

size_t
nsAttrAndChildArray::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
{
  size_t n = 0;
  if (mImpl) {
    // Don't add the size taken by *mMappedAttrs because it's shared.

    n += aMallocSizeOf(mImpl);

    uint32_t slotCount = AttrSlotCount();
    for (uint32_t i = 0; i < slotCount && AttrSlotIsTaken(i); ++i) {
      nsAttrValue* value = &ATTRS(mImpl)[i].mValue;
      n += value->SizeOfExcludingThis(aMallocSizeOf);
    }
  }

  return n;
}
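/*
 * Worked example of the child-index cache above (a sketch with a made-up
 * address, not part of the original logic): for an nsAttrAndChildArray
 * living at 0x7f33c4a081c0, CACHE_GET_INDEX shifts the pointer right by
 * CACHE_POINTER_SHIFT (5) and masks with CACHE_NUM_SLOTS - 1 (127), so the
 * array maps to slot (0x7f33c4a081c0 >> 5) & 127 == 14. IndexOfChild only
 * consults the cache once the array has at least CACHE_CHILD_LIMIT (10)
 * children, and a stale or colliding entry merely costs a longer search,
 * never a wrong answer, because the cached index is only used as a starting
 * point and is re-verified against the actual child pointers.
 */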