Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ |
michael@0 | 2 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 3 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 4 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 5 | |
michael@0 | 6 | #include "nsString.h" |
michael@0 | 7 | #include "nsTreeRows.h" |
michael@0 | 8 | #include <algorithm> |
michael@0 | 9 | |
michael@0 | 10 | nsTreeRows::Subtree* |
michael@0 | 11 | nsTreeRows::EnsureSubtreeFor(Subtree* aParent, |
michael@0 | 12 | int32_t aChildIndex) |
michael@0 | 13 | { |
michael@0 | 14 | Subtree* subtree = GetSubtreeFor(aParent, aChildIndex); |
michael@0 | 15 | |
michael@0 | 16 | if (! subtree) { |
michael@0 | 17 | subtree = aParent->mRows[aChildIndex].mSubtree = new Subtree(aParent); |
michael@0 | 18 | InvalidateCachedRow(); |
michael@0 | 19 | } |
michael@0 | 20 | |
michael@0 | 21 | return subtree; |
michael@0 | 22 | } |
michael@0 | 23 | |
michael@0 | 24 | nsTreeRows::Subtree* |
michael@0 | 25 | nsTreeRows::GetSubtreeFor(const Subtree* aParent, |
michael@0 | 26 | int32_t aChildIndex, |
michael@0 | 27 | int32_t* aSubtreeSize) |
michael@0 | 28 | { |
michael@0 | 29 | NS_PRECONDITION(aParent, "no parent"); |
michael@0 | 30 | NS_PRECONDITION(aChildIndex >= 0, "bad child index"); |
michael@0 | 31 | |
michael@0 | 32 | Subtree* result = nullptr; |
michael@0 | 33 | |
michael@0 | 34 | if (aChildIndex < aParent->mCount) |
michael@0 | 35 | result = aParent->mRows[aChildIndex].mSubtree; |
michael@0 | 36 | |
michael@0 | 37 | if (aSubtreeSize) |
michael@0 | 38 | *aSubtreeSize = result ? result->mSubtreeSize : 0; |
michael@0 | 39 | |
michael@0 | 40 | return result; |
michael@0 | 41 | } |
michael@0 | 42 | |
michael@0 | 43 | void |
michael@0 | 44 | nsTreeRows::RemoveSubtreeFor(Subtree* aParent, int32_t aChildIndex) |
michael@0 | 45 | { |
michael@0 | 46 | NS_PRECONDITION(aParent, "no parent"); |
michael@0 | 47 | NS_PRECONDITION(aChildIndex >= 0 && aChildIndex < aParent->mCount, "bad child index"); |
michael@0 | 48 | |
michael@0 | 49 | Row& row = aParent->mRows[aChildIndex]; |
michael@0 | 50 | |
michael@0 | 51 | if (row.mSubtree) { |
michael@0 | 52 | int32_t subtreeSize = row.mSubtree->GetSubtreeSize(); |
michael@0 | 53 | |
michael@0 | 54 | delete row.mSubtree; |
michael@0 | 55 | row.mSubtree = nullptr; |
michael@0 | 56 | |
michael@0 | 57 | for (Subtree* subtree = aParent; subtree != nullptr; subtree = subtree->mParent) |
michael@0 | 58 | subtree->mSubtreeSize -= subtreeSize; |
michael@0 | 59 | } |
michael@0 | 60 | |
michael@0 | 61 | InvalidateCachedRow(); |
michael@0 | 62 | } |
michael@0 | 63 | |
michael@0 | 64 | nsTreeRows::iterator |
michael@0 | 65 | nsTreeRows::First() |
michael@0 | 66 | { |
michael@0 | 67 | iterator result; |
michael@0 | 68 | result.Append(&mRoot, 0); |
michael@0 | 69 | result.SetRowIndex(0); |
michael@0 | 70 | return result; |
michael@0 | 71 | } |
michael@0 | 72 | |
nsTreeRows::iterator
nsTreeRows::Last()
{
    // Return an iterator positioned one past the last row in the view.
    iterator result;

    // Build up a path along the rightmost edge of the tree
    Subtree* current = &mRoot;
    int32_t count = current->Count();
    do {
        int32_t last = count - 1;
        result.Append(current, last);
        // Only descend when this level is non-empty; an empty root
        // yields a single link with child index -1.
        current = count ? GetSubtreeFor(current, last) : nullptr;
    } while (current && ((count = current->Count()) != 0));

    // Now, at the bottom rightmost leaf, advance us one off the end.
    result.GetTop().mChildIndex++;

    // Our row index will be the size of the root subtree, plus one.
    // NOTE(review): the "+ 1" looks off by one relative to First()'s
    // row index of 0, but iterator::operator== compares only the link
    // path (not mRowIndex), so comparisons against Last() still work.
    result.SetRowIndex(mRoot.GetSubtreeSize() + 1);

    return result;
}
michael@0 | 95 | |
nsTreeRows::iterator
nsTreeRows::operator[](int32_t aRow)
{
    // Return an iterator positioned at absolute row index aRow.
    //
    // See if we're just lucky, and end up with something
    // nearby. (This tends to happen a lot due to the way that we get
    // asked for rows n' stuff.)
    int32_t last = mLastRow.GetRowIndex();
    if (last != -1) {
        if (aRow == last)
            return mLastRow;
        else if (last + 1 == aRow)
            return ++mLastRow;
        else if (last - 1 == aRow)
            return --mLastRow;
    }

    // Nope. Construct a path to the specified index. This is a little
    // bit better than O(n), because we can skip over subtrees. (So it
    // ends up being approximately linear in the subtree size, instead
    // of the entire view size. But, most of the time, big views are
    // flat. Oh well.)
    iterator result;
    Subtree* current = &mRoot;

    int32_t index = 0;
    result.SetRowIndex(aRow);

    do {
        int32_t subtreeSize;
        Subtree* subtree = GetSubtreeFor(current, index, &subtreeSize);

        // If the target row is this child or falls inside its open
        // subtree, descend one level; otherwise skip the child and its
        // entire subtree and move to the next sibling.
        if (subtreeSize >= aRow) {
            result.Append(current, index);
            current = subtree;
            index = 0;
            --aRow;                    // one for the row we just consumed
        }
        else {
            ++index;
            aRow -= subtreeSize + 1;   // the child plus everything under it
        }
    } while (aRow >= 0);

    // Remember where we ended up, to make the next nearby lookup fast.
    mLastRow = result;
    return result;
}
michael@0 | 142 | |
nsTreeRows::iterator
nsTreeRows::FindByResource(nsIRDFResource* aResource)
{
    // Find the row whose result corresponds to aResource. Returns
    // Last() when no row matches or when a lookup call fails.
    //
    // XXX Mmm, scan through the rows one-by-one...
    iterator last = Last();
    iterator iter;

    nsresult rv;
    nsAutoString resourceid;
    bool stringmode = false;   // true once we fall back to id-string compares

    for (iter = First(); iter != last; ++iter) {
        if (!stringmode) {
            nsCOMPtr<nsIRDFResource> findres;
            rv = iter->mMatch->mResult->GetResource(getter_AddRefs(findres));
            if (NS_FAILED(rv)) return last;

            if (findres == aResource)
                break;

            // A row with no resource can't be compared by pointer;
            // switch to comparing string ids for the rest of the scan.
            if (! findres) {
                const char *uri;
                aResource->GetValueConst(&uri);
                CopyUTF8toUTF16(uri, resourceid);

                // set stringmode and fall through
                stringmode = true;
            }
        }

        // additional check because previous block could change stringmode
        if (stringmode) {
            nsAutoString findid;
            rv = iter->mMatch->mResult->GetId(findid);
            if (NS_FAILED(rv)) return last;

            if (resourceid.Equals(findid))
                break;
        }
    }

    return iter;
}
michael@0 | 186 | |
michael@0 | 187 | nsTreeRows::iterator |
michael@0 | 188 | nsTreeRows::Find(nsIXULTemplateResult *aResult) |
michael@0 | 189 | { |
michael@0 | 190 | // XXX Mmm, scan through the rows one-by-one... |
michael@0 | 191 | iterator last = Last(); |
michael@0 | 192 | iterator iter; |
michael@0 | 193 | |
michael@0 | 194 | for (iter = First(); iter != last; ++iter) { |
michael@0 | 195 | if (aResult == iter->mMatch->mResult) |
michael@0 | 196 | break; |
michael@0 | 197 | } |
michael@0 | 198 | |
michael@0 | 199 | return iter; |
michael@0 | 200 | } |
michael@0 | 201 | |
void
nsTreeRows::Clear()
{
    // Remove every row from the view, then drop the cached last-row
    // iterator so operator[] can't hand out a stale path.
    mRoot.Clear();
    InvalidateCachedRow();
}
michael@0 | 208 | |
michael@0 | 209 | //---------------------------------------------------------------------- |
michael@0 | 210 | // |
michael@0 | 211 | // nsTreeRows::Subtree |
michael@0 | 212 | // |
michael@0 | 213 | |
nsTreeRows::Subtree::~Subtree()
{
    // Recursively frees any open child subtrees and the row storage.
    Clear();
}
michael@0 | 218 | |
michael@0 | 219 | void |
michael@0 | 220 | nsTreeRows::Subtree::Clear() |
michael@0 | 221 | { |
michael@0 | 222 | for (int32_t i = mCount - 1; i >= 0; --i) |
michael@0 | 223 | delete mRows[i].mSubtree; |
michael@0 | 224 | |
michael@0 | 225 | delete[] mRows; |
michael@0 | 226 | |
michael@0 | 227 | mRows = nullptr; |
michael@0 | 228 | mCount = mCapacity = mSubtreeSize = 0; |
michael@0 | 229 | } |
michael@0 | 230 | |
nsTreeRows::iterator
nsTreeRows::Subtree::InsertRowAt(nsTemplateMatch* aMatch, int32_t aIndex)
{
    // Insert a row for aMatch at position aIndex within this subtree,
    // bump the size of this subtree and all of its ancestors, and
    // return an iterator pointing at the newly inserted row.

    // Grow the row array if we're out of room or aIndex lands past the
    // current capacity.
    if (mCount >= mCapacity || aIndex >= mCapacity) {
        int32_t newCapacity = std::max(mCapacity * 2, aIndex + 1);
        Row* newRows = new Row[newCapacity];
        // NOTE(review): with standard (throwing) operator new this null
        // check is dead code; presumably this build configures new as
        // fallible or infallible-aborting — confirm against the
        // project's allocator setup.
        if (! newRows)
            return iterator();

        for (int32_t i = mCount - 1; i >= 0; --i)
            newRows[i] = mRows[i];

        delete[] mRows;

        mRows = newRows;
        mCapacity = newCapacity;
    }

    // Shift existing rows up one slot to open a hole at aIndex.
    for (int32_t i = mCount - 1; i >= aIndex; --i)
        mRows[i + 1] = mRows[i];

    mRows[aIndex].mMatch = aMatch;
    mRows[aIndex].mContainerType = eContainerType_Unknown;
    mRows[aIndex].mContainerState = eContainerState_Unknown;
    mRows[aIndex].mContainerFill = eContainerFill_Unknown;
    mRows[aIndex].mSubtree = nullptr;
    ++mCount;

    // Now build an iterator that points to the newly inserted element.
    // rowIndex accumulates the absolute row index as we walk upward.
    int32_t rowIndex = 0;
    iterator result;
    result.Push(this, aIndex);

    // Count the rows (and their open subtrees) preceding aIndex at
    // this level.
    for ( ; --aIndex >= 0; ++rowIndex) {
        // Account for open subtrees in the absolute row index.
        const Subtree *subtree = mRows[aIndex].mSubtree;
        if (subtree)
            rowIndex += subtree->mSubtreeSize;
    }

    // Walk up toward the root: bump each level's size, and add in the
    // rows that precede us at each level.
    Subtree *subtree = this;
    do {
        // Note that the subtree's size has expanded.
        ++subtree->mSubtreeSize;

        Subtree *parent = subtree->mParent;
        if (! parent)
            break;

        // Account for open subtrees in the absolute row index.
        int32_t count = parent->Count();
        for (aIndex = 0; aIndex < count; ++aIndex, ++rowIndex) {
            const Subtree *child = (*parent)[aIndex].mSubtree;
            if (subtree == child)
                break;

            if (child)
                rowIndex += child->mSubtreeSize;
        }

        NS_ASSERTION(aIndex < count, "couldn't find subtree in parent");

        result.Push(parent, aIndex);
        subtree = parent;
        ++rowIndex; // One for the parent row.
    } while (1);

    result.SetRowIndex(rowIndex);
    return result;
}
michael@0 | 301 | |
michael@0 | 302 | void |
michael@0 | 303 | nsTreeRows::Subtree::RemoveRowAt(int32_t aIndex) |
michael@0 | 304 | { |
michael@0 | 305 | NS_PRECONDITION(aIndex >= 0 && aIndex < Count(), "bad index"); |
michael@0 | 306 | if (aIndex < 0 || aIndex >= Count()) |
michael@0 | 307 | return; |
michael@0 | 308 | |
michael@0 | 309 | // How big is the subtree we're going to be removing? |
michael@0 | 310 | int32_t subtreeSize = mRows[aIndex].mSubtree |
michael@0 | 311 | ? mRows[aIndex].mSubtree->GetSubtreeSize() |
michael@0 | 312 | : 0; |
michael@0 | 313 | |
michael@0 | 314 | ++subtreeSize; |
michael@0 | 315 | |
michael@0 | 316 | delete mRows[aIndex].mSubtree; |
michael@0 | 317 | |
michael@0 | 318 | for (int32_t i = aIndex + 1; i < mCount; ++i) |
michael@0 | 319 | mRows[i - 1] = mRows[i]; |
michael@0 | 320 | |
michael@0 | 321 | --mCount; |
michael@0 | 322 | |
michael@0 | 323 | for (Subtree* subtree = this; subtree != nullptr; subtree = subtree->mParent) |
michael@0 | 324 | subtree->mSubtreeSize -= subtreeSize; |
michael@0 | 325 | } |
michael@0 | 326 | |
michael@0 | 327 | //---------------------------------------------------------------------- |
michael@0 | 328 | // |
michael@0 | 329 | // nsTreeRows::iterator |
michael@0 | 330 | // |
michael@0 | 331 | |
// Copy constructor: clones both the absolute row index and the full
// link path.
nsTreeRows::iterator::iterator(const iterator& aIterator)
    : mRowIndex(aIterator.mRowIndex),
      mLink(aIterator.mLink)
{
}
michael@0 | 337 | |
// Copy assignment: takes the source's row index and link path.
// (Self-assignment is harmless for these members.)
nsTreeRows::iterator&
nsTreeRows::iterator::operator=(const iterator& aIterator)
{
    mRowIndex = aIterator.mRowIndex;
    mLink = aIterator.mLink;
    return *this;
}
michael@0 | 345 | |
michael@0 | 346 | void |
michael@0 | 347 | nsTreeRows::iterator::Append(Subtree* aParent, int32_t aChildIndex) |
michael@0 | 348 | { |
michael@0 | 349 | Link *link = mLink.AppendElement(); |
michael@0 | 350 | if (link) { |
michael@0 | 351 | link->mParent = aParent; |
michael@0 | 352 | link->mChildIndex = aChildIndex; |
michael@0 | 353 | } |
michael@0 | 354 | else |
michael@0 | 355 | NS_ERROR("out of memory"); |
michael@0 | 356 | } |
michael@0 | 357 | |
michael@0 | 358 | void |
michael@0 | 359 | nsTreeRows::iterator::Push(Subtree *aParent, int32_t aChildIndex) |
michael@0 | 360 | { |
michael@0 | 361 | Link *link = mLink.InsertElementAt(0); |
michael@0 | 362 | if (link) { |
michael@0 | 363 | link->mParent = aParent; |
michael@0 | 364 | link->mChildIndex = aChildIndex; |
michael@0 | 365 | } |
michael@0 | 366 | else |
michael@0 | 367 | NS_ERROR("out of memory"); |
michael@0 | 368 | } |
michael@0 | 369 | |
michael@0 | 370 | bool |
michael@0 | 371 | nsTreeRows::iterator::operator==(const iterator& aIterator) const |
michael@0 | 372 | { |
michael@0 | 373 | if (GetDepth() != aIterator.GetDepth()) |
michael@0 | 374 | return false; |
michael@0 | 375 | |
michael@0 | 376 | if (GetDepth() == 0) |
michael@0 | 377 | return true; |
michael@0 | 378 | |
michael@0 | 379 | return GetTop() == aIterator.GetTop(); |
michael@0 | 380 | } |
michael@0 | 381 | |
void
nsTreeRows::iterator::Next()
{
    // Advance the iterator to the next row in pre-order: descend into
    // an open subtree if there is one, otherwise move to the next
    // sibling, popping out of exhausted subtrees as needed.
    NS_PRECONDITION(GetDepth() > 0, "cannot increment an uninitialized iterator");

    // Increment the absolute row index
    ++mRowIndex;

    Link& top = GetTop();

    // Is there a child subtree? If so, descend into the child
    // subtree.
    Subtree* subtree = top.GetRow().mSubtree;

    if (subtree && subtree->Count()) {
        Append(subtree, 0);
        return;
    }

    // Have we exhausted the current subtree?
    if (top.mChildIndex >= top.mParent->Count() - 1) {
        // Yep. See if we've just iterated past the last element in
        // the tree, period. Walk back up the stack, looking for any
        // unfinished subtrees.
        int32_t unfinished;
        for (unfinished = GetDepth() - 2; unfinished >= 0; --unfinished) {
            const Link& link = mLink[unfinished];
            if (link.mChildIndex < link.mParent->Count() - 1)
                break;
        }

        // If there are no unfinished subtrees in the stack, then this
        // iterator is exhausted. Leave it in the same state that
        // Last() does.
        if (unfinished < 0) {
            top.mChildIndex++;
            return;
        }

        // Otherwise, we ran off the end of one of the inner
        // subtrees. Pop up to the next unfinished level in the stack.
        mLink.SetLength(unfinished + 1);
    }

    // Advance to the next child in this subtree
    // (GetTop() is re-fetched: the SetLength() above may have changed
    // which link is topmost.)
    ++(GetTop().mChildIndex);
}
michael@0 | 429 | |
michael@0 | 430 | void |
michael@0 | 431 | nsTreeRows::iterator::Prev() |
michael@0 | 432 | { |
michael@0 | 433 | NS_PRECONDITION(GetDepth() > 0, "cannot increment an uninitialized iterator"); |
michael@0 | 434 | |
michael@0 | 435 | // Decrement the absolute row index |
michael@0 | 436 | --mRowIndex; |
michael@0 | 437 | |
michael@0 | 438 | // Move to the previous child in this subtree |
michael@0 | 439 | --(GetTop().mChildIndex); |
michael@0 | 440 | |
michael@0 | 441 | // Have we exhausted the current subtree? |
michael@0 | 442 | if (GetTop().mChildIndex < 0) { |
michael@0 | 443 | // Yep. See if we've just iterated back to the first element |
michael@0 | 444 | // in the tree, period. Walk back up the stack, looking for |
michael@0 | 445 | // any unfinished subtrees. |
michael@0 | 446 | int32_t unfinished; |
michael@0 | 447 | for (unfinished = GetDepth() - 2; unfinished >= 0; --unfinished) { |
michael@0 | 448 | const Link& link = mLink[unfinished]; |
michael@0 | 449 | if (link.mChildIndex >= 0) |
michael@0 | 450 | break; |
michael@0 | 451 | } |
michael@0 | 452 | |
michael@0 | 453 | // If there are no unfinished subtrees in the stack, then this |
michael@0 | 454 | // iterator is exhausted. Leave it in the same state that |
michael@0 | 455 | // First() does. |
michael@0 | 456 | if (unfinished < 0) |
michael@0 | 457 | return; |
michael@0 | 458 | |
michael@0 | 459 | // Otherwise, we ran off the end of one of the inner |
michael@0 | 460 | // subtrees. Pop up to the next unfinished level in the stack. |
michael@0 | 461 | mLink.SetLength(unfinished + 1); |
michael@0 | 462 | return; |
michael@0 | 463 | } |
michael@0 | 464 | |
michael@0 | 465 | // Is there a child subtree immediately prior to our current |
michael@0 | 466 | // position? If so, descend into it, grovelling down to the |
michael@0 | 467 | // deepest, rightmost left edge. |
michael@0 | 468 | Subtree* parent = GetTop().GetParent(); |
michael@0 | 469 | int32_t index = GetTop().GetChildIndex(); |
michael@0 | 470 | |
michael@0 | 471 | Subtree* subtree = (*parent)[index].mSubtree; |
michael@0 | 472 | |
michael@0 | 473 | if (subtree && subtree->Count()) { |
michael@0 | 474 | do { |
michael@0 | 475 | index = subtree->Count() - 1; |
michael@0 | 476 | Append(subtree, index); |
michael@0 | 477 | |
michael@0 | 478 | parent = subtree; |
michael@0 | 479 | subtree = (*parent)[index].mSubtree; |
michael@0 | 480 | } while (subtree && subtree->Count()); |
michael@0 | 481 | } |
michael@0 | 482 | } |