// Copyright (c) 2009, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef GOOGLE_BREAKPAD_COMMON_MEMORY_H_
#define GOOGLE_BREAKPAD_COMMON_MEMORY_H_

#include <stdint.h>    // uint8_t
#include <string.h>    // memcpy
#include <sys/mman.h>  // mmap, munmap, PROT_*, MAP_*
#include <unistd.h>    // getpagesize

#ifdef __APPLE__
#define sys_mmap mmap
#define sys_mmap2 mmap
#define sys_munmap munmap
#define MAP_ANONYMOUS MAP_ANON
#else
#include "third_party/lss/linux_syscall_support.h"
#endif

namespace google_breakpad {

// This is a very simple allocator which fetches pages from the kernel
// directly. Thus, it can be used even when the heap may be corrupted.
//
// There is no free operation. The pages are only freed when the object is
// destroyed.
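//
// Usage sketch (illustrative only; |allocator| and |buf| are hypothetical
// names, not part of this header):
//
//   google_breakpad::PageAllocator allocator;
//   void* buf = allocator.Alloc(128);  // carved out of anonymous mmap'd pages
//   // ... use |buf| ...
//   // There is no per-allocation free: all pages are munmap'd when
//   // |allocator| is destroyed.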
class PageAllocator {
 public:
  PageAllocator()
      : page_size_(getpagesize()),
        last_(NULL),
        current_page_(NULL),
        page_offset_(0) {
  }

  ~PageAllocator() {
    FreeAll();
  }

  // Allocate |bytes| bytes. Returns NULL if |bytes| is zero or if mapping
  // fresh pages fails. The returned memory cannot be freed individually.
  void *Alloc(unsigned bytes) {
    if (!bytes)
      return NULL;

    // Serve the request from the current page if it fits.
    if (current_page_ && page_size_ - page_offset_ >= bytes) {
      uint8_t *const ret = current_page_ + page_offset_;
      page_offset_ += bytes;
      if (page_offset_ == page_size_) {
        page_offset_ = 0;
        current_page_ = NULL;
      }

      return ret;
    }

    // Otherwise map enough new pages for the request plus a PageHeader.
    const unsigned pages =
        (bytes + sizeof(PageHeader) + page_size_ - 1) / page_size_;
    uint8_t *const ret = GetNPages(pages);
    if (!ret)
      return NULL;

    page_offset_ =
        (page_size_ - (page_size_ * pages - (bytes + sizeof(PageHeader)))) %
        page_size_;
    current_page_ = page_offset_ ? ret + page_size_ * (pages - 1) : NULL;

    return ret + sizeof(PageHeader);
  }

 private:
  // Map |num_pages| anonymous pages and link them into the list that
  // FreeAll() walks. Returns NULL if the mmap fails.
  uint8_t *GetNPages(unsigned num_pages) {
#ifdef __x86_64
    void *a = sys_mmap(NULL, page_size_ * num_pages, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
    void *a = sys_mmap2(NULL, page_size_ * num_pages, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
    if (a == MAP_FAILED)
      return NULL;

    struct PageHeader *header = reinterpret_cast<PageHeader*>(a);
    header->next = last_;
    header->num_pages = num_pages;
    last_ = header;

    return reinterpret_cast<uint8_t*>(a);
  }

  // Return every set of pages to the kernel.
  void FreeAll() {
    PageHeader *next;

    for (PageHeader *cur = last_; cur; cur = next) {
      next = cur->next;
      sys_munmap(cur, cur->num_pages * page_size_);
    }
  }

  struct PageHeader {
    PageHeader *next;    // pointer to the start of the next set of pages.
    unsigned num_pages;  // the number of pages in this set.
  };

  const unsigned page_size_;
  PageHeader *last_;
  uint8_t *current_page_;
  unsigned page_offset_;
};

// A wasteful vector is like a normal std::vector, except that it's very much
// simpler and it allocates memory from a PageAllocator. It's wasteful
// because, when resizing, it always allocates a whole new array since the
// PageAllocator doesn't support realloc.
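//
// Usage sketch (illustrative only; |Entry| and the local names are
// hypothetical, and the placement form of operator new referenced here is
// the one defined at the end of this header):
//
//   struct Entry { int id; };
//
//   google_breakpad::PageAllocator allocator;
//   google_breakpad::wasteful_vector<Entry*> entries(&allocator);
//   Entry* e = new(allocator) Entry;  // constructed inside allocator pages
//   e->id = 1;
//   entries.push_back(e);
//   // Neither |e| nor the vector's backing array is ever freed explicitly;
//   // both are reclaimed when |allocator| is destroyed.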
template<class T>
class wasteful_vector {
 public:
  wasteful_vector(PageAllocator *allocator, unsigned size_hint = 16)
      : allocator_(allocator),
        a_((T*) allocator->Alloc(sizeof(T) * size_hint)),
        allocated_(size_hint),
        used_(0) {
  }

  T& back() {
    return a_[used_ - 1];
  }

  const T& back() const {
    return a_[used_ - 1];
  }

  bool empty() const {
    return used_ == 0;
  }

  void push_back(const T& new_element) {
    if (used_ == allocated_)
      Realloc(allocated_ * 2);
    a_[used_++] = new_element;
  }

  size_t size() const {
    return used_;
  }

  void resize(unsigned sz, T c = T()) {
    // No need to test "sz >= 0", as "sz" is unsigned.
    if (sz <= used_) {
      used_ = sz;
    } else {
      unsigned a = allocated_;
      if (sz > a) {
        while (sz > a) {
          a *= 2;
        }
        Realloc(a);
      }
      while (sz > used_) {
        a_[used_++] = c;
      }
    }
  }

  T& operator[](size_t index) {
    return a_[index];
  }

  const T& operator[](size_t index) const {
    return a_[index];
  }

 private:
  // Allocate a larger array and copy the old elements across. The old array
  // is simply abandoned, since the PageAllocator cannot free it.
  void Realloc(unsigned new_size) {
    T *new_array =
        reinterpret_cast<T*>(allocator_->Alloc(sizeof(T) * new_size));
    memcpy(new_array, a_, used_ * sizeof(T));
    a_ = new_array;
    allocated_ = new_size;
  }

  PageAllocator *const allocator_;
  T *a_;                // pointer to an array of |allocated_| elements.
  unsigned allocated_;  // size of |a_|, in elements.
  unsigned used_;       // number of used slots in |a_|.
};

}  // namespace google_breakpad

// Placement form of operator new that allocates from a PageAllocator. There
// is no matching operator delete: memory obtained this way is reclaimed only
// when the allocator itself is destroyed.
inline void* operator new(size_t nbytes,
                          google_breakpad::PageAllocator& allocator) {
  return allocator.Alloc(nbytes);
}

#endif  // GOOGLE_BREAKPAD_COMMON_MEMORY_H_