/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <cstring>
#include <cstdlib>
#include <cstdio>
#include "Mappable.h"
#ifdef ANDROID
#include <linux/ashmem.h>
#endif
#include <sys/stat.h>
#include <errno.h>
#include "ElfLoader.h"
#include "SeekableZStream.h"
#include "Logging.h"

Mappable *
MappableFile::Create(const char *path)
{
  int fd = open(path, O_RDONLY);
  if (fd != -1)
    return new MappableFile(fd);
  return nullptr;
}

MemoryRange
MappableFile::mmap(const void *addr, size_t length, int prot, int flags,
                   off_t offset)
{
  MOZ_ASSERT(fd != -1);
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  return MemoryRange::mmap(const_cast<void *>(addr), length, prot, flags,
                           fd, offset);
}

void
MappableFile::finalize()
{
  /* Close the file; equivalent to close(fd.forget()) */
  fd = -1;
}

size_t
MappableFile::GetLength() const
{
  struct stat st;
  return fstat(fd, &st) ? 0 : st.st_size;
}

Mappable *
MappableExtractFile::Create(const char *name, Zip *zip, Zip::Stream *stream)
{
  const char *cachePath = getenv("MOZ_LINKER_CACHE");
  if (!cachePath || !*cachePath) {
    LOG("Warning: MOZ_LINKER_EXTRACT is set, but not MOZ_LINKER_CACHE; "
        "not extracting");
    return nullptr;
  }
  mozilla::ScopedDeleteArray<char> path;
  path = new char[strlen(cachePath) + strlen(name) + 2];
  sprintf(path, "%s/%s", cachePath, name);
  struct stat cacheStat;
  if (stat(path, &cacheStat) == 0) {
    struct stat zipStat;
    stat(zip->GetName(), &zipStat);
    if (cacheStat.st_mtime > zipStat.st_mtime) {
      DEBUG_LOG("Reusing %s", static_cast<char *>(path));
      return MappableFile::Create(path);
    }
  }
  DEBUG_LOG("Extracting to %s", static_cast<char *>(path));
  AutoCloseFD fd;
  fd = open(path, O_TRUNC | O_RDWR | O_CREAT | O_NOATIME,
            S_IRUSR | S_IWUSR);
  if (fd == -1) {
    LOG("Couldn't open %s to decompress library", path.get());
    return nullptr;
  }
  AutoUnlinkFile file;
  file = path.forget();
  if (stream->GetType() == Zip::Stream::DEFLATE) {
    if (ftruncate(fd, stream->GetUncompressedSize()) == -1) {
      LOG("Couldn't ftruncate %s to decompress library", file.get());
      return nullptr;
    }
    /* Map the temporary file for use as inflate buffer */
    MappedPtr buffer(MemoryRange::mmap(nullptr, stream->GetUncompressedSize(),
                                       PROT_WRITE, MAP_SHARED, fd, 0));
    if (buffer == MAP_FAILED) {
      LOG("Couldn't map %s to decompress library", file.get());
      return nullptr;
    }

    z_stream zStream = stream->GetZStream(buffer);
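    /* Note: the buffer was mapped MAP_SHARED above, so everything inflate()
     * writes to it lands in the extracted file. Zip archives store DEFLATE
     * entries as raw deflate streams, without a zlib header or checksum,
     * which is why inflateInit2() below is passed a negative window size
     * (-MAX_WBITS): that is zlib's documented way of selecting raw deflate. */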
    /* Decompress */
    if (inflateInit2(&zStream, -MAX_WBITS) != Z_OK) {
      LOG("inflateInit failed: %s", zStream.msg);
      return nullptr;
    }
    if (inflate(&zStream, Z_FINISH) != Z_STREAM_END) {
      LOG("inflate failed: %s", zStream.msg);
      return nullptr;
    }
    if (inflateEnd(&zStream) != Z_OK) {
      LOG("inflateEnd failed: %s", zStream.msg);
      return nullptr;
    }
    if (zStream.total_out != stream->GetUncompressedSize()) {
      LOG("File not fully uncompressed! %ld / %d", zStream.total_out,
          static_cast<int>(stream->GetUncompressedSize()));
      return nullptr;
    }
  } else if (stream->GetType() == Zip::Stream::STORE) {
    SeekableZStream zStream;
    if (!zStream.Init(stream->GetBuffer(), stream->GetSize())) {
      LOG("Couldn't initialize SeekableZStream for %s", name);
      return nullptr;
    }
    if (ftruncate(fd, zStream.GetUncompressedSize()) == -1) {
      LOG("Couldn't ftruncate %s to decompress library", file.get());
      return nullptr;
    }
    MappedPtr buffer(MemoryRange::mmap(nullptr, zStream.GetUncompressedSize(),
                                       PROT_WRITE, MAP_SHARED, fd, 0));
    if (buffer == MAP_FAILED) {
      LOG("Couldn't map %s to decompress library", file.get());
      return nullptr;
    }

    if (!zStream.Decompress(buffer, 0, zStream.GetUncompressedSize())) {
      LOG("%s: failed to decompress", name);
      return nullptr;
    }
  } else {
    return nullptr;
  }

  return new MappableExtractFile(fd.forget(), file.forget());
}

MappableExtractFile::~MappableExtractFile()
{
  /* When destroying from a forked process, we don't want the file to be
   * removed, as the main process is still using the file. Although it
   * doesn't really matter, it helps tools such as valgrind that the file
   * is there. The string still needs to be delete[]d, though. */
  if (pid != getpid())
    delete [] path.forget();
}

/**
 * _MappableBuffer is a buffer whose content can be mapped at different
 * locations in the virtual address space.
 * On Linux, uses a (deleted) temporary file on a tmpfs for sharable content.
 * On Android, uses ashmem.
 */
class _MappableBuffer: public MappedPtr
{
public:
  /**
   * Returns a _MappableBuffer instance with the given name and the given
   * length.
   */
  static _MappableBuffer *Create(const char *name, size_t length)
  {
    AutoCloseFD fd;
#ifdef ANDROID
    /* On Android, initialize an ashmem region with the given length */
    fd = open("/" ASHMEM_NAME_DEF, O_RDWR, 0600);
    if (fd == -1)
      return nullptr;
    char str[ASHMEM_NAME_LEN];
    strlcpy(str, name, sizeof(str));
    ioctl(fd, ASHMEM_SET_NAME, str);
    if (ioctl(fd, ASHMEM_SET_SIZE, length))
      return nullptr;

    /* The Gecko crash reporter is confused by adjacent memory mappings of
     * the same file, and chances are we're going to map from the same file
     * descriptor right away. To avoid problems with the crash reporter,
     * create an empty anonymous page before or after the ashmem mapping,
     * depending on how mappings grow in the address space. */
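    /* Rough sketch of the layouts produced by the two cases below:
     *   ARM:  [ ashmem buffer ............ ][ PROT_NONE guard page ]
     *   x86:  [ PROT_NONE guard page ][ ashmem buffer ............ ]
     */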
#if defined(__arm__)
    void *buf = ::mmap(nullptr, length + PAGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
    if (buf != MAP_FAILED) {
      ::mmap(AlignedEndPtr(reinterpret_cast<char *>(buf) + length, PAGE_SIZE),
             PAGE_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
             -1, 0);
      DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
                length, str, buf);
      return new _MappableBuffer(fd.forget(), buf, length);
    }
#elif defined(__i386__)
    size_t anon_mapping_length = length + PAGE_SIZE;
    void *buf = ::mmap(nullptr, anon_mapping_length, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf != MAP_FAILED) {
      char *first_page = reinterpret_cast<char *>(buf);
      char *map_page = first_page + PAGE_SIZE;

      void *actual_buf = ::mmap(map_page, length, PROT_READ | PROT_WRITE,
                                MAP_FIXED | MAP_SHARED, fd, 0);
      if (actual_buf == MAP_FAILED) {
        ::munmap(buf, anon_mapping_length);
        DEBUG_LOG("Fixed allocation of decompression buffer at %p failed",
                  map_page);
        return nullptr;
      }

      DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
                length, str, actual_buf);
      return new _MappableBuffer(fd.forget(), actual_buf, length);
    }
#else
#error need to add a case for your CPU
#endif
#else
    /* On Linux, use /dev/shm as base directory for temporary files, assuming
     * it's on tmpfs */
    /* TODO: check that /dev/shm is tmpfs */
    char path[256];
    sprintf(path, "/dev/shm/%s.XXXXXX", name);
    fd = mkstemp(path);
    if (fd == -1)
      return nullptr;
    unlink(path);
    ftruncate(fd, length);

    void *buf = ::mmap(nullptr, length, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
    if (buf != MAP_FAILED) {
      DEBUG_LOG("Decompression buffer of size %ld in \"%s\", mapped @%p",
                length, path, buf);
      return new _MappableBuffer(fd.forget(), buf, length);
    }
#endif
    return nullptr;
  }

  void *mmap(const void *addr, size_t length, int prot, int flags,
             off_t offset)
  {
    MOZ_ASSERT(fd != -1);
#ifdef ANDROID
    /* Mapping ashmem MAP_PRIVATE is like mapping anonymous memory, even when
     * there is content in the ashmem */
    if (flags & MAP_PRIVATE) {
      flags &= ~MAP_PRIVATE;
      flags |= MAP_SHARED;
    }
#endif
    return ::mmap(const_cast<void *>(addr), length, prot, flags, fd, offset);
  }
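  /* Usage sketch (illustrative only; names and sizes are hypothetical):
   * decompress once into the buffer, then map the same content at whatever
   * addresses the loader needs:
   *   _MappableBuffer *buf = _MappableBuffer::Create("libfoo.so", size);
   *   ... decompress size bytes into *buf ...
   *   void *seg = buf->mmap(wanted_addr, seg_len, PROT_READ | PROT_EXEC,
   *                         MAP_PRIVATE | MAP_FIXED, seg_offset);
   */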
#ifdef ANDROID
  ~_MappableBuffer() {
    /* Free the additional page we allocated. See _MappableBuffer::Create */
#if defined(__arm__)
    ::munmap(AlignedEndPtr(*this + GetLength(), PAGE_SIZE), PAGE_SIZE);
#elif defined(__i386__)
    ::munmap(*this - PAGE_SIZE, GetLength() + PAGE_SIZE);
#else
#error need to add a case for your CPU
#endif
  }
#endif

private:
  _MappableBuffer(int fd, void *buf, size_t length)
  : MappedPtr(buf, length), fd(fd) { }

  /* File descriptor for the temporary file or ashmem */
  AutoCloseFD fd;
};


Mappable *
MappableDeflate::Create(const char *name, Zip *zip, Zip::Stream *stream)
{
  MOZ_ASSERT(stream->GetType() == Zip::Stream::DEFLATE);
  _MappableBuffer *buf = _MappableBuffer::Create(name,
                                                 stream->GetUncompressedSize());
  if (buf)
    return new MappableDeflate(buf, zip, stream);
  return nullptr;
}

MappableDeflate::MappableDeflate(_MappableBuffer *buf, Zip *zip,
                                 Zip::Stream *stream)
: zip(zip), buffer(buf), zStream(stream->GetZStream(*buf)) { }

MappableDeflate::~MappableDeflate() { }

MemoryRange
MappableDeflate::mmap(const void *addr, size_t length, int prot, int flags,
                      off_t offset)
{
  MOZ_ASSERT(buffer);
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  /* The deflate stream is uncompressed up to the required offset + length, if
   * it hasn't previously been uncompressed */
  ssize_t missing = offset + length + zStream.avail_out - buffer->GetLength();
  if (missing > 0) {
    uInt avail_out = zStream.avail_out;
    zStream.avail_out = missing;
    if ((*buffer == zStream.next_out) &&
        (inflateInit2(&zStream, -MAX_WBITS) != Z_OK)) {
      LOG("inflateInit failed: %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    int ret = inflate(&zStream, Z_SYNC_FLUSH);
    if (ret < 0) {
      LOG("inflate failed: %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    if (ret == Z_NEED_DICT) {
      LOG("zstream requires a dictionary. %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    zStream.avail_out = avail_out - missing + zStream.avail_out;
    if (ret == Z_STREAM_END) {
      if (inflateEnd(&zStream) != Z_OK) {
        LOG("inflateEnd failed: %s", zStream.msg);
        return MemoryRange(MAP_FAILED, 0);
      }
      if (zStream.total_out != buffer->GetLength()) {
        LOG("File not fully uncompressed! %ld / %d", zStream.total_out,
            static_cast<int>(buffer->GetLength()));
        return MemoryRange(MAP_FAILED, 0);
      }
    }
  }
#if defined(ANDROID) && defined(__arm__)
  if (prot & PROT_EXEC) {
    /* We just extracted data that may be executed in the future.
     * We thus need to ensure Instruction and Data cache coherency. */
    DEBUG_LOG("cacheflush(%p, %p)", *buffer + offset,
              *buffer + (offset + length));
    cacheflush(reinterpret_cast<uintptr_t>(*buffer + offset),
               reinterpret_cast<uintptr_t>(*buffer + (offset + length)), 0);
  }
#endif

  return MemoryRange(buffer->mmap(addr, length, prot, flags, offset), length);
}
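/* A note on the incremental inflation in mmap() above: inflate() is called
 * with Z_SYNC_FLUSH, which per zlib's documented semantics flushes as much
 * output as possible and returns once avail_out is exhausted. Each mmap()
 * call thus only decompresses the span between what previous calls already
 * produced and offset + length, instead of the whole stream up front. */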
void
MappableDeflate::finalize()
{
  /* Free zlib internal buffers */
  inflateEnd(&zStream);
  /* Free decompression buffer */
  buffer = nullptr;
  /* Remove reference to Zip archive */
  zip = nullptr;
}

size_t
MappableDeflate::GetLength() const
{
  return buffer->GetLength();
}

Mappable *
MappableSeekableZStream::Create(const char *name, Zip *zip,
                                Zip::Stream *stream)
{
  MOZ_ASSERT(stream->GetType() == Zip::Stream::STORE);
  mozilla::ScopedDeletePtr<MappableSeekableZStream> mappable;
  mappable = new MappableSeekableZStream(zip);

  pthread_mutexattr_t recursiveAttr;
  pthread_mutexattr_init(&recursiveAttr);
  pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE);

  if (pthread_mutex_init(&mappable->mutex, &recursiveAttr))
    return nullptr;

  if (!mappable->zStream.Init(stream->GetBuffer(), stream->GetSize()))
    return nullptr;

  mappable->buffer = _MappableBuffer::Create(name,
                         mappable->zStream.GetUncompressedSize());
  if (!mappable->buffer)
    return nullptr;

  mappable->chunkAvail = new unsigned char[mappable->zStream.GetChunksNum()];
  memset(mappable->chunkAvail, 0, mappable->zStream.GetChunksNum());

  return mappable.forget();
}

MappableSeekableZStream::MappableSeekableZStream(Zip *zip)
: zip(zip), chunkAvailNum(0) { }

MappableSeekableZStream::~MappableSeekableZStream()
{
  pthread_mutex_destroy(&mutex);
}

MemoryRange
MappableSeekableZStream::mmap(const void *addr, size_t length, int prot,
                              int flags, off_t offset)
{
  /* Map with PROT_NONE so that accessing the mapping would segfault, and
   * bring us to ensure() */
  void *res = buffer->mmap(addr, length, PROT_NONE, flags, offset);
  if (res == MAP_FAILED)
    return MemoryRange(MAP_FAILED, 0);

  /* Store the mapping, ordered by offset and length */
  std::vector<LazyMap>::reverse_iterator it;
  for (it = lazyMaps.rbegin(); it < lazyMaps.rend(); ++it) {
    if ((it->offset < offset) ||
        ((it->offset == offset) && (it->length < length)))
      break;
  }
  LazyMap map = { res, length, prot, offset };
  lazyMaps.insert(it.base(), map);
  return MemoryRange(res, length);
}

void
MappableSeekableZStream::munmap(void *addr, size_t length)
{
  std::vector<LazyMap>::iterator it;
  for (it = lazyMaps.begin(); it < lazyMaps.end(); ++it)
    if ((it->addr == addr) && (it->length == length)) {
      lazyMaps.erase(it);
      ::munmap(addr, length);
      return;
    }

  MOZ_CRASH("munmap called with unknown mapping");
}
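/* Flow sketch (illustrative) of how lazy decompression is driven:
 *   1. mmap() above hands out a PROT_NONE mapping and records it as a
 *      LazyMap.
 *   2. The first access to any page of the mapping faults; the SIGSEGV
 *      handler (see SEGVHandler in ElfLoader.cpp) resolves the faulting
 *      address back to this Mappable and calls ensure().
 *   3. ensure() decompresses the chunk containing the address and flips the
 *      pages to the protection recorded in the LazyMap.
 */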
void
MappableSeekableZStream::finalize() { }

class AutoLock {
public:
  AutoLock(pthread_mutex_t *mutex): mutex(mutex)
  {
    if (pthread_mutex_lock(mutex))
      MOZ_CRASH("pthread_mutex_lock failed");
  }
  ~AutoLock()
  {
    if (pthread_mutex_unlock(mutex))
      MOZ_CRASH("pthread_mutex_unlock failed");
  }
private:
  pthread_mutex_t *mutex;
};
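/* Worked example for the length computation in ensure() below (all numbers
 * hypothetical): with 64kB chunks and 4kB pages, a mapping that ends at
 * offset 0x1a000 inside chunk 1 (0x10000-0x20000) only needs the chunk
 * decompressed up to the page-aligned end of the mapping, not to the chunk
 * end; but if a later mapping also covers the tail of that chunk, the whole
 * chunk is decompressed. */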
bool
MappableSeekableZStream::ensure(const void *addr)
{
  DEBUG_LOG("ensure @%p", addr);
  const void *addrPage = PageAlignedPtr(addr);
  /* Find the mapping corresponding to the given page */
  std::vector<LazyMap>::iterator map;
  for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
    if (map->Contains(addrPage))
      break;
  }
  if (map == lazyMaps.end())
    return false;

  /* Find corresponding chunk */
  off_t mapOffset = map->offsetOf(addrPage);
  off_t chunk = mapOffset / zStream.GetChunkSize();

  /* In the typical case, we just need to decompress the chunk entirely. But
   * when the current mapping ends in the middle of the chunk, we want to
   * stop at the end of the corresponding page.
   * However, if another mapping needs the last part of the chunk, we still
   * need to continue. As mappings are ordered by offset and length, we don't
   * need to scan the entire list of mappings.
   * It is safe to run through lazyMaps here because the linker is never
   * going to call mmap (which adds lazyMaps) while this function is
   * called. */
  size_t length = zStream.GetChunkSize(chunk);
  off_t chunkStart = chunk * zStream.GetChunkSize();
  off_t chunkEnd = chunkStart + length;
  std::vector<LazyMap>::iterator it;
  for (it = map; it < lazyMaps.end(); ++it) {
    if (chunkEnd <= it->endOffset())
      break;
  }
  if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
    /* The mapping "it" now points at is past the interesting one */
    --it;
    length = it->endOffset() - chunkStart;
  }

  length = PageAlignedSize(length);

  /* The following lock can be re-acquired by the thread holding it.
   * If this happens, it means the following code is interrupted somehow by
   * some signal, and ends up retriggering a chunk decompression for the
   * same MappableSeekableZStream.
   * If the chunk to decompress is different the second time, then everything
   * is safe as the only common data touched below is chunkAvailNum, and it is
   * atomically updated (leaving out any chance of an interruption while it is
   * updated affecting the result). If the chunk to decompress is the same,
   * the worst thing that can happen is chunkAvailNum being incremented one
   * too many times, which doesn't affect functionality. The chances of that
   * happening are pretty slim, and the effect is harmless, so we can just
   * ignore the issue. Other than that, we'd just be wasting time
   * decompressing the same chunk twice. */
  AutoLock lock(&mutex);

  /* The very first page is mapped and accessed separately from the rest, and
   * as such, only the first page of the first chunk is decompressed this way.
   * When we fault in the remaining pages of that chunk, we want to decompress
   * the complete chunk again. Short of doing that, we would end up with
   * no data between PageSize() and chunkSize, which would effectively corrupt
   * symbol resolution in the underlying library. */
  if (chunkAvail[chunk] < PageNumber(length)) {
    if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
      return false;

#if defined(ANDROID) && defined(__arm__)
    if (map->prot & PROT_EXEC) {
      /* We just extracted data that may be executed in the future.
       * We thus need to ensure Instruction and Data cache coherency. */
      DEBUG_LOG("cacheflush(%p, %p)", *buffer + chunkStart,
                *buffer + (chunkStart + length));
      cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
                 reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)),
                 0);
    }
#endif
    /* Only count if we haven't already decompressed parts of the chunk */
    if (chunkAvail[chunk] == 0)
      chunkAvailNum++;

    chunkAvail[chunk] = PageNumber(length);
  }

  /* Flip the chunk mapping protection to the recorded flags. We could
   * also flip the protection for other mappings of the same chunk,
   * but it's easier to skip that and let further segfaults call
   * ensure again. */
  const void *chunkAddr = reinterpret_cast<const void *>
                          (reinterpret_cast<uintptr_t>(addrPage)
                           - mapOffset % zStream.GetChunkSize());
  const void *chunkEndAddr = reinterpret_cast<const void *>
                             (reinterpret_cast<uintptr_t>(chunkAddr) + length);

  const void *start = std::max(map->addr, chunkAddr);
  const void *end = std::min(map->end(), chunkEndAddr);
  length = reinterpret_cast<uintptr_t>(end)
           - reinterpret_cast<uintptr_t>(start);

  if (mprotect(const_cast<void *>(start), length, map->prot) == 0) {
    DEBUG_LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x", start, length, map->prot);
    return true;
  }

  LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x failed with errno %d",
      start, length, map->prot, errno);
  return false;
}

void
MappableSeekableZStream::stats(const char *when, const char *name) const
{
  size_t nEntries = zStream.GetChunksNum();
  DEBUG_LOG("%s: %s; %" PRIdSize "/%" PRIdSize " chunks decompressed",
            name, when, static_cast<size_t>(chunkAvailNum), nEntries);

  size_t len = 64;
  mozilla::ScopedDeleteArray<char> map;
  map = new char[len + 3];
  map[0] = '[';

  for (size_t i = 0, j = 1; i < nEntries; i++, j++) {
    map[j] = chunkAvail[i] ? '*' : '_';
    if ((j == len) || (i == nEntries - 1)) {
      map[j + 1] = ']';
      map[j + 2] = '\0';
      DEBUG_LOG("%s", static_cast<char *>(map));
      j = 0;
    }
  }
}

size_t
MappableSeekableZStream::GetLength() const
{
  return buffer->GetLength();
}
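/* Example of the output produced by stats() above (illustrative; the library
 * name and chunk states are hypothetical): for a 6-chunk stream with chunks
 * 0, 1 and 4 decompressed, the two DEBUG_LOG calls print something like:
 *   libfoo.so: <when>; 3/6 chunks decompressed
 *   [**__*_]
 */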