mozglue/linker/Mappable.cpp

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Thu, 22 Jan 2015 13:21:57 +0100
branch      TOR_BUG_9701
changeset   15:b8a032363ba2
permissions -rw-r--r--

Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <cstring>
#include <cstdlib>
#include <cstdio>
#include "Mappable.h"
#ifdef ANDROID
#include <linux/ashmem.h>
#endif
#include <errno.h>
#include "ElfLoader.h"
#include "SeekableZStream.h"
#include "Logging.h"

Mappable *
MappableFile::Create(const char *path)
{
  int fd = open(path, O_RDONLY);
  if (fd != -1)
    return new MappableFile(fd);
  return nullptr;
}

MemoryRange
MappableFile::mmap(const void *addr, size_t length, int prot, int flags,
                   off_t offset)
{
  MOZ_ASSERT(fd != -1);
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  return MemoryRange::mmap(const_cast<void *>(addr), length, prot, flags,
                           fd, offset);
}

void
MappableFile::finalize()
{
  /* Close file; equivalent to close(fd.forget()) */
  fd = -1;
}

size_t
MappableFile::GetLength() const
{
  struct stat st;
  return fstat(fd, &st) ? 0 : st.st_size;
}
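
/* Usage sketch for the Mappable interface (illustrative only; no such caller
 * exists in this file). A loader would typically create a Mappable, map the
 * segments it needs, then call finalize() once no further mmap is needed:
 *
 *   Mappable *mappable = MappableFile::Create("/path/to/libfoo.so");
 *   if (mappable) {
 *     MemoryRange seg = mappable->mmap(nullptr, mappable->GetLength(),
 *                                      PROT_READ, 0, 0);
 *     mappable->finalize();
 *   }
 */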

Mappable *
MappableExtractFile::Create(const char *name, Zip *zip, Zip::Stream *stream)
{
  const char *cachePath = getenv("MOZ_LINKER_CACHE");
  if (!cachePath || !*cachePath) {
    LOG("Warning: MOZ_LINKER_EXTRACT is set, but not MOZ_LINKER_CACHE; "
        "not extracting");
    return nullptr;
  }
  mozilla::ScopedDeleteArray<char> path;
  path = new char[strlen(cachePath) + strlen(name) + 2];
  sprintf(path, "%s/%s", cachePath, name);
  struct stat cacheStat;
  if (stat(path, &cacheStat) == 0) {
    struct stat zipStat;
    stat(zip->GetName(), &zipStat);
    if (cacheStat.st_mtime > zipStat.st_mtime) {
      DEBUG_LOG("Reusing %s", static_cast<char *>(path));
      return MappableFile::Create(path);
    }
  }
  DEBUG_LOG("Extracting to %s", static_cast<char *>(path));
  AutoCloseFD fd;
  fd = open(path, O_TRUNC | O_RDWR | O_CREAT | O_NOATIME,
            S_IRUSR | S_IWUSR);
  if (fd == -1) {
    LOG("Couldn't open %s to decompress library", path.get());
    return nullptr;
  }
  AutoUnlinkFile file;
  file = path.forget();
  if (stream->GetType() == Zip::Stream::DEFLATE) {
    if (ftruncate(fd, stream->GetUncompressedSize()) == -1) {
      LOG("Couldn't ftruncate %s to decompress library", file.get());
      return nullptr;
    }
    /* Map the temporary file for use as inflate buffer */
    MappedPtr buffer(MemoryRange::mmap(nullptr, stream->GetUncompressedSize(),
                                       PROT_WRITE, MAP_SHARED, fd, 0));
    if (buffer == MAP_FAILED) {
      LOG("Couldn't map %s to decompress library", file.get());
      return nullptr;
    }

    z_stream zStream = stream->GetZStream(buffer);

    /* Decompress */
    if (inflateInit2(&zStream, -MAX_WBITS) != Z_OK) {
      LOG("inflateInit failed: %s", zStream.msg);
      return nullptr;
    }
    if (inflate(&zStream, Z_FINISH) != Z_STREAM_END) {
      LOG("inflate failed: %s", zStream.msg);
      return nullptr;
    }
    if (inflateEnd(&zStream) != Z_OK) {
      LOG("inflateEnd failed: %s", zStream.msg);
      return nullptr;
    }
    if (zStream.total_out != stream->GetUncompressedSize()) {
      LOG("File not fully uncompressed! %ld / %d", zStream.total_out,
          static_cast<unsigned int>(stream->GetUncompressedSize()));
      return nullptr;
    }
  } else if (stream->GetType() == Zip::Stream::STORE) {
    SeekableZStream zStream;
    if (!zStream.Init(stream->GetBuffer(), stream->GetSize())) {
      LOG("Couldn't initialize SeekableZStream for %s", name);
      return nullptr;
    }
    if (ftruncate(fd, zStream.GetUncompressedSize()) == -1) {
      LOG("Couldn't ftruncate %s to decompress library", file.get());
      return nullptr;
    }
    MappedPtr buffer(MemoryRange::mmap(nullptr, zStream.GetUncompressedSize(),
                                       PROT_WRITE, MAP_SHARED, fd, 0));
    if (buffer == MAP_FAILED) {
      LOG("Couldn't map %s to decompress library", file.get());
      return nullptr;
    }

    if (!zStream.Decompress(buffer, 0, zStream.GetUncompressedSize())) {
      LOG("%s: failed to decompress", name);
      return nullptr;
    }
  } else {
    return nullptr;
  }

  return new MappableExtractFile(fd.forget(), file.forget());
}

MappableExtractFile::~MappableExtractFile()
{
  /* When destroying from a forked process, we don't want the file to be
   * removed, as the main process is still using it. Although it doesn't
   * really matter, it helps e.g. valgrind that the file is there. The
   * string still needs to be delete[]d, though. */
  if (pid != getpid())
    delete [] path.forget();
}

/**
 * _MappableBuffer is a buffer whose content can be mapped at different
 * locations in the virtual address space.
 * On Linux, it uses a (deleted) temporary file on a tmpfs for sharable
 * content. On Android, it uses ashmem. (See the usage sketch after the
 * class definition.)
 */
class _MappableBuffer: public MappedPtr
{
public:
  /**
   * Returns a _MappableBuffer instance with the given name and the given
   * length.
   */
  static _MappableBuffer *Create(const char *name, size_t length)
  {
    AutoCloseFD fd;
#ifdef ANDROID
    /* On Android, initialize an ashmem region with the given length */
    fd = open("/" ASHMEM_NAME_DEF, O_RDWR, 0600);
    if (fd == -1)
      return nullptr;
    char str[ASHMEM_NAME_LEN];
    strlcpy(str, name, sizeof(str));
    ioctl(fd, ASHMEM_SET_NAME, str);
    if (ioctl(fd, ASHMEM_SET_SIZE, length))
      return nullptr;

    /* The Gecko crash reporter is confused by adjacent memory mappings of
     * the same file, and chances are we're going to map from the same file
     * descriptor right away. To avoid problems with the crash reporter,
     * create an empty anonymous page before or after the ashmem mapping,
     * depending on how mappings grow in the address space.
     */
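    /* Resulting layouts (explanatory sketch, not from the original source;
     * addresses grow to the right):
     *
     *   ARM (mappings grow upward; the guard page goes after the buffer):
     *     [ ashmem buffer (length) ][ PROT_NONE guard page ]
     *
     *   x86 (the whole range is reserved PROT_NONE first, then the buffer
     *   is mapped over everything but the first page):
     *     [ PROT_NONE guard page ][ ashmem buffer (length) ]
     */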
#if defined(__arm__)
    void *buf = ::mmap(nullptr, length + PAGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
    if (buf != MAP_FAILED) {
      ::mmap(AlignedEndPtr(reinterpret_cast<char *>(buf) + length, PAGE_SIZE),
             PAGE_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
             -1, 0);
      DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
                length, str, buf);
      return new _MappableBuffer(fd.forget(), buf, length);
    }
#elif defined(__i386__)
    size_t anon_mapping_length = length + PAGE_SIZE;
    void *buf = ::mmap(nullptr, anon_mapping_length, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf != MAP_FAILED) {
      char *first_page = reinterpret_cast<char *>(buf);
      char *map_page = first_page + PAGE_SIZE;

      void *actual_buf = ::mmap(map_page, length, PROT_READ | PROT_WRITE,
                                MAP_FIXED | MAP_SHARED, fd, 0);
      if (actual_buf == MAP_FAILED) {
        ::munmap(buf, anon_mapping_length);
        DEBUG_LOG("Fixed allocation of decompression buffer at %p failed",
                  map_page);
        return nullptr;
      }

      DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
                length, str, actual_buf);
      return new _MappableBuffer(fd.forget(), actual_buf, length);
    }
#else
#error need to add a case for your CPU
#endif
#else
    /* On Linux, use /dev/shm as base directory for temporary files, assuming
     * it's on tmpfs */
    /* TODO: check that /dev/shm is tmpfs */
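    /* One possible check (sketch only, not in the original source): compare
     * the filesystem magic reported by statfs() against TMPFS_MAGIC from
     * <linux/magic.h>:
     *
     *   struct statfs sf;
     *   if (statfs("/dev/shm", &sf) == 0 && sf.f_type != TMPFS_MAGIC)
     *     ... fall back to some other temporary directory ...
     */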
    char path[256];
    sprintf(path, "/dev/shm/%s.XXXXXX", name);
    fd = mkstemp(path);
    if (fd == -1)
      return nullptr;
    unlink(path);
    ftruncate(fd, length);

    void *buf = ::mmap(nullptr, length, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
    if (buf != MAP_FAILED) {
      DEBUG_LOG("Decompression buffer of size %ld in \"%s\", mapped @%p",
                length, path, buf);
      return new _MappableBuffer(fd.forget(), buf, length);
    }
#endif
    return nullptr;
  }

  void *mmap(const void *addr, size_t length, int prot, int flags,
             off_t offset)
  {
    MOZ_ASSERT(fd != -1);
#ifdef ANDROID
    /* Mapping ashmem MAP_PRIVATE is like mapping anonymous memory, even when
     * there is content in the ashmem */
    if (flags & MAP_PRIVATE) {
      flags &= ~MAP_PRIVATE;
      flags |= MAP_SHARED;
    }
#endif
    return ::mmap(const_cast<void *>(addr), length, prot, flags, fd, offset);
  }

#ifdef ANDROID
  ~_MappableBuffer() {
    /* Free the additional page we allocated. See _MappableBuffer::Create */
#if defined(__arm__)
    ::munmap(AlignedEndPtr(*this + GetLength(), PAGE_SIZE), PAGE_SIZE);
#elif defined(__i386__)
    ::munmap(*this - PAGE_SIZE, GetLength() + PAGE_SIZE);
#else
#error need to add a case for your CPU
#endif
  }
#endif

private:
  _MappableBuffer(int fd, void *buf, size_t length)
  : MappedPtr(buf, length), fd(fd) { }

  /* File descriptor for the temporary file or ashmem */
  AutoCloseFD fd;
};
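
/* Usage sketch for _MappableBuffer (illustrative only; no such caller exists
 * in this file). The point of the class is that one underlying content can
 * be mapped at several addresses, which is what MappableDeflate and
 * MappableSeekableZStream rely on:
 *
 *   _MappableBuffer *buf = _MappableBuffer::Create("libfoo.so", 0x10000);
 *   if (buf) {
 *     memcpy(*buf, src, n);  // fill through the primary mapping
 *     void *view = buf->mmap(nullptr, 0x4000, PROT_READ, MAP_PRIVATE, 0);
 *     ...                    // read the same content through "view"
 *   }
 *
 * Here "src" and "n" are placeholders for whatever fills the buffer.
 */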

Mappable *
MappableDeflate::Create(const char *name, Zip *zip, Zip::Stream *stream)
{
  MOZ_ASSERT(stream->GetType() == Zip::Stream::DEFLATE);
  _MappableBuffer *buf = _MappableBuffer::Create(name,
                                                 stream->GetUncompressedSize());
  if (buf)
    return new MappableDeflate(buf, zip, stream);
  return nullptr;
}

MappableDeflate::MappableDeflate(_MappableBuffer *buf, Zip *zip,
                                 Zip::Stream *stream)
: zip(zip), buffer(buf), zStream(stream->GetZStream(*buf)) { }

MappableDeflate::~MappableDeflate() { }

MemoryRange
MappableDeflate::mmap(const void *addr, size_t length, int prot, int flags,
                      off_t offset)
{
  MOZ_ASSERT(buffer);
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  /* The deflate stream is uncompressed up to the required offset + length,
   * if it hasn't previously been uncompressed */
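  /* Explanatory note: zStream.avail_out tracks the remaining room in the
   * decompression buffer, so buffer->GetLength() - zStream.avail_out is the
   * number of bytes already inflated. "missing" below is thus how many more
   * bytes are needed to cover [0, offset + length). */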
  ssize_t missing = offset + length + zStream.avail_out - buffer->GetLength();
  if (missing > 0) {
    uInt avail_out = zStream.avail_out;
    zStream.avail_out = missing;
    if ((*buffer == zStream.next_out) &&
        (inflateInit2(&zStream, -MAX_WBITS) != Z_OK)) {
      LOG("inflateInit failed: %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    int ret = inflate(&zStream, Z_SYNC_FLUSH);
    if (ret < 0) {
      LOG("inflate failed: %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    if (ret == Z_NEED_DICT) {
      LOG("zstream requires a dictionary. %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    zStream.avail_out = avail_out - missing + zStream.avail_out;
    if (ret == Z_STREAM_END) {
      if (inflateEnd(&zStream) != Z_OK) {
        LOG("inflateEnd failed: %s", zStream.msg);
        return MemoryRange(MAP_FAILED, 0);
      }
      if (zStream.total_out != buffer->GetLength()) {
        LOG("File not fully uncompressed! %ld / %d", zStream.total_out,
            static_cast<unsigned int>(buffer->GetLength()));
        return MemoryRange(MAP_FAILED, 0);
      }
    }
  }
#if defined(ANDROID) && defined(__arm__)
  if (prot & PROT_EXEC) {
    /* We just extracted data that may be executed in the future.
     * We thus need to ensure Instruction and Data cache coherency. */
    DEBUG_LOG("cacheflush(%p, %p)", *buffer + offset, *buffer + (offset + length));
    cacheflush(reinterpret_cast<uintptr_t>(*buffer + offset),
               reinterpret_cast<uintptr_t>(*buffer + (offset + length)), 0);
  }
#endif

  return MemoryRange(buffer->mmap(addr, length, prot, flags, offset), length);
}

void
MappableDeflate::finalize()
{
  /* Free zlib internal buffers */
  inflateEnd(&zStream);
  /* Free decompression buffer */
  buffer = nullptr;
  /* Remove reference to Zip archive */
  zip = nullptr;
}

size_t
MappableDeflate::GetLength() const
{
  return buffer->GetLength();
}

Mappable *
MappableSeekableZStream::Create(const char *name, Zip *zip,
                                Zip::Stream *stream)
{
  MOZ_ASSERT(stream->GetType() == Zip::Stream::STORE);
  mozilla::ScopedDeletePtr<MappableSeekableZStream> mappable;
  mappable = new MappableSeekableZStream(zip);

  pthread_mutexattr_t recursiveAttr;
  pthread_mutexattr_init(&recursiveAttr);
  pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE);

  if (pthread_mutex_init(&mappable->mutex, &recursiveAttr))
    return nullptr;

  if (!mappable->zStream.Init(stream->GetBuffer(), stream->GetSize()))
    return nullptr;

  mappable->buffer = _MappableBuffer::Create(name,
                       mappable->zStream.GetUncompressedSize());
  if (!mappable->buffer)
    return nullptr;

  mappable->chunkAvail = new unsigned char[mappable->zStream.GetChunksNum()];
  memset(mappable->chunkAvail, 0, mappable->zStream.GetChunksNum());

  return mappable.forget();
}

MappableSeekableZStream::MappableSeekableZStream(Zip *zip)
: zip(zip), chunkAvailNum(0) { }

MappableSeekableZStream::~MappableSeekableZStream()
{
  pthread_mutex_destroy(&mutex);
}

MemoryRange
MappableSeekableZStream::mmap(const void *addr, size_t length, int prot,
                              int flags, off_t offset)
{
  /* Map with PROT_NONE so that accessing the mapping would segfault, and
   * bring us to ensure() */
  void *res = buffer->mmap(addr, length, PROT_NONE, flags, offset);
  if (res == MAP_FAILED)
    return MemoryRange(MAP_FAILED, 0);

  /* Store the mapping, ordered by offset and length */
  std::vector<LazyMap>::reverse_iterator it;
  for (it = lazyMaps.rbegin(); it < lazyMaps.rend(); ++it) {
    if ((it->offset < offset) ||
        ((it->offset == offset) && (it->length < length)))
      break;
  }
  LazyMap map = { res, length, prot, offset };
  lazyMaps.insert(it.base(), map);
  return MemoryRange(res, length);
}
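
/* Explanatory note (the handler itself lives outside this file): because the
 * mapping above is PROT_NONE, the first access to any of its pages raises
 * SIGSEGV. The linker's segfault handler (in ElfLoader) is then expected to
 * call ensure() with the faulting address; ensure() decompresses the chunk
 * containing that address and mprotect()s the affected pages to their
 * recorded protection, after which execution resumes. */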

void
MappableSeekableZStream::munmap(void *addr, size_t length)
{
  std::vector<LazyMap>::iterator it;
  for (it = lazyMaps.begin(); it < lazyMaps.end(); ++it)
    if ((it->addr == addr) && (it->length == length)) {
      lazyMaps.erase(it);
      ::munmap(addr, length);
      return;
    }
  MOZ_CRASH("munmap called with unknown mapping");
}

void
MappableSeekableZStream::finalize() { }

class AutoLock {
public:
  AutoLock(pthread_mutex_t *mutex): mutex(mutex)
  {
    if (pthread_mutex_lock(mutex))
      MOZ_CRASH("pthread_mutex_lock failed");
  }
  ~AutoLock()
  {
    if (pthread_mutex_unlock(mutex))
      MOZ_CRASH("pthread_mutex_unlock failed");
  }
private:
  pthread_mutex_t *mutex;
};

bool
MappableSeekableZStream::ensure(const void *addr)
{
  DEBUG_LOG("ensure @%p", addr);
  const void *addrPage = PageAlignedPtr(addr);
  /* Find the mapping corresponding to the given page */
  std::vector<LazyMap>::iterator map;
  for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
    if (map->Contains(addrPage))
      break;
  }
  if (map == lazyMaps.end())
    return false;

  /* Find corresponding chunk */
  off_t mapOffset = map->offsetOf(addrPage);
  off_t chunk = mapOffset / zStream.GetChunkSize();

  /* In the typical case, we just need to decompress the chunk entirely. But
   * when the current mapping ends in the middle of the chunk, we want to
   * stop at the end of the corresponding page.
   * However, if another mapping needs the last part of the chunk, we still
   * need to continue. As mappings are ordered by offset and length, we don't
   * need to scan the entire list of mappings.
   * It is safe to run through lazyMaps here because the linker is never
   * going to call mmap (which adds lazyMaps) while this function is
   * called. */
  size_t length = zStream.GetChunkSize(chunk);
  off_t chunkStart = chunk * zStream.GetChunkSize();
  off_t chunkEnd = chunkStart + length;
  std::vector<LazyMap>::iterator it;
  for (it = map; it < lazyMaps.end(); ++it) {
    if (chunkEnd <= it->endOffset())
      break;
  }
  if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
    /* The mapping "it" points at now is past the interesting one */
    --it;
    length = it->endOffset() - chunkStart;
  }

  length = PageAlignedSize(length);
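
  /* Worked example with hypothetical numbers: with 0x4000-byte chunks, a
   * fault at stream offset 0x6100 lands in chunk 1 (chunkStart = 0x4000,
   * chunkEnd = 0x8000). If the last mapping overlapping that chunk ends at
   * offset 0x7000, length is trimmed to 0x3000 before page alignment, so
   * only the pages some mapping can actually reach get decompressed. */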

  /* The following lock can be re-acquired by the thread holding it.
   * If this happens, it means the following code is interrupted somehow by
   * some signal, and ends up retriggering a chunk decompression for the
   * same MappableSeekableZStream.
   * If the chunk to decompress is different the second time, then everything
   * is safe as the only common data touched below is chunkAvailNum, and it is
   * atomically updated (leaving out any chance of an interruption while it is
   * updated affecting the result). If the chunk to decompress is the same, the
   * worst thing that can happen is chunkAvailNum being incremented one too
   * many times, which doesn't affect functionality. The chances of it
   * happening being pretty slim, and the effect being harmless, we can just
   * ignore the issue. Other than that, we'd just be wasting time decompressing
   * the same chunk twice. */
  AutoLock lock(&mutex);

  /* The very first page is mapped and accessed separately from the rest, and
   * as such, only the first page of the first chunk is decompressed this way.
   * When we fault in the remaining pages of that chunk, we want to decompress
   * the complete chunk again. Short of doing that, we would end up with
   * no data between PageSize() and chunkSize, which would effectively corrupt
   * symbol resolution in the underlying library. */
  if (chunkAvail[chunk] < PageNumber(length)) {
    if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
      return false;

#if defined(ANDROID) && defined(__arm__)
    if (map->prot & PROT_EXEC) {
      /* We just extracted data that may be executed in the future.
       * We thus need to ensure Instruction and Data cache coherency. */
      DEBUG_LOG("cacheflush(%p, %p)", *buffer + chunkStart,
                *buffer + (chunkStart + length));
      cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
                 reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)),
                 0);
    }
#endif
    /* Only count if we haven't already decompressed parts of the chunk */
    if (chunkAvail[chunk] == 0)
      chunkAvailNum++;

    chunkAvail[chunk] = PageNumber(length);
  }

  /* Flip the chunk mapping protection to the recorded flags. We could
   * also flip the protection for other mappings of the same chunk,
   * but it's easier to skip that and let further segfaults call
   * ensure again. */
  const void *chunkAddr = reinterpret_cast<const void *>
                          (reinterpret_cast<uintptr_t>(addrPage)
                           - mapOffset % zStream.GetChunkSize());
  const void *chunkEndAddr = reinterpret_cast<const void *>
                             (reinterpret_cast<uintptr_t>(chunkAddr) + length);

  const void *start = std::max(map->addr, chunkAddr);
  const void *end = std::min(map->end(), chunkEndAddr);
  length = reinterpret_cast<uintptr_t>(end)
           - reinterpret_cast<uintptr_t>(start);

  if (mprotect(const_cast<void *>(start), length, map->prot) == 0) {
    DEBUG_LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x", start, length, map->prot);
    return true;
  }

  LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x failed with errno %d",
      start, length, map->prot, errno);
  return false;
}

void
MappableSeekableZStream::stats(const char *when, const char *name) const
{
  size_t nEntries = zStream.GetChunksNum();
  DEBUG_LOG("%s: %s; %" PRIdSize "/%" PRIdSize " chunks decompressed",
            name, when, static_cast<size_t>(chunkAvailNum), nEntries);

  size_t len = 64;
  mozilla::ScopedDeleteArray<char> map;
  map = new char[len + 3];
  map[0] = '[';

  for (size_t i = 0, j = 1; i < nEntries; i++, j++) {
    map[j] = chunkAvail[i] ? '*' : '_';
    if ((j == len) || (i == nEntries - 1)) {
      map[j + 1] = ']';
      map[j + 2] = '\0';
      DEBUG_LOG("%s", static_cast<char *>(map));
      j = 0;
    }
  }
}

size_t
MappableSeekableZStream::GetLength() const
{
  return buffer->GetLength();
}
