mozglue/linker/Mappable.cpp

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <cstring>
#include <cstdlib>
#include <cstdio>
#include "Mappable.h"
#ifdef ANDROID
#include <linux/ashmem.h>
#endif
#include <errno.h>
#include "ElfLoader.h"
#include "SeekableZStream.h"
#include "Logging.h"

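/* This file implements the Mappable variants used by the custom linker:
 * MappableFile (plain on-disk file), MappableExtractFile (library extracted
 * to MOZ_LINKER_CACHE), MappableDeflate (zip DEFLATE entry inflated into a
 * shared buffer), and MappableSeekableZStream (chunked, on-demand
 * decompression), plus the _MappableBuffer helper they share. */
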
Mappable *
MappableFile::Create(const char *path)
{
  int fd = open(path, O_RDONLY);
  if (fd != -1)
    return new MappableFile(fd);
  return nullptr;
}

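/* Maps a range of the file copy-on-write. MAP_SHARED is explicitly rejected
 * so that writes (e.g. relocations) never reach the underlying file. A
 * typical call, with illustrative values only, looks like:
 *
 *   MemoryRange r = mappable->mmap(nullptr, 0x4000, PROT_READ, 0, 0);
 *   if (r == MAP_FAILED) { ... }
 */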
MemoryRange
MappableFile::mmap(const void *addr, size_t length, int prot, int flags,
                   off_t offset)
{
  MOZ_ASSERT(fd != -1);
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  return MemoryRange::mmap(const_cast<void *>(addr), length, prot, flags,
                           fd, offset);
}

void
MappableFile::finalize()
{
  /* Close the file; equivalent to close(fd.forget()) */
  fd = -1;
}

size_t
MappableFile::GetLength() const
{
  struct stat st;
  return fstat(fd, &st) ? 0 : st.st_size;
}

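/* Extracts a library from the zip into the MOZ_LINKER_CACHE directory, or
 * reuses a previously extracted copy when it is newer than the zip itself.
 * (The MOZ_LINKER_EXTRACT opt-in is presumably checked by the caller, as the
 * warning below implies.) */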
Mappable *
MappableExtractFile::Create(const char *name, Zip *zip, Zip::Stream *stream)
{
  const char *cachePath = getenv("MOZ_LINKER_CACHE");
  if (!cachePath || !*cachePath) {
    LOG("Warning: MOZ_LINKER_EXTRACT is set, but not MOZ_LINKER_CACHE; "
        "not extracting");
    return nullptr;
  }
  mozilla::ScopedDeleteArray<char> path;
  path = new char[strlen(cachePath) + strlen(name) + 2];
  sprintf(path, "%s/%s", cachePath, name);
  struct stat cacheStat;
  if (stat(path, &cacheStat) == 0) {
    struct stat zipStat;
    stat(zip->GetName(), &zipStat);
    if (cacheStat.st_mtime > zipStat.st_mtime) {
      DEBUG_LOG("Reusing %s", static_cast<char *>(path));
      return MappableFile::Create(path);
    }
  }
  DEBUG_LOG("Extracting to %s", static_cast<char *>(path));
  AutoCloseFD fd;
  fd = open(path, O_TRUNC | O_RDWR | O_CREAT | O_NOATIME,
                  S_IRUSR | S_IWUSR);
  if (fd == -1) {
    LOG("Couldn't open %s to decompress library", path.get());
    return nullptr;
  }
  AutoUnlinkFile file;
  file = path.forget();
  if (stream->GetType() == Zip::Stream::DEFLATE) {
    if (ftruncate(fd, stream->GetUncompressedSize()) == -1) {
      LOG("Couldn't ftruncate %s to decompress library", file.get());
      return nullptr;
    }
    /* Map the temporary file for use as inflate buffer */
    MappedPtr buffer(MemoryRange::mmap(nullptr, stream->GetUncompressedSize(),
                                       PROT_WRITE, MAP_SHARED, fd, 0));
    if (buffer == MAP_FAILED) {
      LOG("Couldn't map %s to decompress library", file.get());
      return nullptr;
    }

    z_stream zStream = stream->GetZStream(buffer);

    /* Decompress */
    if (inflateInit2(&zStream, -MAX_WBITS) != Z_OK) {
      LOG("inflateInit failed: %s", zStream.msg);
      return nullptr;
    }
    if (inflate(&zStream, Z_FINISH) != Z_STREAM_END) {
      LOG("inflate failed: %s", zStream.msg);
      return nullptr;
    }
    if (inflateEnd(&zStream) != Z_OK) {
      LOG("inflateEnd failed: %s", zStream.msg);
      return nullptr;
    }
    if (zStream.total_out != stream->GetUncompressedSize()) {
      LOG("File not fully uncompressed! %ld / %d", zStream.total_out,
          static_cast<unsigned int>(stream->GetUncompressedSize()));
      return nullptr;
    }
  } else if (stream->GetType() == Zip::Stream::STORE) {
    SeekableZStream zStream;
    if (!zStream.Init(stream->GetBuffer(), stream->GetSize())) {
      LOG("Couldn't initialize SeekableZStream for %s", name);
      return nullptr;
    }
    if (ftruncate(fd, zStream.GetUncompressedSize()) == -1) {
      LOG("Couldn't ftruncate %s to decompress library", file.get());
      return nullptr;
    }
    MappedPtr buffer(MemoryRange::mmap(nullptr, zStream.GetUncompressedSize(),
                                       PROT_WRITE, MAP_SHARED, fd, 0));
    if (buffer == MAP_FAILED) {
      LOG("Couldn't map %s to decompress library", file.get());
      return nullptr;
    }

    if (!zStream.Decompress(buffer, 0, zStream.GetUncompressedSize())) {
      LOG("%s: failed to decompress", name);
      return nullptr;
    }
  } else {
    return nullptr;
  }

  return new MappableExtractFile(fd.forget(), file.forget());
}

MappableExtractFile::~MappableExtractFile()
{
  /* When destroying from a forked process, we don't want the file to be
   * removed, as the main process is still using it. Although it doesn't
   * really matter, having the file around also helps e.g. valgrind. The
   * path string still needs to be delete[]d, though. */
  if (pid != getpid())
    delete [] path.forget();
}

/**
 * _MappableBuffer is a buffer whose content can be mapped at different
 * locations in the virtual address space.
 * On Linux, it uses a (deleted) temporary file on a tmpfs for sharable
 * content. On Android, it uses ashmem.
 */
class _MappableBuffer: public MappedPtr
{
public:
  /**
   * Returns a _MappableBuffer instance with the given name and the given
   * length.
   */
  static _MappableBuffer *Create(const char *name, size_t length)
  {
    AutoCloseFD fd;
#ifdef ANDROID
    /* On Android, initialize an ashmem region with the given length */
    fd = open("/" ASHMEM_NAME_DEF, O_RDWR, 0600);
    if (fd == -1)
      return nullptr;
    char str[ASHMEM_NAME_LEN];
    strlcpy(str, name, sizeof(str));
    ioctl(fd, ASHMEM_SET_NAME, str);
    if (ioctl(fd, ASHMEM_SET_SIZE, length))
      return nullptr;

    /* The Gecko crash reporter is confused by adjacent memory mappings of
     * the same file and chances are we're going to map from the same file
     * descriptor right away. To avoid problems with the crash reporter,
     * create an empty anonymous page before or after the ashmem mapping,
     * depending on how mappings grow in the address space.
     */
#if defined(__arm__)
    void *buf = ::mmap(nullptr, length + PAGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
    if (buf != MAP_FAILED) {
      ::mmap(AlignedEndPtr(reinterpret_cast<char *>(buf) + length, PAGE_SIZE),
             PAGE_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
                length, str, buf);
      return new _MappableBuffer(fd.forget(), buf, length);
    }
#elif defined(__i386__)
    size_t anon_mapping_length = length + PAGE_SIZE;
    void *buf = ::mmap(nullptr, anon_mapping_length, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf != MAP_FAILED) {
      char *first_page = reinterpret_cast<char *>(buf);
      char *map_page = first_page + PAGE_SIZE;

      void *actual_buf = ::mmap(map_page, length, PROT_READ | PROT_WRITE,
                                MAP_FIXED | MAP_SHARED, fd, 0);
      if (actual_buf == MAP_FAILED) {
        ::munmap(buf, anon_mapping_length);
        DEBUG_LOG("Fixed allocation of decompression buffer at %p failed", map_page);
        return nullptr;
      }

      DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
                length, str, actual_buf);
      return new _MappableBuffer(fd.forget(), actual_buf, length);
    }
#else
#error need to add a case for your CPU
#endif
#else
    /* On Linux, use /dev/shm as base directory for temporary files, assuming
     * it's on tmpfs */
    /* TODO: check that /dev/shm is tmpfs */
    char path[256];
    sprintf(path, "/dev/shm/%s.XXXXXX", name);
    fd = mkstemp(path);
    if (fd == -1)
      return nullptr;
    unlink(path);
    ftruncate(fd, length);

    void *buf = ::mmap(nullptr, length, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
    if (buf != MAP_FAILED) {
      DEBUG_LOG("Decompression buffer of size %ld in \"%s\", mapped @%p",
                length, path, buf);
      return new _MappableBuffer(fd.forget(), buf, length);
    }
#endif
    return nullptr;
  }

  void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset)
  {
    MOZ_ASSERT(fd != -1);
#ifdef ANDROID
    /* Mapping ashmem MAP_PRIVATE is like mapping anonymous memory, even when
     * there is content in the ashmem */
    if (flags & MAP_PRIVATE) {
      flags &= ~MAP_PRIVATE;
      flags |= MAP_SHARED;
    }
#endif
    return ::mmap(const_cast<void *>(addr), length, prot, flags, fd, offset);
  }

#ifdef ANDROID
  ~_MappableBuffer() {
    /* Free the additional page we allocated. See _MappableBuffer::Create */
#if defined(__arm__)
    ::munmap(AlignedEndPtr(*this + GetLength(), PAGE_SIZE), PAGE_SIZE);
#elif defined(__i386__)
    ::munmap(*this - PAGE_SIZE, GetLength() + PAGE_SIZE);
#else
#error need to add a case for your CPU
#endif
  }
#endif

private:
  _MappableBuffer(int fd, void *buf, size_t length)
  : MappedPtr(buf, length), fd(fd) { }

  /* File descriptor for the temporary file or ashmem */
  AutoCloseFD fd;
};
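
/* Illustrative use of _MappableBuffer (hypothetical name and sizes):
 *
 *   _MappableBuffer *buf = _MappableBuffer::Create("libfoo.so", 0x10000);
 *   if (buf) {
 *     void *at = buf->mmap(nullptr, 0x1000, PROT_READ, MAP_PRIVATE, 0);
 *     ...
 *   }
 */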

Mappable *
MappableDeflate::Create(const char *name, Zip *zip, Zip::Stream *stream)
{
  MOZ_ASSERT(stream->GetType() == Zip::Stream::DEFLATE);
  _MappableBuffer *buf = _MappableBuffer::Create(name, stream->GetUncompressedSize());
  if (buf)
    return new MappableDeflate(buf, zip, stream);
  return nullptr;
}

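/* The constructor initializes the zlib stream so that it inflates straight
 * into the shared buffer; actual decompression happens lazily in mmap()
 * below, as ranges of the library are requested. */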
MappableDeflate::MappableDeflate(_MappableBuffer *buf, Zip *zip,
                                 Zip::Stream *stream)
: zip(zip), buffer(buf), zStream(stream->GetZStream(*buf)) { }

MappableDeflate::~MappableDeflate() { }

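/* Note on the bookkeeping below: zStream.avail_out counts the bytes of the
 * buffer not yet inflated, so the amount still missing to cover
 * [offset, offset + length) is offset + length + avail_out - GetLength().
 * For instance (illustrative numbers), with a 100-page buffer of which 40
 * pages are already inflated (avail_out = 60 pages), a request ending at
 * page 50 leaves missing = 50 + 60 - 100 = 10 pages to inflate. */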
MemoryRange
MappableDeflate::mmap(const void *addr, size_t length, int prot, int flags, off_t offset)
{
  MOZ_ASSERT(buffer);
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  /* The deflate stream is uncompressed up to the required offset + length, if
   * it hasn't previously been uncompressed */
  ssize_t missing = offset + length + zStream.avail_out - buffer->GetLength();
  if (missing > 0) {
    uInt avail_out = zStream.avail_out;
    zStream.avail_out = missing;
    if ((*buffer == zStream.next_out) &&
        (inflateInit2(&zStream, -MAX_WBITS) != Z_OK)) {
      LOG("inflateInit failed: %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    int ret = inflate(&zStream, Z_SYNC_FLUSH);
    if (ret < 0) {
      LOG("inflate failed: %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    if (ret == Z_NEED_DICT) {
      LOG("zstream requires a dictionary. %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    zStream.avail_out = avail_out - missing + zStream.avail_out;
    if (ret == Z_STREAM_END) {
      if (inflateEnd(&zStream) != Z_OK) {
        LOG("inflateEnd failed: %s", zStream.msg);
        return MemoryRange(MAP_FAILED, 0);
      }
      if (zStream.total_out != buffer->GetLength()) {
        LOG("File not fully uncompressed! %ld / %d", zStream.total_out,
            static_cast<unsigned int>(buffer->GetLength()));
        return MemoryRange(MAP_FAILED, 0);
      }
    }
  }
#if defined(ANDROID) && defined(__arm__)
  if (prot & PROT_EXEC) {
    /* We just extracted data that may be executed in the future.
     * We thus need to ensure Instruction and Data cache coherency. */
    DEBUG_LOG("cacheflush(%p, %p)", *buffer + offset, *buffer + (offset + length));
    cacheflush(reinterpret_cast<uintptr_t>(*buffer + offset),
               reinterpret_cast<uintptr_t>(*buffer + (offset + length)), 0);
  }
#endif

  return MemoryRange(buffer->mmap(addr, length, prot, flags, offset), length);
}

void
MappableDeflate::finalize()
{
  /* Free zlib internal buffers */
  inflateEnd(&zStream);
  /* Free decompression buffer */
  buffer = nullptr;
  /* Remove reference to Zip archive */
  zip = nullptr;
}

size_t
MappableDeflate::GetLength() const
{
  return buffer->GetLength();
}

Mappable *
MappableSeekableZStream::Create(const char *name, Zip *zip,
                                Zip::Stream *stream)
{
  MOZ_ASSERT(stream->GetType() == Zip::Stream::STORE);
  mozilla::ScopedDeletePtr<MappableSeekableZStream> mappable;
  mappable = new MappableSeekableZStream(zip);

  pthread_mutexattr_t recursiveAttr;
  pthread_mutexattr_init(&recursiveAttr);
  pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE);

  if (pthread_mutex_init(&mappable->mutex, &recursiveAttr))
    return nullptr;

  if (!mappable->zStream.Init(stream->GetBuffer(), stream->GetSize()))
    return nullptr;

  mappable->buffer = _MappableBuffer::Create(name,
                              mappable->zStream.GetUncompressedSize());
  if (!mappable->buffer)
    return nullptr;

  mappable->chunkAvail = new unsigned char[mappable->zStream.GetChunksNum()];
  memset(mappable->chunkAvail, 0, mappable->zStream.GetChunksNum());

  return mappable.forget();
}

MappableSeekableZStream::MappableSeekableZStream(Zip *zip)
: zip(zip), chunkAvailNum(0) { }

MappableSeekableZStream::~MappableSeekableZStream()
{
  pthread_mutex_destroy(&mutex);
}

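/* Mappings are created PROT_NONE so that the first touch of any page faults
 * and lands in ensure(), which decompresses the relevant chunk and flips the
 * protection to the flags recorded in the LazyMap. */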
MemoryRange
MappableSeekableZStream::mmap(const void *addr, size_t length, int prot,
                              int flags, off_t offset)
{
  /* Map with PROT_NONE so that accessing the mapping would segfault, and
   * bring us to ensure() */
  void *res = buffer->mmap(addr, length, PROT_NONE, flags, offset);
  if (res == MAP_FAILED)
    return MemoryRange(MAP_FAILED, 0);

  /* Store the mapping, ordered by offset and length */
  std::vector<LazyMap>::reverse_iterator it;
  for (it = lazyMaps.rbegin(); it < lazyMaps.rend(); ++it) {
    if ((it->offset < offset) ||
        ((it->offset == offset) && (it->length < length)))
      break;
  }
  LazyMap map = { res, length, prot, offset };
  lazyMaps.insert(it.base(), map);
  return MemoryRange(res, length);
}

void
MappableSeekableZStream::munmap(void *addr, size_t length)
{
  std::vector<LazyMap>::iterator it;
  for (it = lazyMaps.begin(); it < lazyMaps.end(); ++it)
    if ((it->addr == addr) && (it->length == length)) {
      lazyMaps.erase(it);
      ::munmap(addr, length);
      return;
    }
  MOZ_CRASH("munmap called with unknown mapping");
}

void
MappableSeekableZStream::finalize() { }

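/* Minimal RAII guard around a pthread mutex; crashing on lock/unlock failure
 * is preferred here to silently proceeding without the lock. */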
class AutoLock {
public:
  AutoLock(pthread_mutex_t *mutex): mutex(mutex)
  {
    if (pthread_mutex_lock(mutex))
      MOZ_CRASH("pthread_mutex_lock failed");
  }
  ~AutoLock()
  {
    if (pthread_mutex_unlock(mutex))
      MOZ_CRASH("pthread_mutex_unlock failed");
  }
private:
  pthread_mutex_t *mutex;
};

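/* Called when a PROT_NONE page from mmap() above is touched: finds the
 * LazyMap covering the faulting address, decompresses the corresponding
 * chunk into the buffer, and mprotects the range to its recorded flags. */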
bool
MappableSeekableZStream::ensure(const void *addr)
{
  DEBUG_LOG("ensure @%p", addr);
  const void *addrPage = PageAlignedPtr(addr);
  /* Find the mapping corresponding to the given page */
  std::vector<LazyMap>::iterator map;
  for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
    if (map->Contains(addrPage))
      break;
  }
  if (map == lazyMaps.end())
    return false;

  /* Find corresponding chunk */
  off_t mapOffset = map->offsetOf(addrPage);
  off_t chunk = mapOffset / zStream.GetChunkSize();

  /* In the typical case, we just need to decompress the chunk entirely. But
   * when the current mapping ends in the middle of the chunk, we want to
   * stop at the end of the corresponding page.
   * However, if another mapping needs the last part of the chunk, we still
   * need to continue. As mappings are ordered by offset and length, we don't
   * need to scan the entire list of mappings.
   * It is safe to run through lazyMaps here because the linker is never
   * going to call mmap (which adds lazyMaps) while this function is
   * called. */
  size_t length = zStream.GetChunkSize(chunk);
  off_t chunkStart = chunk * zStream.GetChunkSize();
  off_t chunkEnd = chunkStart + length;
  std::vector<LazyMap>::iterator it;
  for (it = map; it < lazyMaps.end(); ++it) {
    if (chunkEnd <= it->endOffset())
      break;
  }
  if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
    /* The mapping "it" points at now is past the interesting one */
    --it;
    length = it->endOffset() - chunkStart;
  }

  length = PageAlignedSize(length);

  /* The following lock can be re-acquired by the thread holding it.
   * If this happens, it means the following code is interrupted somehow by
   * some signal, and ends up retriggering a chunk decompression for the
   * same MappableSeekableZStream.
   * If the chunk to decompress is different the second time, then everything
   * is safe as the only common data touched below is chunkAvailNum, and it is
   * atomically updated (leaving out any chance of an interruption while it is
   * updated affecting the result). If the chunk to decompress is the same, the
   * worst thing that can happen is chunkAvailNum being incremented one too
   * many times, which doesn't affect functionality. The chances of it
   * happening being pretty slim, and the effect being harmless, we can just
   * ignore the issue. Other than that, we'd just be wasting time decompressing
   * the same chunk twice. */
  AutoLock lock(&mutex);

  /* The very first page is mapped and accessed separately from the rest, and
   * as such, only the first page of the first chunk is decompressed this way.
   * When we fault in the remaining pages of that chunk, we want to decompress
   * the complete chunk again. Short of doing that, we would end up with
   * no data between PageSize() and chunkSize, which would effectively corrupt
   * symbol resolution in the underlying library. */
  if (chunkAvail[chunk] < PageNumber(length)) {
    if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
      return false;

#if defined(ANDROID) && defined(__arm__)
    if (map->prot & PROT_EXEC) {
      /* We just extracted data that may be executed in the future.
       * We thus need to ensure Instruction and Data cache coherency. */
      DEBUG_LOG("cacheflush(%p, %p)", *buffer + chunkStart, *buffer + (chunkStart + length));
      cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
                 reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)), 0);
    }
#endif
    /* Only count if we haven't already decompressed parts of the chunk */
    if (chunkAvail[chunk] == 0)
      chunkAvailNum++;

    chunkAvail[chunk] = PageNumber(length);
  }

  /* Flip the chunk mapping protection to the recorded flags. We could
   * also flip the protection for other mappings of the same chunk,
   * but it's easier to skip that and let further segfaults call
   * ensure again. */
  const void *chunkAddr = reinterpret_cast<const void *>
                          (reinterpret_cast<uintptr_t>(addrPage)
                           - mapOffset % zStream.GetChunkSize());
  const void *chunkEndAddr = reinterpret_cast<const void *>
                             (reinterpret_cast<uintptr_t>(chunkAddr) + length);

  const void *start = std::max(map->addr, chunkAddr);
  const void *end = std::min(map->end(), chunkEndAddr);
  length = reinterpret_cast<uintptr_t>(end)
           - reinterpret_cast<uintptr_t>(start);

  if (mprotect(const_cast<void *>(start), length, map->prot) == 0) {
    DEBUG_LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x", start, length, map->prot);
    return true;
  }

  LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x failed with errno %d",
      start, length, map->prot, errno);
  return false;
}

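/* Dumps decompression progress as rows of at most 64 chunk markers, e.g.
 * "[**___*]": '*' for a chunk at least partly decompressed, '_' for one
 * still untouched. */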
void
MappableSeekableZStream::stats(const char *when, const char *name) const
{
  size_t nEntries = zStream.GetChunksNum();
  DEBUG_LOG("%s: %s; %" PRIdSize "/%" PRIdSize " chunks decompressed",
            name, when, static_cast<size_t>(chunkAvailNum), nEntries);

  size_t len = 64;
  mozilla::ScopedDeleteArray<char> map;
  map = new char[len + 3];
  map[0] = '[';

  for (size_t i = 0, j = 1; i < nEntries; i++, j++) {
    map[j] = chunkAvail[i] ? '*' : '_';
    if ((j == len) || (i == nEntries - 1)) {
      map[j + 1] = ']';
      map[j + 2] = '\0';
      DEBUG_LOG("%s", static_cast<char *>(map));
      j = 0;
    }
  }
}

size_t
MappableSeekableZStream::GetLength() const
{
  return buffer->GetLength();
}