Thu, 22 Jan 2015 13:21:57 +0100
Incorporate changes requested in the Mozilla review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <cstring>
#include <cstdlib>
#include <cstdio>
#include "Mappable.h"
#ifdef ANDROID
#include <linux/ashmem.h>
#endif
#include <errno.h>
#include "ElfLoader.h"
#include "SeekableZStream.h"
#include "Logging.h"

Mappable *
MappableFile::Create(const char *path)
{
  int fd = open(path, O_RDONLY);
  if (fd != -1)
    return new MappableFile(fd);
  return nullptr;
}

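/* Map a portion of the underlying file; mappings are always private
 * (MAP_SHARED is not supported). */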
MemoryRange
MappableFile::mmap(const void *addr, size_t length, int prot, int flags,
                   off_t offset)
{
  MOZ_ASSERT(fd != -1);
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  return MemoryRange::mmap(const_cast<void *>(addr), length, prot, flags,
                           fd, offset);
}

void
MappableFile::finalize()
{
  /* Close file; equivalent to close(fd.forget()) */
  fd = -1;
}

size_t
MappableFile::GetLength() const
{
  struct stat st;
  return fstat(fd, &st) ? 0 : st.st_size;
}

Mappable *
MappableExtractFile::Create(const char *name, Zip *zip, Zip::Stream *stream)
{
  const char *cachePath = getenv("MOZ_LINKER_CACHE");
  if (!cachePath || !*cachePath) {
    LOG("Warning: MOZ_LINKER_EXTRACT is set, but not MOZ_LINKER_CACHE; "
        "not extracting");
    return nullptr;
  }
  mozilla::ScopedDeleteArray<char> path;
  path = new char[strlen(cachePath) + strlen(name) + 2];
  sprintf(path, "%s/%s", cachePath, name);
  struct stat cacheStat;
  if (stat(path, &cacheStat) == 0) {
    struct stat zipStat;
    stat(zip->GetName(), &zipStat);
    if (cacheStat.st_mtime > zipStat.st_mtime) {
      DEBUG_LOG("Reusing %s", static_cast<char *>(path));
      return MappableFile::Create(path);
    }
  }
  DEBUG_LOG("Extracting to %s", static_cast<char *>(path));
  AutoCloseFD fd;
  fd = open(path, O_TRUNC | O_RDWR | O_CREAT | O_NOATIME,
            S_IRUSR | S_IWUSR);
  if (fd == -1) {
    LOG("Couldn't open %s to decompress library", path.get());
    return nullptr;
  }
  AutoUnlinkFile file;
  file = path.forget();
  if (stream->GetType() == Zip::Stream::DEFLATE) {
    if (ftruncate(fd, stream->GetUncompressedSize()) == -1) {
      LOG("Couldn't ftruncate %s to decompress library", file.get());
      return nullptr;
    }
    /* Map the temporary file for use as inflate buffer */
    MappedPtr buffer(MemoryRange::mmap(nullptr, stream->GetUncompressedSize(),
                                       PROT_WRITE, MAP_SHARED, fd, 0));
    if (buffer == MAP_FAILED) {
      LOG("Couldn't map %s to decompress library", file.get());
      return nullptr;
    }

    z_stream zStream = stream->GetZStream(buffer);

    /* Decompress */
    if (inflateInit2(&zStream, -MAX_WBITS) != Z_OK) {
      LOG("inflateInit failed: %s", zStream.msg);
      return nullptr;
    }
    if (inflate(&zStream, Z_FINISH) != Z_STREAM_END) {
      LOG("inflate failed: %s", zStream.msg);
      return nullptr;
    }
    if (inflateEnd(&zStream) != Z_OK) {
      LOG("inflateEnd failed: %s", zStream.msg);
      return nullptr;
    }
    if (zStream.total_out != stream->GetUncompressedSize()) {
      LOG("File not fully uncompressed! %ld / %d", zStream.total_out,
          static_cast<unsigned int>(stream->GetUncompressedSize()));
      return nullptr;
    }
  } else if (stream->GetType() == Zip::Stream::STORE) {
    SeekableZStream zStream;
    if (!zStream.Init(stream->GetBuffer(), stream->GetSize())) {
      LOG("Couldn't initialize SeekableZStream for %s", name);
      return nullptr;
    }
    if (ftruncate(fd, zStream.GetUncompressedSize()) == -1) {
      LOG("Couldn't ftruncate %s to decompress library", file.get());
      return nullptr;
    }
    MappedPtr buffer(MemoryRange::mmap(nullptr, zStream.GetUncompressedSize(),
                                       PROT_WRITE, MAP_SHARED, fd, 0));
    if (buffer == MAP_FAILED) {
      LOG("Couldn't map %s to decompress library", file.get());
      return nullptr;
    }

    if (!zStream.Decompress(buffer, 0, zStream.GetUncompressedSize())) {
      LOG("%s: failed to decompress", name);
      return nullptr;
    }
  } else {
    return nullptr;
  }

  return new MappableExtractFile(fd.forget(), file.forget());
}

MappableExtractFile::~MappableExtractFile()
{
  /* When destroying from a forked process, we don't want the file to be
   * removed, as the main process is still using it. Although it doesn't
   * really matter, keeping the file around also helps e.g. valgrind.
   * The string still needs to be delete[]d, though. */
  if (pid != getpid())
    delete [] path.forget();
}

/**
 * _MappableBuffer is a buffer whose content can be mapped at different
 * locations in the virtual address space.
 * On Linux, it uses a (deleted) temporary file on a tmpfs for sharable
 * content. On Android, it uses ashmem.
 */
class _MappableBuffer: public MappedPtr
{
public:
  /**
   * Returns a _MappableBuffer instance with the given name and the given
   * length.
   */
  static _MappableBuffer *Create(const char *name, size_t length)
  {
    AutoCloseFD fd;
#ifdef ANDROID
    /* On Android, initialize an ashmem region with the given length */
    fd = open("/" ASHMEM_NAME_DEF, O_RDWR, 0600);
    if (fd == -1)
      return nullptr;
    char str[ASHMEM_NAME_LEN];
    strlcpy(str, name, sizeof(str));
    ioctl(fd, ASHMEM_SET_NAME, str);
    if (ioctl(fd, ASHMEM_SET_SIZE, length))
      return nullptr;

    /* The Gecko crash reporter is confused by adjacent memory mappings of
     * the same file and chances are we're going to map from the same file
     * descriptor right away. To avoid problems with the crash reporter,
     * create an empty anonymous page before or after the ashmem mapping,
     * depending on how mappings grow in the address space.
     */
#if defined(__arm__)
    void *buf = ::mmap(nullptr, length + PAGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
    if (buf != MAP_FAILED) {
      ::mmap(AlignedEndPtr(reinterpret_cast<char *>(buf) + length, PAGE_SIZE),
             PAGE_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
                length, str, buf);
      return new _MappableBuffer(fd.forget(), buf, length);
    }
#elif defined(__i386__)
    size_t anon_mapping_length = length + PAGE_SIZE;
    void *buf = ::mmap(nullptr, anon_mapping_length, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf != MAP_FAILED) {
      char *first_page = reinterpret_cast<char *>(buf);
      char *map_page = first_page + PAGE_SIZE;

      void *actual_buf = ::mmap(map_page, length, PROT_READ | PROT_WRITE,
                                MAP_FIXED | MAP_SHARED, fd, 0);
      if (actual_buf == MAP_FAILED) {
        ::munmap(buf, anon_mapping_length);
        DEBUG_LOG("Fixed allocation of decompression buffer at %p failed", map_page);
        return nullptr;
      }

      DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
                length, str, actual_buf);
      return new _MappableBuffer(fd.forget(), actual_buf, length);
    }
#else
#error need to add a case for your CPU
#endif
#else
    /* On Linux, use /dev/shm as base directory for temporary files, assuming
     * it's on tmpfs */
    /* TODO: check that /dev/shm is tmpfs */
    char path[256];
    sprintf(path, "/dev/shm/%s.XXXXXX", name);
    fd = mkstemp(path);
    if (fd == -1)
      return nullptr;
    unlink(path);
    ftruncate(fd, length);

    void *buf = ::mmap(nullptr, length, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
    if (buf != MAP_FAILED) {
      DEBUG_LOG("Decompression buffer of size %ld in \"%s\", mapped @%p",
                length, path, buf);
      return new _MappableBuffer(fd.forget(), buf, length);
    }
#endif
    return nullptr;
  }

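  /**
   * Maps the buffer content at the given address; the arguments follow
   * ::mmap semantics.
   */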
  void *mmap(const void *addr, size_t length, int prot, int flags,
             off_t offset)
  {
    MOZ_ASSERT(fd != -1);
#ifdef ANDROID
    /* Mapping ashmem MAP_PRIVATE is like mapping anonymous memory, even when
     * there is content in the ashmem */
    if (flags & MAP_PRIVATE) {
      flags &= ~MAP_PRIVATE;
      flags |= MAP_SHARED;
    }
#endif
    return ::mmap(const_cast<void *>(addr), length, prot, flags, fd, offset);
  }

#ifdef ANDROID
  ~_MappableBuffer() {
    /* Free the additional page we allocated. See _MappableBuffer::Create */
#if defined(__arm__)
    ::munmap(AlignedEndPtr(*this + GetLength(), PAGE_SIZE), PAGE_SIZE);
#elif defined(__i386__)
    ::munmap(*this - PAGE_SIZE, GetLength() + PAGE_SIZE);
#else
#error need to add a case for your CPU
#endif
  }
#endif

private:
  _MappableBuffer(int fd, void *buf, size_t length)
  : MappedPtr(buf, length), fd(fd) { }

  /* File descriptor for the temporary file or ashmem */
  AutoCloseFD fd;
};

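/* Typical use of _MappableBuffer, as done by the Mappable implementations
 * below: create a buffer sized for the uncompressed data, then hand out
 * mappings of it, e.g.:
 *   _MappableBuffer *buf = _MappableBuffer::Create(name, uncompressedSize);
 *   void *where = buf->mmap(addr, length, prot, MAP_PRIVATE, offset);
 */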
Mappable *
MappableDeflate::Create(const char *name, Zip *zip, Zip::Stream *stream)
{
  MOZ_ASSERT(stream->GetType() == Zip::Stream::DEFLATE);
  _MappableBuffer *buf = _MappableBuffer::Create(name,
                                                 stream->GetUncompressedSize());
  if (buf)
    return new MappableDeflate(buf, zip, stream);
  return nullptr;
}

MappableDeflate::MappableDeflate(_MappableBuffer *buf, Zip *zip,
                                 Zip::Stream *stream)
: zip(zip), buffer(buf), zStream(stream->GetZStream(*buf)) { }

MappableDeflate::~MappableDeflate() { }

MemoryRange
MappableDeflate::mmap(const void *addr, size_t length, int prot, int flags,
                      off_t offset)
{
  MOZ_ASSERT(buffer);
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  /* The deflate stream is uncompressed up to the required offset + length, if
   * it hasn't previously been uncompressed */
  ssize_t missing = offset + length + zStream.avail_out - buffer->GetLength();
  if (missing > 0) {
    uInt avail_out = zStream.avail_out;
    zStream.avail_out = missing;
    if ((*buffer == zStream.next_out) &&
        (inflateInit2(&zStream, -MAX_WBITS) != Z_OK)) {
      LOG("inflateInit failed: %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    int ret = inflate(&zStream, Z_SYNC_FLUSH);
    if (ret < 0) {
      LOG("inflate failed: %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    if (ret == Z_NEED_DICT) {
      LOG("zstream requires a dictionary. %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    zStream.avail_out = avail_out - missing + zStream.avail_out;
    if (ret == Z_STREAM_END) {
      if (inflateEnd(&zStream) != Z_OK) {
        LOG("inflateEnd failed: %s", zStream.msg);
        return MemoryRange(MAP_FAILED, 0);
      }
      if (zStream.total_out != buffer->GetLength()) {
        LOG("File not fully uncompressed! %ld / %d", zStream.total_out,
            static_cast<unsigned int>(buffer->GetLength()));
        return MemoryRange(MAP_FAILED, 0);
      }
    }
  }
#if defined(ANDROID) && defined(__arm__)
  if (prot & PROT_EXEC) {
    /* We just extracted data that may be executed in the future.
     * We thus need to ensure Instruction and Data cache coherency. */
    DEBUG_LOG("cacheflush(%p, %p)", *buffer + offset, *buffer + (offset + length));
    cacheflush(reinterpret_cast<uintptr_t>(*buffer + offset),
               reinterpret_cast<uintptr_t>(*buffer + (offset + length)), 0);
  }
#endif

  return MemoryRange(buffer->mmap(addr, length, prot, flags, offset), length);
}

void
MappableDeflate::finalize()
{
  /* Free zlib internal buffers */
  inflateEnd(&zStream);
  /* Free decompression buffer */
  buffer = nullptr;
  /* Remove reference to Zip archive */
  zip = nullptr;
}

size_t
MappableDeflate::GetLength() const
{
  return buffer->GetLength();
}

Mappable *
MappableSeekableZStream::Create(const char *name, Zip *zip,
                                Zip::Stream *stream)
{
  MOZ_ASSERT(stream->GetType() == Zip::Stream::STORE);
  mozilla::ScopedDeletePtr<MappableSeekableZStream> mappable;
  mappable = new MappableSeekableZStream(zip);

  pthread_mutexattr_t recursiveAttr;
  pthread_mutexattr_init(&recursiveAttr);
  pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE);

  if (pthread_mutex_init(&mappable->mutex, &recursiveAttr))
    return nullptr;

  if (!mappable->zStream.Init(stream->GetBuffer(), stream->GetSize()))
    return nullptr;

  mappable->buffer = _MappableBuffer::Create(name,
                       mappable->zStream.GetUncompressedSize());
  if (!mappable->buffer)
    return nullptr;

  mappable->chunkAvail = new unsigned char[mappable->zStream.GetChunksNum()];
  memset(mappable->chunkAvail, 0, mappable->zStream.GetChunksNum());

  return mappable.forget();
}

MappableSeekableZStream::MappableSeekableZStream(Zip *zip)
: zip(zip), chunkAvailNum(0) { }

MappableSeekableZStream::~MappableSeekableZStream()
{
  pthread_mutex_destroy(&mutex);
}

MemoryRange
MappableSeekableZStream::mmap(const void *addr, size_t length, int prot,
                              int flags, off_t offset)
{
  /* Map with PROT_NONE so that accessing the mapping segfaults and
   * brings us into ensure() */
  void *res = buffer->mmap(addr, length, PROT_NONE, flags, offset);
  if (res == MAP_FAILED)
    return MemoryRange(MAP_FAILED, 0);

  /* Store the mapping, ordered by offset and length */
  std::vector<LazyMap>::reverse_iterator it;
  for (it = lazyMaps.rbegin(); it < lazyMaps.rend(); ++it) {
    if ((it->offset < offset) ||
        ((it->offset == offset) && (it->length < length)))
      break;
  }
  LazyMap map = { res, length, prot, offset };
  lazyMaps.insert(it.base(), map);
  return MemoryRange(res, length);
}

void
MappableSeekableZStream::munmap(void *addr, size_t length)
{
  std::vector<LazyMap>::iterator it;
  for (it = lazyMaps.begin(); it < lazyMaps.end(); ++it)
    if ((it->addr == addr) && (it->length == length)) {
      lazyMaps.erase(it);
      ::munmap(addr, length);
      return;
    }
  MOZ_CRASH("munmap called with unknown mapping");
}

/* Nothing to do here: the decompression buffer and the Zip reference must
 * stay around for later ensure() calls. */
void
MappableSeekableZStream::finalize() { }

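/* RAII wrapper around a pthread mutex: locks the mutex on construction and
 * unlocks it on destruction. */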
class AutoLock {
public:
  AutoLock(pthread_mutex_t *mutex): mutex(mutex)
  {
    if (pthread_mutex_lock(mutex))
      MOZ_CRASH("pthread_mutex_lock failed");
  }
  ~AutoLock()
  {
    if (pthread_mutex_unlock(mutex))
      MOZ_CRASH("pthread_mutex_unlock failed");
  }
private:
  pthread_mutex_t *mutex;
};

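/* Called when a lazily mapped page is accessed (the PROT_NONE mapping set up
 * in mmap() above makes the access fault): decompress the chunk containing
 * addr and set the mapping protection to the recorded flags. */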
bool
MappableSeekableZStream::ensure(const void *addr)
{
  DEBUG_LOG("ensure @%p", addr);
  const void *addrPage = PageAlignedPtr(addr);
  /* Find the mapping corresponding to the given page */
  std::vector<LazyMap>::iterator map;
  for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
    if (map->Contains(addrPage))
      break;
  }
  if (map == lazyMaps.end())
    return false;

  /* Find corresponding chunk */
  off_t mapOffset = map->offsetOf(addrPage);
  off_t chunk = mapOffset / zStream.GetChunkSize();

  /* In the typical case, we just need to decompress the chunk entirely. But
   * when the current mapping ends in the middle of the chunk, we want to
   * stop at the end of the corresponding page.
   * However, if another mapping needs the last part of the chunk, we still
   * need to continue. As mappings are ordered by offset and length, we don't
   * need to scan the entire list of mappings.
   * It is safe to run through lazyMaps here because the linker is never
   * going to call mmap (which adds lazyMaps) while this function is
   * called. */
  size_t length = zStream.GetChunkSize(chunk);
  off_t chunkStart = chunk * zStream.GetChunkSize();
  off_t chunkEnd = chunkStart + length;
  std::vector<LazyMap>::iterator it;
  for (it = map; it < lazyMaps.end(); ++it) {
    if (chunkEnd <= it->endOffset())
      break;
  }
  if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
    /* The mapping "it" points at now is past the interesting one */
    --it;
    length = it->endOffset() - chunkStart;
  }

  length = PageAlignedSize(length);

  /* The following lock can be re-acquired by the thread holding it.
   * If this happens, it means the following code is interrupted somehow by
   * some signal, and ends up retriggering a chunk decompression for the
   * same MappableSeekableZStream.
   * If the chunk to decompress is different the second time, then everything
   * is safe as the only common data touched below is chunkAvailNum, and it is
   * atomically updated (leaving out any chance of an interruption while it is
   * updated affecting the result). If the chunk to decompress is the same, the
   * worst thing that can happen is chunkAvailNum being incremented one too
   * many times, which doesn't affect functionality. The chances of it
   * happening being pretty slim, and the effect being harmless, we can just
   * ignore the issue. Other than that, we'd just be wasting time decompressing
   * the same chunk twice. */
  AutoLock lock(&mutex);

  /* The very first page is mapped and accessed separately from the rest, and
   * as such, only the first page of the first chunk is decompressed this way.
   * When we fault in the remaining pages of that chunk, we want to decompress
   * the complete chunk again. Short of doing that, we would end up with
   * no data between PageSize() and chunkSize, which would effectively corrupt
   * symbol resolution in the underlying library. */
  if (chunkAvail[chunk] < PageNumber(length)) {
    if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
      return false;

#if defined(ANDROID) && defined(__arm__)
    if (map->prot & PROT_EXEC) {
      /* We just extracted data that may be executed in the future.
       * We thus need to ensure Instruction and Data cache coherency. */
      DEBUG_LOG("cacheflush(%p, %p)", *buffer + chunkStart, *buffer + (chunkStart + length));
      cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
                 reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)), 0);
    }
#endif
    /* Only count if we haven't already decompressed parts of the chunk */
    if (chunkAvail[chunk] == 0)
      chunkAvailNum++;

    chunkAvail[chunk] = PageNumber(length);
  }

  /* Flip the chunk mapping protection to the recorded flags. We could
   * also flip the protection for other mappings of the same chunk,
   * but it's easier to skip that and let further segfaults call
   * ensure again. */
  const void *chunkAddr = reinterpret_cast<const void *>
                          (reinterpret_cast<uintptr_t>(addrPage)
                           - mapOffset % zStream.GetChunkSize());
  const void *chunkEndAddr = reinterpret_cast<const void *>
                             (reinterpret_cast<uintptr_t>(chunkAddr) + length);

  const void *start = std::max(map->addr, chunkAddr);
  const void *end = std::min(map->end(), chunkEndAddr);
  length = reinterpret_cast<uintptr_t>(end)
           - reinterpret_cast<uintptr_t>(start);

  if (mprotect(const_cast<void *>(start), length, map->prot) == 0) {
    DEBUG_LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x", start, length, map->prot);
    return true;
  }

  LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x failed with errno %d",
      start, length, map->prot, errno);
  return false;
}

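/* Log decompression statistics: how many chunks have been decompressed so
 * far, followed by a map of decompressed ('*') and pending ('_') chunks. */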
void
MappableSeekableZStream::stats(const char *when, const char *name) const
{
  size_t nEntries = zStream.GetChunksNum();
  DEBUG_LOG("%s: %s; %" PRIdSize "/%" PRIdSize " chunks decompressed",
            name, when, static_cast<size_t>(chunkAvailNum), nEntries);

  size_t len = 64;
  mozilla::ScopedDeleteArray<char> map;
  map = new char[len + 3];
  map[0] = '[';

  for (size_t i = 0, j = 1; i < nEntries; i++, j++) {
    map[j] = chunkAvail[i] ? '*' : '_';
    if ((j == len) || (i == nEntries - 1)) {
      map[j + 1] = ']';
      map[j + 2] = '\0';
      DEBUG_LOG("%s", static_cast<char *>(map));
      j = 0;
    }
  }
}

size_t
MappableSeekableZStream::GetLength() const
{
  return buffer->GetLength();
}