/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <cstring>
#include <sys/mman.h>
#include <vector>
#include <dlfcn.h>
#include "CustomElf.h"
#include "Mappable.h"
#include "Logging.h"

using namespace Elf;
using namespace mozilla;

/* TODO: Fill ElfLoader::Singleton.lastError on errors. */

/* Function used to report library mappings from the custom linker to Gecko
 * crash reporter */
#ifdef ANDROID
extern "C" {
  void report_mapping(char *name, void *base, uint32_t len, uint32_t offset);
}
#else
#define report_mapping(...)
#endif

const Ehdr *Ehdr::validate(const void *buf)
{
  if (!buf || buf == MAP_FAILED)
    return nullptr;

  const Ehdr *ehdr = reinterpret_cast<const Ehdr *>(buf);

  /* Only support ELF executables or libraries for the host system */
  if (memcmp(ELFMAG, &ehdr->e_ident, SELFMAG) ||
      ehdr->e_ident[EI_CLASS] != ELFCLASS ||
      ehdr->e_ident[EI_DATA] != ELFDATA ||
      ehdr->e_ident[EI_VERSION] != 1 ||
      (ehdr->e_ident[EI_OSABI] != ELFOSABI &&
       ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE) ||
#ifdef EI_ABIVERSION
      ehdr->e_ident[EI_ABIVERSION] != ELFABIVERSION ||
#endif
      (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) ||
      ehdr->e_machine != ELFMACHINE ||
      ehdr->e_version != 1 ||
      ehdr->e_phentsize != sizeof(Phdr))
    return nullptr;

  return ehdr;
}

namespace {

void debug_phdr(const char *type, const Phdr *phdr)
{
  DEBUG_LOG("%s @0x%08" PRIxAddr " ("
            "filesz: 0x%08" PRIxAddr ", "
            "memsz: 0x%08" PRIxAddr ", "
            "offset: 0x%08" PRIxAddr ", "
            "flags: %c%c%c)",
            type, phdr->p_vaddr, phdr->p_filesz, phdr->p_memsz,
            phdr->p_offset, phdr->p_flags & PF_R ? 'r' : '-',
            phdr->p_flags & PF_W ? 'w' : '-', phdr->p_flags & PF_X ? 'x' : '-');
}

static int p_flags_to_mprot(Word flags)
{
  return ((flags & PF_X) ? PROT_EXEC : 0) |
         ((flags & PF_W) ? PROT_WRITE : 0) |
         ((flags & PF_R) ? PROT_READ : 0);
}

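/* Empty stub, used below to resolve symbols we deliberately turn into no-ops
 * (see the missing Flash symbols in CustomElf::GetSymbolPtrInDeps). */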
void
__void_stub(void)
{
}

} /* anonymous namespace */

/**
 * RAII wrapper for a mapping of the first page off a Mappable object.
 * This calls Mappable::munmap instead of system munmap.
 */
class Mappable1stPagePtr: public GenericMappedPtr<Mappable1stPagePtr> {
public:
  Mappable1stPagePtr(Mappable *mappable)
  : GenericMappedPtr<Mappable1stPagePtr>(
      mappable->mmap(nullptr, PageSize(), PROT_READ, MAP_PRIVATE, 0))
  , mappable(mappable)
  {
    /* Ensure the content of this page */
    mappable->ensure(*this);
  }

private:
  friend class GenericMappedPtr<Mappable1stPagePtr>;
  void munmap(void *buf, size_t length) {
    mappable->munmap(buf, length);
  }

  mozilla::RefPtr<Mappable> mappable;
};


TemporaryRef<LibHandle>
CustomElf::Load(Mappable *mappable, const char *path, int flags)
{
  DEBUG_LOG("CustomElf::Load(\"%s\", 0x%x) = ...", path, flags);
  if (!mappable)
    return nullptr;
  /* Keeping a RefPtr of the CustomElf is going to free the appropriate
   * resources when returning nullptr */
  RefPtr<CustomElf> elf = new CustomElf(mappable, path);
  /* Map the first page of the Elf object to access Elf and program headers */
  Mappable1stPagePtr ehdr_raw(mappable);
  if (ehdr_raw == MAP_FAILED)
    return nullptr;

  const Ehdr *ehdr = Ehdr::validate(ehdr_raw);
  if (!ehdr)
    return nullptr;

  /* Scan Elf Program Headers and gather some information about them */
  std::vector<const Phdr *> pt_loads;
  Addr min_vaddr = (Addr) -1; // We want to find the lowest and highest
  Addr max_vaddr = 0;         // virtual addresses used by this Elf.
  const Phdr *dyn = nullptr;

  const Phdr *first_phdr = reinterpret_cast<const Phdr *>(
      reinterpret_cast<const char *>(ehdr) + ehdr->e_phoff);
  const Phdr *end_phdr = &first_phdr[ehdr->e_phnum];
#ifdef __ARM_EABI__
  const Phdr *arm_exidx_phdr = nullptr;
#endif

  for (const Phdr *phdr = first_phdr; phdr < end_phdr; phdr++) {
    switch (phdr->p_type) {
      case PT_LOAD:
        debug_phdr("PT_LOAD", phdr);
        pt_loads.push_back(phdr);
        if (phdr->p_vaddr < min_vaddr)
          min_vaddr = phdr->p_vaddr;
        if (max_vaddr < phdr->p_vaddr + phdr->p_memsz)
          max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        break;
      case PT_DYNAMIC:
        debug_phdr("PT_DYNAMIC", phdr);
        if (!dyn) {
          dyn = phdr;
        } else {
          LOG("%s: Multiple PT_DYNAMIC segments detected", elf->GetPath());
          return nullptr;
        }
        break;
      case PT_TLS:
        debug_phdr("PT_TLS", phdr);
        if (phdr->p_memsz) {
          LOG("%s: TLS is not supported", elf->GetPath());
          return nullptr;
        }
        break;
      case PT_GNU_STACK:
        debug_phdr("PT_GNU_STACK", phdr);
        // Skip on Android until bug 706116 is fixed
#ifndef ANDROID
        if (phdr->p_flags & PF_X) {
          LOG("%s: Executable stack is not supported", elf->GetPath());
          return nullptr;
        }
#endif
        break;
#ifdef __ARM_EABI__
      case PT_ARM_EXIDX:
        /* We cannot initialize arm_exidx here
           because we don't have a base yet */
        arm_exidx_phdr = phdr;
        break;
#endif
      default:
        DEBUG_LOG("%s: Warning: program header type #%d not handled",
                  elf->GetPath(), phdr->p_type);
    }
  }

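  /* This loader assumes the lowest p_vaddr is 0, so that addresses within the
   * library can simply be computed as base + vaddr (see GetPtr). */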
  if (min_vaddr != 0) {
    LOG("%s: Unsupported minimal virtual address: 0x%08" PRIxAddr,
        elf->GetPath(), min_vaddr);
    return nullptr;
  }
  if (!dyn) {
    LOG("%s: No PT_DYNAMIC segment found", elf->GetPath());
    return nullptr;
  }

  /* Reserve enough memory to map the complete virtual address space for this
   * library.
   * As we are using the base address from here to mmap something else with
   * MAP_FIXED | MAP_SHARED, we need to make sure these mmaps will work. For
   * instance, on armv6, MAP_SHARED mappings require a 16k alignment, but mmap
   * MAP_PRIVATE only returns a 4k aligned address. So we first get a base
   * address with MAP_SHARED, which guarantees the kernel returns an address
   * that we'll be able to use with MAP_FIXED, and then remap MAP_PRIVATE at
   * the same address, because of some bad side effects of keeping it as
   * MAP_SHARED. */
  elf->base.Assign(MemoryRange::mmap(nullptr, max_vaddr, PROT_NONE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0));
  if ((elf->base == MAP_FAILED) ||
      (mmap(elf->base, max_vaddr, PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != elf->base)) {
    LOG("%s: Failed to mmap", elf->GetPath());
    return nullptr;
  }

  /* Load and initialize library */
  for (std::vector<const Phdr *>::iterator it = pt_loads.begin();
       it < pt_loads.end(); ++it)
    if (!elf->LoadSegment(*it))
      return nullptr;

  /* We're not going to mmap anymore */
  mappable->finalize();

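  /* Report the mapping to the crash reporter, with the length rounded up to a
   * whole number of pages. */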
  report_mapping(const_cast<char *>(elf->GetName()), elf->base,
                 (max_vaddr + PAGE_SIZE - 1) & PAGE_MASK, 0);

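  /* Fill in the link_map-style fields (l_addr, l_name, l_ld) used to report
   * this library to debugging interfaces. */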
  elf->l_addr = elf->base;
  elf->l_name = elf->GetPath();
  elf->l_ld = elf->GetPtr<Dyn>(dyn->p_vaddr);
  ElfLoader::Singleton.Register(elf);

  if (!elf->InitDyn(dyn))
    return nullptr;

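  /* Segments with text relocations need to be writable while relocations are
   * applied; make them writable here and restore the intended protection
   * flags once relocation is done. */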
  if (elf->has_text_relocs) {
    for (std::vector<const Phdr *>::iterator it = pt_loads.begin();
         it < pt_loads.end(); ++it)
      mprotect(PageAlignedPtr(elf->GetPtr((*it)->p_vaddr)),
               PageAlignedEndPtr((*it)->p_memsz),
               p_flags_to_mprot((*it)->p_flags) | PROT_WRITE);
  }

  if (!elf->Relocate() || !elf->RelocateJumps())
    return nullptr;

  if (elf->has_text_relocs) {
    for (std::vector<const Phdr *>::iterator it = pt_loads.begin();
         it < pt_loads.end(); ++it)
      mprotect(PageAlignedPtr(elf->GetPtr((*it)->p_vaddr)),
               PageAlignedEndPtr((*it)->p_memsz),
               p_flags_to_mprot((*it)->p_flags));
  }

  if (!elf->CallInit())
    return nullptr;

#ifdef __ARM_EABI__
  if (arm_exidx_phdr)
    elf->arm_exidx.InitSize(elf->GetPtr(arm_exidx_phdr->p_vaddr),
                            arm_exidx_phdr->p_memsz);
#endif

  elf->stats("oneLibLoaded");
  DEBUG_LOG("CustomElf::Load(\"%s\", 0x%x) = %p", path, flags,
            static_cast<void *>(elf));
  return elf;
}

CustomElf::~CustomElf()
{
  DEBUG_LOG("CustomElf::~CustomElf(%p [\"%s\"])",
            reinterpret_cast<void *>(this), GetPath());
  CallFini();
  /* Normally, __cxa_finalize is called by the .fini function. However,
   * Android NDK before r6b doesn't do that. Our wrapped cxa_finalize only
   * calls destructors once, so call it in all cases. */
  ElfLoader::__wrap_cxa_finalize(this);
  ElfLoader::Singleton.Forget(this);
}

namespace {

/**
 * Hash function for symbol lookup, as defined in ELF standard for System V
 */
unsigned long
ElfHash(const char *symbol)
{
  const unsigned char *sym = reinterpret_cast<const unsigned char *>(symbol);
  unsigned long h = 0, g;
  while (*sym) {
    h = (h << 4) + *sym++;
    if ((g = h & 0xf0000000))
      h ^= g >> 24;
    h &= ~g;
  }
  return h;
}

} /* anonymous namespace */

void *
CustomElf::GetSymbolPtr(const char *symbol) const
{
  return GetSymbolPtr(symbol, ElfHash(symbol));
}

void *
CustomElf::GetSymbolPtr(const char *symbol, unsigned long hash) const
{
  const Sym *sym = GetSymbol(symbol, hash);
  void *ptr = nullptr;
  if (sym && sym->st_shndx != SHN_UNDEF)
    ptr = GetPtr(sym->st_value);
  DEBUG_LOG("CustomElf::GetSymbolPtr(%p [\"%s\"], \"%s\") = %p",
            reinterpret_cast<const void *>(this), GetPath(), symbol, ptr);
  return ptr;
}

void *
CustomElf::GetSymbolPtrInDeps(const char *symbol) const
{
  /* Resolve dlopen and related functions to point to ours */
  if (symbol[0] == 'd' && symbol[1] == 'l') {
    if (strcmp(symbol + 2, "open") == 0)
      return FunctionPtr(__wrap_dlopen);
    if (strcmp(symbol + 2, "error") == 0)
      return FunctionPtr(__wrap_dlerror);
    if (strcmp(symbol + 2, "close") == 0)
      return FunctionPtr(__wrap_dlclose);
    if (strcmp(symbol + 2, "sym") == 0)
      return FunctionPtr(__wrap_dlsym);
    if (strcmp(symbol + 2, "addr") == 0)
      return FunctionPtr(__wrap_dladdr);
    if (strcmp(symbol + 2, "_iterate_phdr") == 0)
      return FunctionPtr(__wrap_dl_iterate_phdr);
  } else if (symbol[0] == '_' && symbol[1] == '_') {
    /* Resolve a few C++ ABI specific functions to point to ours */
#ifdef __ARM_EABI__
    if (strcmp(symbol + 2, "aeabi_atexit") == 0)
      return FunctionPtr(&ElfLoader::__wrap_aeabi_atexit);
#else
    if (strcmp(symbol + 2, "cxa_atexit") == 0)
      return FunctionPtr(&ElfLoader::__wrap_cxa_atexit);
#endif
    if (strcmp(symbol + 2, "cxa_finalize") == 0)
      return FunctionPtr(&ElfLoader::__wrap_cxa_finalize);
    if (strcmp(symbol + 2, "dso_handle") == 0)
      return const_cast<CustomElf *>(this);
    if (strcmp(symbol + 2, "moz_linker_stats") == 0)
      return FunctionPtr(&ElfLoader::stats);
#ifdef __ARM_EABI__
    if (strcmp(symbol + 2, "gnu_Unwind_Find_exidx") == 0)
      return FunctionPtr(__wrap___gnu_Unwind_Find_exidx);
#endif
  }

#define MISSING_FLASH_SYMNAME_START "_ZN7android10VectorImpl19reservedVectorImpl"

  // Android changed some symbols that Flash depended on in 4.4,
  // so stub those out here
  if (strncmp(symbol,
              MISSING_FLASH_SYMNAME_START,
              sizeof(MISSING_FLASH_SYMNAME_START) - 1) == 0) {
    return FunctionPtr(__void_stub);
  }

  void *sym;
  /* Search for the symbol in the main program. Note this also tries all
   * libraries the system linker loaded with RTLD_GLOBAL. Unfortunately, that
   * doesn't work with bionic, but its linker doesn't normally search the main
   * binary anyway. Moreover, on Android, the main binary is dalvik. */
#ifdef __GLIBC__
  sym = dlsym(RTLD_DEFAULT, symbol);
  DEBUG_LOG("dlsym(RTLD_DEFAULT, \"%s\") = %p", symbol, sym);
  if (sym)
    return sym;
#endif

  /* Then search the symbol in our dependencies. Since we already searched in
   * libraries the system linker loaded, skip those (on glibc systems). We
   * also assume the symbol is to be found in one of the dependent libraries
   * directly, not in their own dependent libraries. Building libraries with
   * --no-allow-shlib-undefined ensures such indirect symbol dependencies don't
   * happen. */
  unsigned long hash = ElfHash(symbol);
  for (std::vector<RefPtr<LibHandle> >::const_iterator it = dependencies.begin();
       it < dependencies.end(); ++it) {
    if (!(*it)->IsSystemElf()) {
      sym = reinterpret_cast<CustomElf *>((*it).get())->GetSymbolPtr(symbol, hash);
#ifndef __GLIBC__
    } else {
      sym = (*it)->GetSymbolPtr(symbol);
#endif
    }
    if (sym)
      return sym;
  }
  return nullptr;
}

const Sym *
CustomElf::GetSymbol(const char *symbol, unsigned long hash) const
{
  /* Search symbol with the buckets and chains tables.
   * The hash computed from the symbol name gives an index in the buckets
   * table. The corresponding value in the bucket table is an index in the
   * symbols table and in the chains table.
   * If the corresponding symbol in the symbols table matches, we're done.
   * Otherwise, the corresponding value in the chains table is a new index
   * in both tables, whose corresponding symbol is tested, and so on. */
  size_t bucket = hash % buckets.numElements();
  for (size_t y = buckets[bucket]; y != STN_UNDEF; y = chains[y]) {
    if (strcmp(symbol, strtab.GetStringAt(symtab[y].st_name)))
      continue;
    return &symtab[y];
  }
  return nullptr;
}

bool
CustomElf::Contains(void *addr) const
{
  return base.Contains(addr);
}

#ifdef __ARM_EABI__
const void *
CustomElf::FindExidx(int *pcount) const
{
  if (arm_exidx) {
    *pcount = arm_exidx.numElements();
    return arm_exidx;
  }
  *pcount = 0;
  return nullptr;
}
#endif

void
CustomElf::stats(const char *when) const
{
  mappable->stats(when, GetPath());
}

bool
CustomElf::LoadSegment(const Phdr *pt_load) const
{
  if (pt_load->p_type != PT_LOAD) {
    DEBUG_LOG("%s: Elf::LoadSegment only takes PT_LOAD program headers", GetPath());
    return false;
  }

  int prot = p_flags_to_mprot(pt_load->p_flags);

  /* Mmap at page boundary */
  Addr align = PageSize();
  Addr align_offset;
  void *mapped, *where;
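  /* align_offset is the distance between p_vaddr and the previous align
   * boundary; the mapping starts that many bytes earlier, with the file
   * offset adjusted accordingly, so the mapping itself is aligned. */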
  do {
    align_offset = pt_load->p_vaddr - AlignedPtr(pt_load->p_vaddr, align);
    where = GetPtr(pt_load->p_vaddr - align_offset);
    DEBUG_LOG("%s: Loading segment @%p %c%c%c", GetPath(), where,
              prot & PROT_READ ? 'r' : '-',
              prot & PROT_WRITE ? 'w' : '-',
              prot & PROT_EXEC ? 'x' : '-');
    mapped = mappable->mmap(where, pt_load->p_filesz + align_offset,
                            prot, MAP_PRIVATE | MAP_FIXED,
                            pt_load->p_offset - align_offset);
    if ((mapped != MAP_FAILED) || (pt_load->p_vaddr == 0) ||
        (pt_load->p_align == align))
      break;
    /* The virtual address space for the library is properly aligned at
     * 16k on ARMv6 (see CustomElf::Load), and so is the first segment
     * (p_vaddr == 0). But subsequent segments may not be 16k aligned
     * and fail to mmap. In such case, try to mmap again at the p_align
     * boundary instead of page boundary. */
    DEBUG_LOG("%s: Failed to mmap, retrying", GetPath());
    align = pt_load->p_align;
  } while (1);

  if (mapped != where) {
    if (mapped == MAP_FAILED) {
      LOG("%s: Failed to mmap", GetPath());
    } else {
      LOG("%s: Didn't map at the expected location (wanted: %p, got: %p)",
          GetPath(), where, mapped);
    }
    return false;
  }

  /* Ensure the availability of all pages within the mapping if on-demand
   * decompression is disabled (MOZ_LINKER_ONDEMAND=0 or signal handler not
   * registered). */
  const char *ondemand = getenv("MOZ_LINKER_ONDEMAND");
  if (!ElfLoader::Singleton.hasRegisteredHandler() ||
      (ondemand && !strncmp(ondemand, "0", 2 /* Including '\0' */))) {
    for (Addr off = 0; off < pt_load->p_filesz + align_offset;
         off += PageSize()) {
      mappable->ensure(reinterpret_cast<char *>(mapped) + off);
    }
  }
  /* When p_memsz is greater than p_filesz, the memory between p_filesz and
   * p_memsz needs to be zeroed.
   * Above the end of the last page, and up to p_memsz, the memory is already
   * zeroed, because we mapped anonymous memory on the whole library virtual
   * address space. We just need to adjust the protection flags of that
   * anonymous memory. */
  if (pt_load->p_memsz > pt_load->p_filesz) {
    Addr file_end = pt_load->p_vaddr + pt_load->p_filesz;
    Addr mem_end = pt_load->p_vaddr + pt_load->p_memsz;
    Addr next_page = PageAlignedEndPtr(file_end);
    if (next_page > file_end) {
      /* The library is not registered at this point, so we can't rely on
       * on-demand decompression to handle missing pages here. */
      void *ptr = GetPtr(file_end);
      mappable->ensure(ptr);
      memset(ptr, 0, next_page - file_end);
    }
    if (mem_end > next_page) {
      if (mprotect(GetPtr(next_page), mem_end - next_page, prot) < 0) {
        LOG("%s: Failed to mprotect", GetPath());
        return false;
      }
    }
  }
  return true;
}

namespace {

void debug_dyn(const char *type, const Dyn *dyn)
{
  DEBUG_LOG("%s 0x%08" PRIxAddr, type, dyn->d_un.d_val);
}

} /* anonymous namespace */

bool
CustomElf::InitDyn(const Phdr *pt_dyn)
{
  /* Scan PT_DYNAMIC segment and gather some information */
  const Dyn *first_dyn = GetPtr<Dyn>(pt_dyn->p_vaddr);
  const Dyn *end_dyn = GetPtr<Dyn>(pt_dyn->p_vaddr + pt_dyn->p_filesz);
  std::vector<Word> dt_needed;
  size_t symnum = 0;
  for (const Dyn *dyn = first_dyn; dyn < end_dyn && dyn->d_tag; dyn++) {
    switch (dyn->d_tag) {
      case DT_NEEDED:
        debug_dyn("DT_NEEDED", dyn);
        dt_needed.push_back(dyn->d_un.d_val);
        break;
      case DT_HASH:
        {
          debug_dyn("DT_HASH", dyn);
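          /* A SysV hash table starts with two words, nbucket and nchain
           * (nchain is also the number of symbol table entries), followed by
           * the bucket array and the chain array. */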
          const Word *hash_table_header = GetPtr<Word>(dyn->d_un.d_ptr);
          symnum = hash_table_header[1];
          buckets.Init(&hash_table_header[2], hash_table_header[0]);
          chains.Init(&*buckets.end());
        }
        break;
      case DT_STRTAB:
        debug_dyn("DT_STRTAB", dyn);
        strtab.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case DT_SYMTAB:
        debug_dyn("DT_SYMTAB", dyn);
        symtab.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case DT_SYMENT:
        debug_dyn("DT_SYMENT", dyn);
        if (dyn->d_un.d_val != sizeof(Sym)) {
          LOG("%s: Unsupported DT_SYMENT", GetPath());
          return false;
        }
        break;
      case DT_TEXTREL:
        if (strcmp("libflashplayer.so", GetName()) == 0) {
          has_text_relocs = true;
        } else {
          LOG("%s: Text relocations are not supported", GetPath());
          return false;
        }
        break;
      case DT_STRSZ: /* Ignored */
        debug_dyn("DT_STRSZ", dyn);
        break;
      case UNSUPPORTED_RELOC():
      case UNSUPPORTED_RELOC(SZ):
      case UNSUPPORTED_RELOC(ENT):
        LOG("%s: Unsupported relocations", GetPath());
        return false;
      case RELOC():
        debug_dyn(STR_RELOC(), dyn);
        relocations.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case RELOC(SZ):
        debug_dyn(STR_RELOC(SZ), dyn);
        relocations.InitSize(dyn->d_un.d_val);
        break;
      case RELOC(ENT):
        debug_dyn(STR_RELOC(ENT), dyn);
        if (dyn->d_un.d_val != sizeof(Reloc)) {
          LOG("%s: Unsupported DT_RELENT", GetPath());
          return false;
        }
        break;
      case DT_JMPREL:
        debug_dyn("DT_JMPREL", dyn);
        jumprels.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case DT_PLTRELSZ:
        debug_dyn("DT_PLTRELSZ", dyn);
        jumprels.InitSize(dyn->d_un.d_val);
        break;
      case DT_PLTGOT:
        debug_dyn("DT_PLTGOT", dyn);
        break;
      case DT_INIT:
        debug_dyn("DT_INIT", dyn);
        init = dyn->d_un.d_ptr;
        break;
      case DT_INIT_ARRAY:
        debug_dyn("DT_INIT_ARRAY", dyn);
        init_array.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case DT_INIT_ARRAYSZ:
        debug_dyn("DT_INIT_ARRAYSZ", dyn);
        init_array.InitSize(dyn->d_un.d_val);
        break;
      case DT_FINI:
        debug_dyn("DT_FINI", dyn);
        fini = dyn->d_un.d_ptr;
        break;
      case DT_FINI_ARRAY:
        debug_dyn("DT_FINI_ARRAY", dyn);
        fini_array.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case DT_FINI_ARRAYSZ:
        debug_dyn("DT_FINI_ARRAYSZ", dyn);
        fini_array.InitSize(dyn->d_un.d_val);
        break;
      case DT_PLTREL:
        if (dyn->d_un.d_val != RELOC()) {
          LOG("%s: Error: DT_PLTREL is not " STR_RELOC(), GetPath());
          return false;
        }
        break;
      case DT_FLAGS:
        {
          Addr flags = dyn->d_un.d_val;
          /* Treat as a DT_TEXTREL tag */
          if (flags & DF_TEXTREL) {
            if (strcmp("libflashplayer.so", GetName()) == 0) {
              has_text_relocs = true;
            } else {
              LOG("%s: Text relocations are not supported", GetPath());
              return false;
            }
          }
          /* DF_SYMBOLIC is equivalent to a DT_SYMBOLIC tag, which this linker
           * already honors by default, so it can safely be ignored. */
          flags &= ~DF_SYMBOLIC;
          if (flags)
            LOG("%s: Warning: flags #%" PRIxAddr " not handled",
                GetPath(), flags);
        }
        break;
      case DT_SONAME: /* Should match GetName(), but doesn't matter */
      case DT_SYMBOLIC: /* Indicates internal symbols should be looked up in
                         * the library itself first instead of the executable,
                         * which is actually what this linker does by default */
      case RELOC(COUNT): /* Indicates how many relocations are relative, which
                          * is usually used to skip relocations on prelinked
                          * libraries. They are not supported anyways. */
      case UNSUPPORTED_RELOC(COUNT): /* This should error out, but it doesn't
                                      * really matter. */
      case DT_FLAGS_1: /* Additional linker-internal flags that we don't care about. See
                        * DF_1_* values in src/include/elf/common.h in binutils. */
      case DT_VERSYM: /* DT_VER* entries are used for symbol versioning, which */
      case DT_VERDEF: /* this linker doesn't support yet. */
      case DT_VERDEFNUM:
      case DT_VERNEED:
      case DT_VERNEEDNUM:
        /* Ignored */
        break;
      default:
        LOG("%s: Warning: dynamic header type #%" PRIxAddr" not handled",
            GetPath(), dyn->d_tag);
    }
  }

  if (!buckets || !symnum) {
    LOG("%s: Missing or broken DT_HASH", GetPath());
    return false;
  }
  if (!strtab) {
    LOG("%s: Missing DT_STRTAB", GetPath());
    return false;
  }
  if (!symtab) {
    LOG("%s: Missing DT_SYMTAB", GetPath());
    return false;
  }

  /* Load dependent libraries */
  for (size_t i = 0; i < dt_needed.size(); i++) {
    const char *name = strtab.GetStringAt(dt_needed[i]);
    RefPtr<LibHandle> handle =
      ElfLoader::Singleton.Load(name, RTLD_GLOBAL | RTLD_LAZY, this);
    if (!handle)
      return false;
    dependencies.push_back(handle);
  }

  return true;
}

bool
CustomElf::Relocate()
{
  DEBUG_LOG("Relocate %s @%p", GetPath(), static_cast<void *>(base));
  uint32_t symtab_index = (uint32_t) -1;
  void *symptr = nullptr;
  for (Array<Reloc>::iterator rel = relocations.begin();
       rel < relocations.end(); ++rel) {
    /* Location of the relocation */
    void *ptr = GetPtr(rel->r_offset);

    /* R_*_RELATIVE relocations apply directly at the given location */
    if (ELF_R_TYPE(rel->r_info) == R_RELATIVE) {
      *(void **) ptr = GetPtr(rel->GetAddend(base));
      continue;
    }
    /* Other relocation types need a symbol resolution */
    /* Avoid symbol resolution when it's the same symbol as last iteration */
    if (symtab_index != ELF_R_SYM(rel->r_info)) {
      symtab_index = ELF_R_SYM(rel->r_info);
      const Sym sym = symtab[symtab_index];
      if (sym.st_shndx != SHN_UNDEF) {
        symptr = GetPtr(sym.st_value);
      } else {
        /* TODO: handle symbol resolving to nullptr vs. being undefined. */
        symptr = GetSymbolPtrInDeps(strtab.GetStringAt(sym.st_name));
      }
    }

    if (symptr == nullptr)
      LOG("%s: Warning: relocation to NULL @0x%08" PRIxAddr,
          GetPath(), rel->r_offset);

    /* Apply relocation */
    switch (ELF_R_TYPE(rel->r_info)) {
      case R_GLOB_DAT:
        /* R_*_GLOB_DAT relocations simply use the symbol value */
        *(void **) ptr = symptr;
        break;
      case R_ABS:
        /* R_*_ABS* relocations add the addend to the symbol value */
        *(const char **) ptr = (const char *)symptr + rel->GetAddend(base);
        break;
      default:
        LOG("%s: Unsupported relocation type: 0x%" PRIxAddr,
            GetPath(), ELF_R_TYPE(rel->r_info));
        return false;
    }
  }
  return true;
}

bool
CustomElf::RelocateJumps()
{
  /* TODO: Dynamic symbol resolution */
  for (Array<Reloc>::iterator rel = jumprels.begin();
       rel < jumprels.end(); ++rel) {
    /* Location of the relocation */
    void *ptr = GetPtr(rel->r_offset);

    /* Only R_*_JMP_SLOT relocations are expected */
    if (ELF_R_TYPE(rel->r_info) != R_JMP_SLOT) {
      LOG("%s: Jump relocation type mismatch", GetPath());
      return false;
    }

    /* TODO: Avoid code duplication with the relocations above */
    const Sym sym = symtab[ELF_R_SYM(rel->r_info)];
    void *symptr;
    if (sym.st_shndx != SHN_UNDEF)
      symptr = GetPtr(sym.st_value);
    else
      symptr = GetSymbolPtrInDeps(strtab.GetStringAt(sym.st_name));

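    /* An unresolved weak symbol is only a warning and resolves to null;
     * an unresolved strong symbol is an error. */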
    if (symptr == nullptr) {
      LOG("%s: %s: relocation to NULL @0x%08" PRIxAddr " for symbol \"%s\"",
          GetPath(),
          (ELF_ST_BIND(sym.st_info) == STB_WEAK) ? "Warning" : "Error",
          rel->r_offset, strtab.GetStringAt(sym.st_name));
      if (ELF_ST_BIND(sym.st_info) != STB_WEAK)
        return false;
    }
    /* Apply relocation */
    *(void **) ptr = symptr;
  }
  return true;
}

bool
CustomElf::CallInit()
{
  if (init)
    CallFunction(init);

  for (Array<void *>::iterator it = init_array.begin();
       it < init_array.end(); ++it) {
    /* Android x86 NDK wrongly puts 0xffffffff in INIT_ARRAY */
    if (*it && *it != reinterpret_cast<void *>(-1))
      CallFunction(*it);
  }
  initialized = true;
  return true;
}

void
CustomElf::CallFini()
{
  if (!initialized)
    return;
  for (Array<void *>::reverse_iterator it = fini_array.rbegin();
       it < fini_array.rend(); ++it) {
    /* Android x86 NDK wrongly puts 0xffffffff in FINI_ARRAY */
    if (*it && *it != reinterpret_cast<void *>(-1))
      CallFunction(*it);
  }
  if (fini)
    CallFunction(fini);
}

Mappable *
CustomElf::GetMappable() const
{
  if (!mappable)
    return nullptr;
  if (mappable->GetKind() == Mappable::MAPPABLE_EXTRACT_FILE)
    return mappable;
  return ElfLoader::GetMappableFromPath(GetPath());
}