|
/* NOTE(review): this block appears to be the program-header-scan phase of
   glibc's _dl_map_object_from_fd (elf/dl-load.c) — confirm against upstream.
   It walks the ELF program header table of the object being loaded, records
   where PT_DYNAMIC/PT_PHDR/PT_TLS data will live, collects all PT_LOAD
   commands into LOADCMDS, and finally hands them to _dl_map_segments to do
   the actual mmap work.  On any malformed input it sets ERRSTRING and jumps
   to the function's common error exit, call_lose (defined outside this
   fragment).  */

/* On most platforms presume that PT_GNU_STACK is absent and the stack is
   executable.  Other platforms default to a nonexecutable stack and don't
   need PT_GNU_STACK to do so.  If a PT_GNU_STACK header is present, its
   p_flags override this default (see the PT_GNU_STACK case below).  */
uint_fast16_t stack_flags = DEFAULT_STACK_PERMS;

{
  /* Scan the program header table, collecting its load commands.
     There can be at most l_phnum PT_LOAD entries, so a VLA of that
     size is sufficient.  */
  struct loadcmd loadcmds[l->l_phnum];
  size_t nloadcmds = 0;
  /* Set when two consecutive PT_LOAD segments do not abut page-wise,
     i.e. the mapping will have gaps; passed on to _dl_map_segments.  */
  bool has_holes = false;

  /* The struct is initialized to zero so this is not necessary:
     l->l_ld = 0;
     l->l_phdr = 0;
     l->l_addr = 0; */
  for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
    switch (ph->p_type)
      {
        /* These entries tell us where to find things once the file's
           segments are mapped in.  We record the addresses it says
           verbatim, and later correct for the run-time load address.  */
      case PT_DYNAMIC:
        if (ph->p_filesz)
          {
            /* Debuginfo only files from "objcopy --only-keep-debug"
               contain a PT_DYNAMIC segment with p_filesz == 0.  Skip
               such a segment to avoid a crash later.  */
            l->l_ld = (void *) ph->p_vaddr;
            l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
          }
        break;

      case PT_PHDR:
        l->l_phdr = (void *) ph->p_vaddr;
        break;

      case PT_LOAD:
        /* A load command tells us to map in part of the file.
           We record the load commands and process them all later.  */
        if (__glibc_unlikely ((ph->p_align & (GLRO(dl_pagesize) - 1)) != 0))
          {
            errstring = N_("ELF load command alignment not page-aligned");
            goto call_lose;
          }
        /* File offset and virtual address must be congruent modulo the
           segment alignment, otherwise the file pages cannot be mapped
           at the requested address.  */
        if (__glibc_unlikely (((ph->p_vaddr - ph->p_offset)
                               & (ph->p_align - 1)) != 0))
          {
            errstring
              = N_("ELF load command address/offset not properly aligned");
            goto call_lose;
          }

        /* Record this segment: page-aligned map bounds, the exact end of
           file-backed data (dataend), the end including BSS (allocend),
           and the page-aligned file offset to map from.  */
        struct loadcmd *c = &loadcmds[nloadcmds++];
        c->mapstart = ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize));
        c->mapend = ALIGN_UP (ph->p_vaddr + ph->p_filesz, GLRO(dl_pagesize));
        c->dataend = ph->p_vaddr + ph->p_filesz;
        c->allocend = ph->p_vaddr + ph->p_memsz;
        c->mapoff = ALIGN_DOWN (ph->p_offset, GLRO(dl_pagesize));

        /* Determine whether there is a gap between the last segment
           and this one.  */
        if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
          has_holes = true;

        /* Optimize a common case: when the PF_x bit values coincide
           with the PROT_x bit values, PF_TO_PROT is a packed lookup
           table translating the three p_flags bits to mmap protection
           bits with one shift, instead of the bit-by-bit tests in the
           generic #else branch below.  */
#if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
        c->prot = (PF_TO_PROT
                   >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
#else
        c->prot = 0;
        if (ph->p_flags & PF_R)
          c->prot |= PROT_READ;
        if (ph->p_flags & PF_W)
          c->prot |= PROT_WRITE;
        if (ph->p_flags & PF_X)
          c->prot |= PROT_EXEC;
#endif
        break;

      case PT_TLS:
        if (ph->p_memsz == 0)
          /* Nothing to do for an empty segment.  */
          break;

        /* Record the TLS template: total block size, alignment, and the
           size of the initialized image within it (the rest is
           zero-filled TLS bss).  */
        l->l_tls_blocksize = ph->p_memsz;
        l->l_tls_align = ph->p_align;
        if (ph->p_align == 0)
          l->l_tls_firstbyte_offset = 0;
        else
          l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
        l->l_tls_initimage_size = ph->p_filesz;
        /* Since we don't know the load address yet only store the
           offset.  We will adjust it later.  */
        l->l_tls_initimage = (void *) ph->p_vaddr;

        /* If not loading the initial set of shared libraries,
           check whether we should permit loading a TLS segment.  */
        if (__glibc_likely (l->l_type == lt_library)
            /* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
               not set up TLS data structures, so don't use them now.  */
            || __glibc_likely (GL(dl_tls_dtv_slotinfo_list) != NULL))
          {
            /* Assign the next available module ID.  */
            l->l_tls_modid = _dl_next_tls_modid ();
            break;
          }

#ifdef SHARED
        /* We are loading the executable itself when the dynamic
           linker was executed directly.  The setup will happen
           later.  Otherwise, the TLS data structures are already
           initialized, and we assigned a TLS modid above.  */
        assert (l->l_prev == NULL || (mode & __RTLD_AUDIT) != 0);
#else
        /* In a static binary there is no runtime TLS setup path for a
           dynamically loaded TLS segment; reaching here is a bug.  */
        assert (false && "TLS not initialized in static application");
#endif
        break;

      case PT_GNU_STACK:
        /* The object explicitly states its stack permissions; override
           the platform default chosen above.  */
        stack_flags = ph->p_flags;
        break;

      case PT_GNU_RELRO:
        /* Range to be made read-only after relocation is done.  */
        l->l_relro_addr = ph->p_vaddr;
        l->l_relro_size = ph->p_memsz;
        break;

      case PT_NOTE:
        /* Note segments can carry property/ABI checks; a nonzero return
           means the object must be rejected.  */
        if (_dl_process_pt_note (l, ph, fd, fbp))
          {
            errstring = N_("cannot process note segment");
            goto call_lose;
          }
        break;
      }

  if (__glibc_unlikely (nloadcmds == 0))
    {
      /* This only happens for a bogus object that will be caught with
         another error below.  But we don't want to go through the
         calculations below using NLOADCMDS - 1.  */
      errstring = N_("object file has no loadable segments");
      goto call_lose;
    }

  if (__glibc_unlikely (type != ET_DYN)
      && __glibc_unlikely ((mode & __RTLD_OPENEXEC) == 0))
    {
      /* This object is loaded at a fixed address.  This must never
         happen for objects loaded with dlopen.  */
      errstring = N_("cannot dynamically load executable");
      goto call_lose;
    }

  /* Length of the sections to be loaded.  NLOADCMDS >= 1 was verified
     above, and PT_LOAD segments appear in ascending address order, so
     this spans from the first segment's map start to the last
     segment's allocation end.  */
  maplength = loadcmds[nloadcmds - 1].allocend - loadcmds[0].mapstart;

  /* Now process the load commands and map segments into memory.
     This is responsible for filling in:
     l_map_start, l_map_end, l_addr, l_contiguous, l_text_end, l_phdr
  */
  errstring = _dl_map_segments (l, fd, header, type, loadcmds, nloadcmds,
                                maplength, has_holes, loader);
  if (__glibc_unlikely (errstring != NULL))
    goto call_lose;
}