
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <ldsodefs.h>

#include "dynamic-link.h"
#include <abi-tag.h>
#include <stackinfo.h>
#include <sysdep.h>

#include "dl-load.h"
#include "dl-load-i.h"

#include <dl-map-segments.h>
#include <dl-unmap-segments.h>
#include <dl-machine-reject-phdr.h>
#include <dl-prop.h>

#include <not-cancel.h>

#include <endian.h>
#if BYTE_ORDER == BIG_ENDIAN
# define byteorder ELFDATA2MSB
#elif BYTE_ORDER == LITTLE_ENDIAN
# define byteorder ELFDATA2LSB
#else
# error "Unknown BYTE_ORDER " BYTE_ORDER
# define byteorder ELFDATANONE
#endif

extern int __stack_prot attribute_relro attribute_hidden;

/* Return 1 if the first SELFMAG (4) bytes of BUF are the ELF magic
   number "\177ELF", 0 otherwise.  */
int
is_elf_magic (const char *buf)
{
  return memcmp (buf, ELFMAG, SELFMAG) == 0;
}

/* Run the open-time sanity checks on the ELF object on descriptor FD
   whose initial bytes have been read into FBP.  LOADER is the map of
   the object that requested this load (may be NULL) and MODE carries
   the RTLD_* / __RTLD_* flags from the caller.

   Return values:
     0 -- the file passed all checks;
     1 -- hard error: *PERRSTR is set to an untranslated (N_) message
          and *PERRVAL may carry an errno value;
     2 -- silently skip this file: wrong ELF class, rejected by
          elf_machine_matches_host / elf_machine_reject_phdr_p, or an
          ABI note demanding a newer kernel.  On a class mismatch
          *FOUND_OTHER_CLASS is set so the caller can diagnose it.  */
int
do_verify_elf (int fd,
               struct filebuf *fbp, struct link_map *loader,
               int mode, bool *found_other_class,
               int *perrval, const char **perrstr)
{
  /* This is the expected ELF header.  */
#ifndef VALID_ELF_HEADER
# define VALID_ELF_HEADER(hdr,exp,size)  (memcmp (hdr, exp, size) == 0)
# define VALID_ELF_OSABI(osabi)          (osabi == ELFOSABI_SYSV)
# define VALID_ELF_ABIVERSION(osabi,ver) (ver == 0)
#elif defined MORE_ELF_HEADER_DATA
  MORE_ELF_HEADER_DATA;
#endif
  static const unsigned char expected[EI_NIDENT] =
    {
      [EI_MAG0] = ELFMAG0,
      [EI_MAG1] = ELFMAG1,
      [EI_MAG2] = ELFMAG2,
      [EI_MAG3] = ELFMAG3,
      [EI_CLASS] = ELFW(CLASS),
      [EI_DATA] = byteorder,
      [EI_VERSION] = EV_CURRENT,
      [EI_OSABI] = ELFOSABI_SYSV,
      [EI_ABIVERSION] = 0
    };
  /* Header of the .note.ABI-tag note we look for: vendor "GNU"
     (4 bytes including NUL), a 16-byte descriptor, note type 1.  */
  static const struct
  {
    ElfW(Word) vendorlen;
    ElfW(Word) datalen;
    ElfW(Word) type;
    char vendor[4];
  } expected_note = { 4, 16, 1, "GNU" };

  ElfW(Ehdr) *ehdr;
  ElfW(Phdr) *phdr, *ph;
  ElfW(Word) *abi_note;
  /* Heap copy of a note segment when it is too large for alloca;
     freed on every exit path below.  */
  ElfW(Word) *abi_note_malloced = NULL;
  unsigned int osversion;
  size_t maplength;

  /* This is where the ELF header is loaded.  */
  ehdr = (ElfW(Ehdr) *) fbp->buf;

  /* Now run the tests.  */
  if (__glibc_unlikely (fbp->len < (ssize_t) sizeof (ElfW(Ehdr))))
    {
      *perrval = errno;
      *perrstr = (*perrval == 0 ? N_("file too short") : N_("cannot read file data"));
      /* Shared hard-error exit; the checks below jump here after
         setting *PERRSTR (and possibly *PERRVAL).  */
    call_lose:
      return 1;
    }

  /* See whether the ELF header is what we expect.  */
  if (__glibc_unlikely (! VALID_ELF_HEADER (ehdr->e_ident, expected, EI_ABIVERSION)
                        || !VALID_ELF_ABIVERSION (ehdr->e_ident[EI_OSABI],
                                                  ehdr->e_ident[EI_ABIVERSION])
                        || memcmp (&ehdr->e_ident[EI_PAD], &expected[EI_PAD], EI_NIDENT - EI_PAD) != 0))
    {
      /* Something is wrong.  Re-run the individual checks to pick the
         most specific error message.  */
      const Elf32_Word *magp = (const void *) ehdr->e_ident;
      if (*magp !=
        #if BYTE_ORDER == LITTLE_ENDIAN
          ((ELFMAG0 << (EI_MAG0 * 8))
            | (ELFMAG1 << (EI_MAG1 * 8))
            | (ELFMAG2 << (EI_MAG2 * 8))
            | (ELFMAG3 << (EI_MAG3 * 8)))
        #else
          ((ELFMAG0 << (EI_MAG3 * 8))
            | (ELFMAG1 << (EI_MAG2 * 8))
            | (ELFMAG2 << (EI_MAG1 * 8))
            | (ELFMAG3 << (EI_MAG0 * 8)))
        #endif
          )
        *perrstr = N_("invalid ELF header");
      else if (ehdr->e_ident[EI_CLASS] != ELFW(CLASS))
        {
          /* This is not a fatal error.  On architectures where both
             32-bit and 64-bit binaries can be run, this happens when
             the file is of the other class.  */
          *found_other_class = true;
          goto close_and_out;
        }
      else if (ehdr->e_ident[EI_DATA] != byteorder)
        {
          if (BYTE_ORDER == BIG_ENDIAN)
            *perrstr = N_("ELF file data encoding not big-endian");
          else
            *perrstr = N_("ELF file data encoding not little-endian");
        }
      else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT)
        *perrstr = N_("ELF file version ident does not match current one");
      /* XXX We should be able to set system specific versions which are allowed here.  */
      else if (!VALID_ELF_OSABI (ehdr->e_ident[EI_OSABI]))
        *perrstr = N_("ELF file OS ABI invalid");
      else if (!VALID_ELF_ABIVERSION (ehdr->e_ident[EI_OSABI],
                                      ehdr->e_ident[EI_ABIVERSION]))
        *perrstr = N_("ELF file ABI version invalid");
      else if (memcmp (&ehdr->e_ident[EI_PAD], &expected[EI_PAD], EI_NIDENT - EI_PAD) != 0)
        *perrstr = N_("nonzero padding in e_ident");
      else
        /* Otherwise we don't know what went wrong.  */
        *perrstr = N_("internal error");

      goto call_lose;
    }

  if (__glibc_unlikely (ehdr->e_version != EV_CURRENT))
    {
      *perrstr = N_("ELF file version does not match current one");
      goto call_lose;
    }
  if (! __glibc_likely (elf_machine_matches_host (ehdr)))
    goto close_and_out;
  else if (__glibc_unlikely (ehdr->e_type != ET_DYN && ehdr->e_type != ET_EXEC))
    {
      *perrstr = N_("only ET_DYN and ET_EXEC can be loaded");
      goto call_lose;
    }
  else if (__glibc_unlikely (ehdr->e_type == ET_EXEC && (mode & __RTLD_OPENEXEC) == 0))
    {
      /* BZ #16634. It is an error to dlopen ET_EXEC (unless
          __RTLD_OPENEXEC is explicitly set).  We return error here
          so that code in _dl_map_object_from_fd does not try to set
          l_tls_modid for this module.  */
      *perrstr = N_("cannot dynamically load executable");
      goto call_lose;
    }
  else if (__glibc_unlikely (ehdr->e_phentsize != sizeof (ElfW(Phdr))))
    {
      *perrstr = N_("ELF file's phentsize not the expected size");
      goto call_lose;
    }

  /* Get at the program header table; read it from the file if it was
     not fully contained in the initial buffer.  NOTE(review):
     MAPLENGTH is derived from the untrusted e_phnum field and feeds
     alloca below -- confirm callers bound e_phnum.  */
  maplength = ehdr->e_phnum * sizeof (ElfW(Phdr));
  if (ehdr->e_phoff + maplength <= (size_t) fbp->len)
    phdr = (void *) (fbp->buf + ehdr->e_phoff);
  else
    {
      phdr = alloca (maplength);
      __lseek (fd, ehdr->e_phoff, SEEK_SET);
      if ((size_t) __read_nocancel (fd, (void *) phdr, maplength) != maplength)
        {
          /* Shared short-read exit; also reached from the note
             handling below.  Records errno for the caller.  */
        read_error:
          *perrval = errno;
          *perrstr = N_("cannot read file data");
          goto call_lose;
        }
    }

  if (__glibc_unlikely (elf_machine_reject_phdr_p (phdr, ehdr->e_phnum, fbp->buf, fbp->len, loader, fd)))
    goto close_and_out;

  /* Check .note.ABI-tag if present.  */
  for (ph = phdr; ph < &phdr[ehdr->e_phnum]; ++ph)
    if (ph->p_type == PT_NOTE && ph->p_filesz >= 32 && ph->p_align >= 4)
      {
        ElfW(Addr) size = ph->p_filesz;
        /* NB: Some PT_NOTE segment may have alignment value of 0
            or 1.  gABI specifies that PT_NOTE segments should be
            aligned to 4 bytes in 32-bit objects and to 8 bytes in
            64-bit objects.  As a Linux extension, we also support
            4 byte alignment in 64-bit objects.  If p_align is less
            than 4, we treat alignment as 4 bytes since some note
            segments have 0 or 1 byte alignment.   */
        ElfW(Addr) align = ph->p_align;
        if (align < 4)
          align = 4;
        else if (align != 4 && align != 8)
          continue;

        if (ph->p_offset + size <= (size_t) fbp->len)
          abi_note = (void *) (fbp->buf + ph->p_offset);
        else
          {
            /* Note: __libc_use_alloca is not usable here, because
                thread info may not have been set up yet.  */
            if (size < __MAX_ALLOCA_CUTOFF)
              abi_note = alloca (size);
            else
              {
                /* There could be multiple PT_NOTEs.  */
                abi_note_malloced = realloc (abi_note_malloced, size);
                if (abi_note_malloced == NULL)
                  goto read_error;

                abi_note = abi_note_malloced;
              }
            __lseek (fd, ph->p_offset, SEEK_SET);
            if (__read_nocancel (fd, (void *) abi_note, size) != size)
              {
                free (abi_note_malloced);
                goto read_error;
              }
          }

        /* Walk the notes in this segment looking for one whose header
           matches EXPECTED_NOTE.  SIZE is forced to 0 when the
           remaining data cannot hold another 32-byte note, which
           makes us skip to the next PT_NOTE segment below.  */
        while (memcmp (abi_note, &expected_note, sizeof (expected_note)))
          {
            ElfW(Addr) note_size = ELF_NOTE_NEXT_OFFSET (abi_note[0], abi_note[1], align);

            if (size - 32 < note_size)
              {
                size = 0;
                break;
              }
            size -= note_size;
            abi_note = (void *) abi_note + note_size;
          }

        if (size == 0)
          continue;

        /* Descriptor word 4 is the OS tag; words 5..7 are the minimum
           kernel version (major, minor, patch) packed into one int.  */
        osversion = (abi_note[5] & 0xff) * 65536
                  + (abi_note[6] & 0xff) * 256
                  + (abi_note[7] & 0xff);
        if (abi_note[4] != __ABI_TAG_OS
            || (GLRO(dl_osversion) && GLRO(dl_osversion) < osversion))
          {
            /* Shared "silently skip this file" exit; also reached on
               class mismatch, host mismatch and phdr rejection.  */
          close_and_out:
            free (abi_note_malloced);
            return 2;
          }

        break;
      }
  free (abi_note_malloced);

  return 0;
}

/* Map the loadable segments of the ELF object on descriptor FD into
   memory and fill in the link map L.  The ELF header was already read
   into FBP (by open_verify).  LOADER and NSID identify who requested
   the load, MODE carries the RTLD_* / __RTLD_* flags, and *STACK_ENDP
   is handed to the make-stack-executable hook if this object requires
   an executable stack.

   On success return 0 with FD closed.  On failure return 1 with
   *PERRSTR set to an untranslated (N_) message; the _errno paths also
   store the failing errno in *PERRVAL.  The caller owns cleanup of L
   on failure.

   Fix vs. previous revision: a short read of the program header table
   used to jump straight to call_lose, losing the errno of the failed
   read; it now takes the call_lose_errno path (consistent with
   do_verify_elf's read_error handling).  */
int do_load_elf (int fd, struct filebuf *fbp,
                 struct link_map *l, struct link_map *loader, Lmid_t nsid,
                 int mode, void **stack_endp,
                 int *perrval, const char **perrstr)
{
  const ElfW(Ehdr) *header;
  const ElfW(Phdr) *phdr;
  const ElfW(Phdr) *ph;
  size_t maplength;
  int type;

  /* This is the ELF header.  We read it in `open_verify'.  */
  header = (void *) fbp->buf;

  /* Extract the remaining details we need from the ELF header
     and then read in the program header table.  */
  l->l_entry = header->e_entry;
  type = header->e_type;
  l->l_phnum = header->e_phnum;

  maplength = header->e_phnum * sizeof (ElfW(Phdr));
  if (header->e_phoff + maplength <= (size_t) fbp->len)
    phdr = (void *) (fbp->buf + header->e_phoff);
  else
    {
      phdr = alloca (maplength);
      __lseek (fd, header->e_phoff, SEEK_SET);
      if ((size_t) __read_nocancel (fd, (void *) phdr, maplength) != maplength)
        {
          *perrstr = N_("cannot read file data");
          /* Fall through into call_lose_errno so the errno of the
             failed read is recorded.  The labels live inside this
             branch; error paths elsewhere in this function jump to
             them.  */
        call_lose_errno:
          *perrval = errno;
        call_lose:
          return 1;
        }
    }

  /* On most platforms presume that PT_GNU_STACK is absent and the stack is
     executable.  Other platforms default to a nonexecutable stack and don't
     need PT_GNU_STACK to do so.  */
  uint_fast16_t stack_flags = DEFAULT_STACK_PERMS;

  {
    /* Scan the program header table, collecting its load commands.  */
    struct loadcmd loadcmds[l->l_phnum];
    size_t nloadcmds = 0;
    bool has_holes = false;

    /* The struct is initialized to zero so this is not necessary:
    l->l_ld = 0;
    l->l_phdr = 0;
    l->l_addr = 0; */
    for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
      switch (ph->p_type)
        {
          /* These entries tell us where to find things once the file's
             segments are mapped in.  We record the addresses it says
             verbatim, and later correct for the run-time load address.  */
        case PT_DYNAMIC:
          if (ph->p_filesz)
            {
              /* Debuginfo only files from "objcopy --only-keep-debug"
                 contain a PT_DYNAMIC segment with p_filesz == 0.  Skip
                 such a segment to avoid a crash later.  */
              l->l_ld = (void *) ph->p_vaddr;
              l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
            }
          break;

        case PT_PHDR:
          l->l_phdr = (void *) ph->p_vaddr;
          break;

        case PT_LOAD:
          /* A load command tells us to map in part of the file.
             We record the load commands and process them all later.  */
          if (__glibc_unlikely ((ph->p_align & (GLRO(dl_pagesize) - 1)) != 0))
            {
              *perrstr = N_("ELF load command alignment not page-aligned");
              goto call_lose;
            }
          if (__glibc_unlikely (((ph->p_vaddr - ph->p_offset)
                                 & (ph->p_align - 1)) != 0))
            {
              *perrstr = N_("ELF load command address/offset not properly aligned");
              goto call_lose;
            }

          struct loadcmd *c = &loadcmds[nloadcmds++];
          c->mapstart = ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize));
          c->mapend = ALIGN_UP (ph->p_vaddr + ph->p_filesz, GLRO(dl_pagesize));
          c->dataend = ph->p_vaddr + ph->p_filesz;
          c->allocend = ph->p_vaddr + ph->p_memsz;
          c->mapoff = ALIGN_DOWN (ph->p_offset, GLRO(dl_pagesize));

          /* Determine whether there is a gap between the last segment
             and this one.  */
          if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
            has_holes = true;

          /* Optimize a common case.  */
#if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
          c->prot = (PF_TO_PROT >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
#else
          c->prot = 0;
          if (ph->p_flags & PF_R)
            c->prot |= PROT_READ;
          if (ph->p_flags & PF_W)
            c->prot |= PROT_WRITE;
          if (ph->p_flags & PF_X)
            c->prot |= PROT_EXEC;
#endif
          break;

        case PT_TLS:
          if (ph->p_memsz == 0)
            /* Nothing to do for an empty segment.  */
            break;

          l->l_tls_blocksize = ph->p_memsz;
          l->l_tls_align = ph->p_align;
          if (ph->p_align == 0)
            l->l_tls_firstbyte_offset = 0;
          else
            l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
          l->l_tls_initimage_size = ph->p_filesz;
          /* Since we don't know the load address yet only store the
             offset.  We will adjust it later.  */
          l->l_tls_initimage = (void *) ph->p_vaddr;

          /* If not loading the initial set of shared libraries,
             check whether we should permit loading a TLS segment.  */
          if (__glibc_likely (l->l_type == lt_library)
              /* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
                 not set up TLS data structures, so don't use them now.  */
              || __glibc_likely (GL(dl_tls_dtv_slotinfo_list) != NULL))
            {
              /* Assign the next available module ID.  */
              l->l_tls_modid = _dl_next_tls_modid ();
              break;
            }

#ifdef SHARED
          /* We are loading the executable itself when the dynamic
             linker was executed directly.  The setup will happen
             later.  Otherwise, the TLS data structures are already
             initialized, and we assigned a TLS modid above.  */
          assert (l->l_prev == NULL || (mode & __RTLD_AUDIT) != 0);
#else
          assert (false && "TLS not initialized in static application");
#endif
          break;

        case PT_GNU_STACK:
          stack_flags = ph->p_flags;
          break;

        case PT_GNU_RELRO:
          l->l_relro_addr = ph->p_vaddr;
          l->l_relro_size = ph->p_memsz;
          break;

        case PT_NOTE:
          if (_dl_process_pt_note (l, ph, fd, fbp))
            {
              *perrstr = N_("cannot process note segment");
              goto call_lose;
            }
          break;
        }

    if (__glibc_unlikely (nloadcmds == 0))
      {
        /* This only happens for a bogus object that will be caught with
           another error below.  But we don't want to go through the
           calculations below using NLOADCMDS - 1.  */
        *perrstr = N_("object file has no loadable segments");
        goto call_lose;
      }

    /* dlopen of an executable is not valid because it is not possible
       to perform proper relocations, handle static TLS, or run the
       ELF constructors.  For PIE, the check needs the dynamic
       section, so there is another check below.  */
    if (__glibc_unlikely (type != ET_DYN)
        && __glibc_unlikely ((mode & __RTLD_OPENEXEC) == 0))
      {
        /* This object is loaded at a fixed address.  This must never
           happen for objects loaded with dlopen.  */
        *perrstr = N_("cannot dynamically load executable");
        goto call_lose;
      }

    /* Length of the sections to be loaded.  */
    maplength = loadcmds[nloadcmds - 1].allocend - loadcmds[0].mapstart;

    /* Now process the load commands and map segments into memory.
       This is responsible for filling in:
       l_map_start, l_map_end, l_addr, l_contiguous, l_text_end, l_phdr
     */
    *perrstr = _dl_map_segments (l, fd, header, type, loadcmds, nloadcmds, maplength, has_holes, loader);
    if (__glibc_unlikely (*perrstr != NULL))
      goto call_lose;
  }

  if (l->l_ld == 0)
    {
      if (__glibc_unlikely (type == ET_DYN))
        {
          *perrstr = N_("object file has no dynamic section");
          goto call_lose;
        }
    }
  else
    /* Relocate the recorded dynamic-section address by the run-time
       load address now that it is known.  */
    l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);

  elf_get_dynamic_info (l, NULL);

  /* Make sure we are not dlopen'ing an object that has the
     DF_1_NOOPEN flag set, or a PIE object.  */
  if ((__glibc_unlikely (l->l_flags_1 & DF_1_NOOPEN)
       && (mode & __RTLD_DLOPEN))
      || (__glibc_unlikely (l->l_flags_1 & DF_1_PIE)
          && __glibc_unlikely ((mode & __RTLD_OPENEXEC) == 0)))
    {
      /* We are not supposed to load this object.  Free all resources.  */
      _dl_unmap_segments (l);

      if (!l->l_libname->dont_free)
        free (l->l_libname);

      if (l->l_phdr_allocated)
        free ((void *) l->l_phdr);

      if (l->l_flags_1 & DF_1_PIE)
        *perrstr = N_("cannot dynamically load position-independent executable");
      else
        *perrstr = N_("shared object cannot be dlopen()ed");
      goto call_lose;
    }

  if (l->l_phdr == NULL)
    {
      /* The program header is not contained in any of the segments.
         We have to allocate memory ourself and copy it over from our
         temporary place.  */
      ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum * sizeof (ElfW(Phdr)));
      if (newp == NULL)
        {
          *perrstr = N_("cannot allocate memory for program header");
          goto call_lose_errno;
        }

      l->l_phdr = memcpy (newp, phdr,
                          (header->e_phnum * sizeof (ElfW(Phdr))));
      l->l_phdr_allocated = 1;
    }
  else
    /* Adjust the PT_PHDR value by the runtime load address.  */
    l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);

  if (__glibc_unlikely ((stack_flags &~ GL(dl_stack_flags)) & PF_X))
    {
      /* The stack is presently not executable, but this module
         requires that it be executable.  We must change the
         protection of the variable which contains the flags used in
         the mprotect calls.  */
#ifdef SHARED
      if ((mode & (__RTLD_DLOPEN | __RTLD_AUDIT)) == __RTLD_DLOPEN)
        {
          const uintptr_t p = (uintptr_t) &__stack_prot & -GLRO(dl_pagesize);
          const size_t s = (uintptr_t) (&__stack_prot + 1) - p;

          struct link_map *const m = &GL(dl_rtld_map);
          const uintptr_t relro_end = ((m->l_addr + m->l_relro_addr + m->l_relro_size) & -GLRO(dl_pagesize));
          if (__glibc_likely (p + s <= relro_end))
            {
              /* The variable lies in the region protected by RELRO.
                 Temporarily make its page writable, set the flag, and
                 restore read-only protection.  */
              if (__mprotect ((void *) p, s, PROT_READ|PROT_WRITE) < 0)
                {
                  *perrstr = N_("cannot change memory protections");
                  goto call_lose_errno;
                }
              __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
              __mprotect ((void *) p, s, PROT_READ);
            }
          else
            __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
        }
      else
#endif
        __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;

#ifdef check_consistency
      check_consistency ();
#endif

      *perrval = (*GL(dl_make_stack_executable_hook)) (stack_endp);
      if (*perrval)
        {
          *perrstr = N_("cannot enable executable stack as shared object requires");
          goto call_lose;
        }
    }

  /* Adjust the address of the TLS initialization image.  */
  if (l->l_tls_initimage != NULL)
    l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;

  /* We are done mapping in the file.  We no longer need the descriptor.  */
  if (__glibc_unlikely (__close_nocancel (fd) != 0))
    {
      *perrstr = N_("cannot close file descriptor");
      goto call_lose_errno;
    }
  /* Signal that we closed the file.  */
  fd = -1;

  /* If this is ET_EXEC, we should have loaded it as lt_executable.  */
  assert (type != ET_EXEC || l->l_type == lt_executable);

  l->l_entry += l->l_addr;

  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("\
      dynamic: 0x%0*lx  base: 0x%0*lx   size: 0x%0*Zx\n\
        entry: 0x%0*lx  phdr: 0x%0*lx  phnum:   %*u\n\n",
      (int) sizeof (void *) * 2,
      (unsigned long int) l->l_ld,
      (int) sizeof (void *) * 2,
      (unsigned long int) l->l_addr,
      (int) sizeof (void *) * 2, maplength,
      (int) sizeof (void *) * 2,
      (unsigned long int) l->l_entry,
      (int) sizeof (void *) * 2,
      (unsigned long int) l->l_phdr,
      (int) sizeof (void *) * 2, l->l_phnum);

  /* Set up the symbol hash table.  */
  _dl_setup_hash (l);

  /* If this object has DT_SYMBOLIC set modify now its scope.  We don't
     have to do this for the main map.  */
  if ((mode & RTLD_DEEPBIND) == 0
      && __glibc_unlikely (l->l_info[DT_SYMBOLIC] != NULL)
      && &l->l_searchlist != l->l_scope[0])
    {
      /* Create an appropriate searchlist.  It contains only this map.
         This is the definition of DT_SYMBOLIC in SysVr4.  */
      l->l_symbolic_searchlist.r_list[0] = l;
      l->l_symbolic_searchlist.r_nlist = 1;

      /* Now move the existing entries one back.  */
      memmove (&l->l_scope[1], &l->l_scope[0],
               (l->l_scope_max - 1) * sizeof (l->l_scope[0]));

      /* Now add the new entry.  */
      l->l_scope[0] = &l->l_symbolic_searchlist;
    }

  return 0;
}
