SH-2A FDPIC support

These are the changes used in CodeSourcery's SH-2A uClinux Lite toolchains.

Signed-off-by: Andrew Stubbs <ams@codesourcery.com>
This commit is contained in:
Andrew Stubbs 2011-02-23 12:23:03 +00:00
parent 60e244a98f
commit dd4643de1c
25 changed files with 1719 additions and 57 deletions

View File

@ -217,7 +217,7 @@ CPU_CFLAGS-$(UCLIBC_FORMAT_SHARED_FLAT) += -mid-shared-library
CPU_CFLAGS-$(UCLIBC_FORMAT_FLAT_SEP_DATA) += -msep-data
PICFLAG-y := -fPIC
PICFLAG-$(UCLIBC_FORMAT_FDPIC_ELF) := -mfdpic
PICFLAG-$(UCLIBC_FORMAT_FDPIC_ELF) := -mfdpic -fPIC
PICFLAG := $(PICFLAG-y)
PIEFLAG_NAME:=-fPIE

View File

@ -32,7 +32,6 @@ choice
config CONFIG_SH2A
select ARCH_HAS_NO_MMU
select HAVE_NO_PIC
bool "SH2A"
config CONFIG_SH2

View File

@ -2526,6 +2526,11 @@ typedef Elf32_Addr Elf32_Conflict;
/* SH specific declarations */
/* SH flags. */
#define EF_SH_PIC 0x100 /* Segments of an FDPIC binary may
be relocated independently. */
#define EF_SH_FDPIC 0x8000 /* Uses the FDPIC ABI. */
/* SH specific values for `st_other'. */
/* If set, this is a symbol pointing to SHmedia code, which will be branched
@ -2572,6 +2577,12 @@ typedef Elf32_Addr Elf32_Conflict;
#define R_SH_GOTPC 167
#define R_SH_RELATIVE_LOW16 197
#define R_SH_RELATIVE_MEDLOW16 198
#define R_SH_GOTFUNCDESC 203
#define R_SH_GOTFUNCDESC20 204
#define R_SH_GOTOFFFUNCDESC 205
#define R_SH_GOTOFFFUNCDESC20 206
#define R_SH_FUNCDESC 207
#define R_SH_FUNCDESC_VALUE 208
#define R_SH_IMM_LOW16 246
#define R_SH_IMM_LOW16_PCREL 247
#define R_SH_IMM_MEDLOW16 248

View File

@ -284,9 +284,10 @@ static __always_inline char * _dl_simple_ltoahex(char *local, unsigned long i)
/* On some arches constant strings are referenced through the GOT.
* This requires that load_addr must already be defined... */
#if defined(mc68000) || defined(__arm__) || defined(__thumb__) || \
defined(__mips__) || defined(__sh__) || defined(__powerpc__) || \
defined(__avr32__) || defined(__xtensa__) || defined(__sparc__)
#if (defined(mc68000) || defined(__arm__) || defined(__thumb__) || \
defined(__mips__) || defined(__sh__) || defined(__powerpc__) || \
defined(__avr32__) || defined(__xtensa__) || defined(__sparc__)) \
&& !defined(__FDPIC__)
# define CONSTANT_STRING_GOT_FIXUP(X) \
if ((X) < (const char *) load_addr) (X) += load_addr
# define NO_EARLY_SEND_STDERR

View File

@ -30,6 +30,7 @@
*/
#include "sys/mman.h"
#include "ldso.h"
#ifdef __LDSO_CACHE_SUPPORT__
@ -118,6 +119,7 @@ int _dl_unmap_cache(void)
void
_dl_protect_relro (struct elf_resolve *l)
{
#ifndef ARCH_CANNOT_PROTECT_MEMORY
ElfW(Addr) base = (ElfW(Addr)) DL_RELOC_ADDR(l->loadaddr, l->relro_addr);
ElfW(Addr) start = (base & PAGE_ALIGN);
ElfW(Addr) end = ((base + l->relro_size) & PAGE_ALIGN);
@ -127,6 +129,7 @@ _dl_protect_relro (struct elf_resolve *l)
_dl_dprintf(2, "%s: cannot apply additional memory protection after relocation", l->libname);
_dl_exit(0);
}
#endif
}
/* This function's behavior must exactly match that

View File

@ -201,7 +201,9 @@ DL_START(unsigned long args)
We are only doing ourself right now - we will have to do the rest later */
SEND_EARLY_STDERR_DEBUG("Scanning DYNAMIC section\n");
tpnt->dynamic_addr = dpnt;
#if defined(NO_FUNCS_BEFORE_BOOTSTRAP)
#ifdef DL_PARSE_DYNAMIC_INFO
DL_PARSE_DYNAMIC_INFO(dpnt, tpnt->dynamic_info, NULL, load_addr);
#elif defined(NO_FUNCS_BEFORE_BOOTSTRAP)
/* Some architectures cannot call functions here, must inline */
__dl_parse_dynamic_info(dpnt, tpnt->dynamic_info, NULL, load_addr);
#else

View File

@ -30,6 +30,7 @@
* SUCH DAMAGE.
*/
#include "sys/mman.h"
#include "ldso.h"
#include "unsecvars.h"
@ -405,8 +406,8 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr,
ppnt = (ElfW(Phdr) *) auxvt[AT_PHDR].a_un.a_val;
for (i = 0; i < auxvt[AT_PHNUM].a_un.a_val; i++, ppnt++) {
if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W))
_dl_mprotect((void *) (DL_RELOC_ADDR(app_tpnt->loadaddr, ppnt->p_vaddr) & PAGE_ALIGN),
((ppnt->p_vaddr + app_tpnt->loadaddr) & ADDR_ALIGN) +
_dl_mprotect((void *) ((ElfW(Addr)) DL_RELOC_ADDR(app_tpnt->loadaddr, ppnt->p_vaddr) & PAGE_ALIGN),
((ElfW(Addr)) DL_RELOC_ADDR (app_tpnt->loadaddr, ppnt->p_vaddr) & ADDR_ALIGN) +
(unsigned long) ppnt->p_filesz,
PROT_READ | PROT_WRITE | PROT_EXEC);
}
@ -897,7 +898,7 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr,
for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) {
for (myppnt = tpnt->ppnt, j = 0; j < tpnt->n_phent; j++, myppnt++) {
if (myppnt->p_type == PT_LOAD && !(myppnt->p_flags & PF_W) && tpnt->dynamic_info[DT_TEXTREL]) {
_dl_mprotect((void *) (DL_RELOC_ADDR(tpnt->loadaddr, myppnt->p_vaddr) & PAGE_ALIGN),
_dl_mprotect((void *) ((ElfW(Addr))DL_RELOC_ADDR(tpnt->loadaddr, myppnt->p_vaddr) & PAGE_ALIGN),
(myppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) myppnt->p_filesz, LXFLAGS(myppnt->p_flags));
}
}

455
ldso/ldso/sh/dl-inlines.h Normal file
View File

@ -0,0 +1,455 @@
/* Copyright (C) 2003, 2004 Red Hat, Inc.
* Contributed by Alexandre Oliva <aoliva@redhat.com>
*
* Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
*/
#ifndef _dl_assert
# define _dl_assert(expr)
#endif
/* Initialize a DL_LOADADDR_TYPE given a got pointer and a complete
   load map.  Sanity-checks the map first: the version must be 0 and
   the map must describe at least one segment; otherwise the dynamic
   loader aborts via _dl_exit.  The compiler refuses to inline this
   function, so use a macro instead, until __always_inline is fixed. */
#define __dl_init_loadaddr_map(LOADADDR, GOT_VALUE, MAP) \
{ \
  if ((MAP)->version != 0) \
    { \
      SEND_EARLY_STDERR ("Invalid loadmap version number\n"); \
      _dl_exit(-1); \
    } \
  if ((MAP)->nsegs == 0) \
    { \
      SEND_EARLY_STDERR ("Invalid segment count in loadmap\n"); \
      _dl_exit(-1); \
    } \
  (LOADADDR)->got_value = (GOT_VALUE); \
  (LOADADDR)->map = (MAP); \
}
/* Count the PT_LOAD entries among the PCNT program headers at PPNT
   and allocate a load map large enough to describe all of them.
   got_value is zeroed here and properly initialized later on, with
   INIT_GOT.  Returns the number of LOAD segments; aborts the loader
   if the map cannot be allocated.  */
static __always_inline int
__dl_init_loadaddr (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Phdr *ppnt,
		    int pcnt)
{
  int nloadsegs = 0;
  int seg;

  for (seg = 0; seg < pcnt; seg++)
    {
      if (ppnt[seg].p_type == PT_LOAD)
	nloadsegs++;
    }

  loadaddr->got_value = 0;
  loadaddr->map = _dl_malloc (sizeof (struct elf32_fdpic_loadmap)
			      + sizeof (struct elf32_fdpic_loadseg)
				* nloadsegs);
  if (! loadaddr->map)
    _dl_exit (-1);

  loadaddr->map->version = 0;
  loadaddr->map->nsegs = 0;

  return nloadsegs;
}
/* Incrementally initialize a load map: record one more mapped segment
   in the next free slot of LOADADDR's map, pairing its run-time
   address ADDR with the link-time p_vaddr/p_memsz from program header
   PHDR.  MAXSEGS is the segment count previously returned by
   __dl_init_loadaddr; exceeding it would overrun the allocation, so
   bail out instead. */
static __always_inline void
__dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr,
			Elf32_Phdr *phdr, int maxsegs)
{
  struct elf32_fdpic_loadseg *segdata;

  /* Internal inconsistency: more LOAD headers than the map has room for.  */
  if (loadaddr.map->nsegs == maxsegs)
    _dl_exit (-1);

  segdata = &loadaddr.map->segs[loadaddr.map->nsegs++];
  segdata->addr = (Elf32_Addr) addr;
  segdata->p_vaddr = phdr->p_vaddr;
  segdata->p_memsz = phdr->p_memsz;

#if defined (__SUPPORT_LD_DEBUG__)
  {
    extern char *_dl_debug;
    extern int _dl_debug_file;
    if (_dl_debug)
      _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n",
		  loadaddr.map->nsegs-1,
		  segdata->p_vaddr, segdata->addr, segdata->p_memsz);
  }
#endif
}
static __always_inline void __dl_loadaddr_unmap
(struct elf32_fdpic_loadaddr loadaddr, struct funcdesc_ht *funcdesc_ht);
/* Report whether address P falls inside any of the segments recorded
   in LOADADDR's load map.  Returns 1 if so, 0 otherwise.  */
static __always_inline int
__dl_addr_in_loadaddr (void *p, struct elf32_fdpic_loadaddr loadaddr)
{
  struct elf32_fdpic_loadmap *map = loadaddr.map;
  int seg;

  for (seg = 0; seg < map->nsegs; seg++)
    {
      char *start = (char *) map->segs[seg].addr;

      if ((void *) start <= p && (char *) p < start + map->segs[seg].p_memsz)
	return 1;
    }

  return 0;
}
static __always_inline void * _dl_funcdesc_for (void *entry_point, void *got_value);
/* The hashcode handling code below is heavily inspired in libiberty's
hashtab code, but with most adaptation points and support for
deleting elements removed.
Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
Contributed by Vladimir Makarov (vmakarov@cygnus.com). */
/* Return the smallest prime in the table below that is >= N.  Used to
   pick hash-table sizes; each prime is slightly smaller than a power
   of two, so probe arithmetic stays cheap.  If N exceeds every table
   entry (only reachable when unsigned long is 32 bits and
   N > 4294967291), return the largest prime rather than reading past
   the end of the table -- the original code's `return *low' would
   have been an out-of-bounds access in that case.  */
static __always_inline unsigned long
higher_prime_number (unsigned long n)
{
  /* These are primes that are near, but slightly smaller than, a
     power of two.  */
  static const unsigned long primes[] = {
    (unsigned long) 7,
    (unsigned long) 13,
    (unsigned long) 31,
    (unsigned long) 61,
    (unsigned long) 127,
    (unsigned long) 251,
    (unsigned long) 509,
    (unsigned long) 1021,
    (unsigned long) 2039,
    (unsigned long) 4093,
    (unsigned long) 8191,
    (unsigned long) 16381,
    (unsigned long) 32749,
    (unsigned long) 65521,
    (unsigned long) 131071,
    (unsigned long) 262139,
    (unsigned long) 524287,
    (unsigned long) 1048573,
    (unsigned long) 2097143,
    (unsigned long) 4194301,
    (unsigned long) 8388593,
    (unsigned long) 16777213,
    (unsigned long) 33554393,
    (unsigned long) 67108859,
    (unsigned long) 134217689,
    (unsigned long) 268435399,
    (unsigned long) 536870909,
    (unsigned long) 1073741789,
    (unsigned long) 2147483647,
    /* 4294967291L */
    ((unsigned long) 2147483647) + ((unsigned long) 2147483644),
  };
  const unsigned long *low = &primes[0];
  const unsigned long *high = &primes[sizeof(primes) / sizeof(primes[0])];

  /* Binary search for the first entry >= n.  */
  while (low != high)
    {
      const unsigned long *mid = low + (high - low) / 2;
      if (n > *mid)
	low = mid + 1;
      else
	high = mid;
    }

  /* Bug fix: if n is larger than every prime, LOW now points one past
     the end of the table; clamp to the largest prime instead of
     dereferencing it.  */
  if (low == &primes[sizeof(primes) / sizeof(primes[0])])
    return low[-1];

  return *low;
}
/* Open-addressed hash table mapping function entry points to their
   canonical function descriptors; one table per loaded module (see
   _dl_funcdesc_for).  Collisions are resolved by double hashing in
   htab_find_slot.  */
struct funcdesc_ht
{
  /* Table itself. */
  struct funcdesc_value **entries;

  /* Current size (in entries) of the hash table */
  size_t size;

  /* Current number of elements. */
  size_t n_elements;
};
/* Hash a pointer for the descriptor table.  Function descriptors are
   8-byte aligned (struct funcdesc_value), so the low three bits carry
   no information and are discarded.  */
static __always_inline int
hash_pointer (const void *ptr)
{
  long key = (long) ptr;

  key >>= 3;
  return (int) key;
}
static __always_inline struct funcdesc_ht *
htab_create (void)
{
struct funcdesc_ht *ht = _dl_malloc (sizeof (struct funcdesc_ht));
if (! ht)
return NULL;
ht->size = 3;
ht->entries = _dl_malloc (sizeof (struct funcdesc_ht_value *) * ht->size);
if (! ht->entries)
return NULL;
ht->n_elements = 0;
_dl_memset (ht->entries, 0, sizeof (struct funcdesc_ht_value *) * ht->size);
return ht;
}
/* This is only called from _dl_loadaddr_unmap, so it's safe to call
_dl_free(). See the discussion below. */
/* Release a descriptor hash table: free every live entry, then the
   slot array, then the table header itself.  Only safe where
   _dl_free() is safe; see the comment above.  */
static __always_inline void
htab_delete (struct funcdesc_ht *htab)
{
  int idx;

  for (idx = htab->size - 1; idx >= 0; idx--)
    {
      struct funcdesc_value *slot = htab->entries[idx];

      if (slot)
	_dl_free (slot);
    }

  _dl_free (htab->entries);
  _dl_free (htab);
}
/* Similar to htab_find_slot, but without several unwanted side effects:
   - Does not call htab->eq_f when it finds an existing entry.
   - Does not change the count of elements/searches/collisions in the
     hash table.
   This function also assumes there are no deleted entries in the table.
   HASH is the hash value for the element to be inserted.  Only used
   while rehashing in htab_expand, where every probed key is known to
   be absent, so the first empty slot is always the right one.  */
static __always_inline struct funcdesc_value **
find_empty_slot_for_expand (struct funcdesc_ht *htab, int hash)
{
  size_t size = htab->size;
  /* Primary probe position.  The int -> size_t conversion in the
     modulo keeps the index in range even for negative HASH.  */
  unsigned int index = hash % size;
  struct funcdesc_value **slot = htab->entries + index;
  int hash2;

  if (! *slot)
    return slot;

  /* Secondary step for double hashing, in [1, size-2]; SIZE is prime,
     so repeated stepping visits every slot.  */
  hash2 = 1 + hash % (size - 2);
  for (;;)
    {
      index += hash2;
      if (index >= size)
	index -= size;

      slot = htab->entries + index;
      if (! *slot)
	return slot;
    }
}
/* The following function changes size of memory allocated for the
   entries and repeatedly inserts the table elements. The occupancy
   of the table after the call will be about 50%. Naturally the hash
   table must already exist. Remember also that the place of the
   table entries is changed. If memory allocation failures are allowed,
   this function will return zero, indicating that the table could not be
   expanded. If all goes well, it will return a non-zero value. */
static __always_inline int
htab_expand (struct funcdesc_ht *htab)
{
  struct funcdesc_value **oentries;
  struct funcdesc_value **olimit;
  struct funcdesc_value **p;
  struct funcdesc_value **nentries;
  size_t nsize;

  oentries = htab->entries;
  olimit = oentries + htab->size;

  /* Resize only when table after removal of unused elements is either
     too full or too empty.  */
  if (htab->n_elements * 2 > htab->size)
    nsize = higher_prime_number (htab->n_elements * 2);
  else
    nsize = htab->size;

  nentries = _dl_malloc (sizeof (struct funcdesc_value *) * nsize);
  /* Bug fix: check for allocation failure BEFORE zeroing the block;
     the original called _dl_memset on a potentially NULL pointer and
     only then tested it.  */
  if (nentries == NULL)
    return 0;
  _dl_memset (nentries, 0, sizeof (struct funcdesc_value *) * nsize);
  htab->entries = nentries;
  htab->size = nsize;

  /* Rehash every live entry from the old array into the new one.  */
  p = oentries;
  do
    {
      if (*p)
	*find_empty_slot_for_expand (htab, hash_pointer ((*p)->entry_point))
	  = *p;

      p++;
    }
  while (p < olimit);

#if 0 /* We can't tell whether this was allocated by the _dl_malloc()
	 built into ld.so or malloc() in the main executable or libc,
	 and calling free() for something that wasn't malloc()ed could
	 do Very Bad Things (TM).  Take the conservative approach
	 here, potentially wasting as much memory as actually used by
	 the hash table, even if multiple growths occur.  That's not
	 so bad as to require some overengineered solution that would
	 enable us to keep track of how it was allocated. */
  _dl_free (oentries);
#endif
  return 1;
}
/* This function searches for a hash table slot containing an entry
   equal to the given element. To delete an entry, call this with
   INSERT = 0, then call htab_clear_slot on the slot returned (possibly
   after doing some checks). To insert an entry, call this with
   INSERT = 1, then write the value you want into the returned slot.
   When inserting an entry, NULL may be returned if memory allocation
   fails.  Entries are keyed by function entry point; PTR is the entry
   point being looked up.  */
static __always_inline struct funcdesc_value **
htab_find_slot (struct funcdesc_ht *htab, void *ptr, int insert)
{
  unsigned int index;
  int hash, hash2;
  size_t size;
  struct funcdesc_value **entry;

  /* Grow once the table is more than 3/4 full; a failed expansion
     makes insertion impossible, hence the NULL return.  */
  if (htab->size * 3 <= htab->n_elements * 4
      && htab_expand (htab) == 0)
    return NULL;

  hash = hash_pointer (ptr);

  size = htab->size;
  index = hash % size;

  entry = &htab->entries[index];
  if (!*entry)
    goto empty_entry;
  else if ((*entry)->entry_point == ptr)
    return entry;

  /* Double hashing: step by a secondary hash in [1, size-2].  SIZE is
     prime, so the probe sequence covers the whole table.  */
  hash2 = 1 + hash % (size - 2);
  for (;;)
    {
      index += hash2;
      if (index >= size)
	index -= size;

      entry = &htab->entries[index];
      if (!*entry)
	goto empty_entry;
      else if ((*entry)->entry_point == ptr)
	return entry;
    }

 empty_entry:
  if (!insert)
    return NULL;

  /* NOTE(review): the element count is bumped as soon as an empty
     slot is handed out; the caller is expected to fill it in.  */
  htab->n_elements++;
  return entry;
}
/* Return the canonical function descriptor for ENTRY_POINT in the
   module whose GOT is at GOT_VALUE, creating the descriptor (and the
   module's descriptor hash table, on first use) if needed.  The
   module's elf_resolve pointer lives in the third GOT word (see
   INIT_GOT).  Returns (void*)-1 on allocation failure.  */
void *
_dl_funcdesc_for (void *entry_point, void *got_value)
{
  struct elf_resolve *tpnt = ((void**)got_value)[2];
  struct funcdesc_ht *ht = tpnt->funcdesc_ht;
  struct funcdesc_value **entry;

  _dl_assert (got_value == tpnt->loadaddr.got_value);

  if (! ht)
    {
      /* First descriptor requested for this module.  */
      ht = htab_create ();
      if (! ht)
	return (void*)-1;
      tpnt->funcdesc_ht = ht;
    }

  entry = htab_find_slot (ht, entry_point, 1);
  /* Bug fix: htab_find_slot returns NULL when table expansion fails;
     the original dereferenced it unconditionally.  */
  if (! entry)
    return (void*)-1;
  if (*entry)
    {
      _dl_assert ((*entry)->entry_point == entry_point);
      return _dl_stabilize_funcdesc (*entry);
    }

  *entry = _dl_malloc (sizeof (struct funcdesc_value));
  /* Bug fix: the original stored through the result of _dl_malloc
     without checking it for NULL.  */
  if (! *entry)
    return (void*)-1;
  (*entry)->entry_point = entry_point;
  (*entry)->got_value = got_value;

  return _dl_stabilize_funcdesc (*entry);
}
/* Map ADDRESS to a code address for dladdr()-style lookups: if
   ADDRESS is a function descriptor allocated by _dl_funcdesc_for in
   some loaded module's hash table, return the descriptor's entry
   point; otherwise return ADDRESS unchanged.  */
static __always_inline void const *
_dl_lookup_address (void const *address)
{
  struct elf_resolve *rpnt;
  struct funcdesc_value const *fd;

  /* Make sure we don't make assumptions about its alignment. */
  __asm__ ("" : "+r" (address));

  /* Descriptors are 8-byte aligned (struct funcdesc_value), so a
     misaligned pointer cannot be one.  */
  if ((Elf32_Addr)address & 7)
    /* It's not a function descriptor. */
    return address;

  fd = (struct funcdesc_value const *)address;

  for (rpnt = _dl_loaded_modules; rpnt; rpnt = rpnt->next)
    {
      if (! rpnt->funcdesc_ht)
	continue;

      /* Only the module owning this GOT can have created FD.  */
      if (fd->got_value != rpnt->loadaddr.got_value)
	continue;

      /* INSERT=0: probe only; yields the slot or NULL.  */
      address = htab_find_slot (rpnt->funcdesc_ht, (void*)fd->entry_point, 0);

      if (address && *(struct funcdesc_value *const*)address == fd)
	{
	  address = (*(struct funcdesc_value *const*)address)->entry_point;
	  break;
	}
      else
	/* Not this module's descriptor after all; restore ADDRESS and
	   keep scanning.  */
	address = fd;
    }

  return address;
}
void
__dl_loadaddr_unmap (struct elf32_fdpic_loadaddr loadaddr,
struct funcdesc_ht *funcdesc_ht)
{
int i;
for (i = 0; i < loadaddr.map->nsegs; i++)
_dl_munmap ((void*)loadaddr.map->segs[i].addr,
loadaddr.map->segs[i].p_memsz);
/* _dl_unmap is only called for dlopen()ed libraries, for which
calling free() is safe, or before we've completed the initial
relocation, in which case calling free() is probably pointless,
but still safe. */
_dl_free (loadaddr.map);
if (funcdesc_ht)
htab_delete (funcdesc_ht);
}

View File

@ -2,6 +2,7 @@
* will work as expected and cope with whatever platform specific weirdness is
* needed for this architecture. */
#ifndef __SH_FDPIC__
__asm__(
" .text\n"
" .globl _start\n"
@ -29,6 +30,99 @@ __asm__(
" .size _start,.-_start\n"
" .previous\n"
);
#else /* __SH_FDPIC__ */
/* On entry:
R8 = load map for user executable.
R9 = load map for interpreter (this program), or zero if
the user executed the interpreter directly.
R10 = address of _DYNAMIC section. */
__asm__(
" .text\n"
" .globl _start\n"
" .type _start,@function\n"
"_start:\n"
/* Select the correct load map. */
" tst r9, r9\n"
" bf/s 1f\n"
" mov r9, r4\n"
" mov r8, r4\n"
"1:\n"
/* GOT = __self_reloc (load_map, rofixup, rofixupend) */
" mova .L_rofixup, r0\n"
" mov.l .L_rofixup, r5\n"
" add r0, r5\n"
" mova .L_rofixupend, r0\n"
" mov.l .L_rofixupend, r6\n"
" add r0, r6\n"
" mov.l .L_self_reloc, r1\n"
" bsrf r1\n"
" nop\n"
".L_self_reloc_jmp_loc:\n"
" mov r0, r12\n"
/* r12 now contains the GOT/FDPIC address. */
/* entry_point = _dl_start (dl_boot_got_pointer, dl_boot_progmap,
* dl_boot_ldsomap, dl_boot_ldso_dyn_pointer,
* argv) */
" mov r12, r4\n"
" mov r8, r5\n"
" mov r9, r6\n"
" mov r10, r7\n"
" mov r15, r11\n"
" add #4, r11\n"
" mov.l .L_dl_start, r0\n"
" bsrf r0\n"
" mov.l r11, @-r15\n" /* arg5 on the stack. */
".L_dl_start_jmp_loc:\n"
" add #4, r15\n" /* Pop arg5 from stack. */
/* Start the user program. R8, R9 and R10 should remain as on entry. */
" mov r0, r1\n"
" mov.l .L_dl_fini, r0\n"
" mov.l @(r0,r12), r4 ! Pass the finalizer FD in r4\n"
" mov.l @(0,r1), r2\n"
" jmp @r2\n"
" mov.l @(4,r1), r12\n"
".L_self_reloc:\n"
" .long __self_reloc-.L_self_reloc_jmp_loc\n"
".L_rofixup:\n"
" .long __ROFIXUP_LIST__@PCREL\n"
".L_rofixupend:\n"
" .long __ROFIXUP_END__@PCREL\n"
".L_dl_start:\n"
" .long _dl_start-.L_dl_start_jmp_loc\n"
".L_dl_fini:\n"
" .long _dl_fini@GOTFUNCDESC\n"
" .size _start,.-_start\n"
" .previous\n"
);
#undef DL_START
#define DL_START(X) \
/*static*/ void * __attribute__ ((used)) \
_dl_start (void *dl_boot_got_pointer, \
struct elf32_fdpic_loadmap *dl_boot_progmap, \
struct elf32_fdpic_loadmap *dl_boot_ldsomap, \
Elf32_Dyn *dl_boot_ldso_dyn_pointer, \
X)
#define START() do { \
static struct funcdesc_value _dl_elf_main_fd; \
struct elf_resolve *exec_mod = _dl_loaded_modules; \
while (exec_mod->libtype != elf_executable) \
exec_mod = exec_mod->next; \
_dl_elf_main_fd.entry_point = _dl_elf_main; \
_dl_elf_main_fd.got_value = exec_mod->loadaddr.got_value; \
return &_dl_elf_main_fd; \
} while (0)
#endif /* __SH_FDPIC__ */
/*
* Get a pointer to the argv array. On many platforms this can be just
@ -47,6 +141,7 @@ __asm__(
* SYMBOL is the symbol involved in the relocation, and LOAD is the
* load address.
*/
#ifndef __SH_FDPIC__
#define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD,SYMTAB) \
switch(ELF32_R_TYPE((RELP)->r_info)){ \
case R_SH_REL32: \
@ -66,3 +161,28 @@ __asm__(
default: \
_dl_exit(1); \
}
#else
/* Bootstrap-time relocation for FDPIC: apply the handful of
   relocation types the dynamic loader needs against itself before any
   function calls are possible.  R_SH_FUNCDESC_VALUE builds an
   in-place function descriptor from the symbol value plus the current
   GOT value; unknown types report the relocation number and abort.  */
#define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD,SYMTAB) \
	switch(ELF32_R_TYPE((RELP)->r_info)){ \
	case R_SH_DIR32: \
	case R_SH_GLOB_DAT: \
	case R_SH_JMP_SLOT: \
		*(REL) = (SYMBOL) + (RELP)->r_addend; \
		break; \
	case R_SH_FUNCDESC_VALUE: \
	{ \
		struct funcdesc_value fv = { \
			(void*)((SYMBOL) + *(REL)), \
			(LOAD).got_value \
		}; \
		*(struct funcdesc_value volatile *)(REL) = fv;\
		break; \
	} \
	case R_SH_NONE: \
		break; \
	default: \
		SEND_EARLY_STDERR_DEBUG("Unknown relocation: ");\
		SEND_NUMBER_STDERR_DEBUG(ELF32_R_TYPE((RELP)->r_info), 1);\
		_dl_exit(1); \
	}
#endif

View File

@ -5,10 +5,33 @@ extern int _dl_errno;
#undef __set_errno
#define __set_errno(X) {(_dl_errno) = (X);}
#if __GNUC_PREREQ (4, 1)
#if __GNUC_PREREQ (4, 1) && !__GNUC_PREREQ (4, 5)
#warning !!! gcc 4.1 and later have problems with __always_inline so redefined as inline
/* but inlining doesn't work properly with GCC 4.5 without __always_inline. */
# ifdef __always_inline
# undef __always_inline
# define __always_inline __inline__
# endif
#endif
#ifdef __NR_pread64 /* Newer kernels renamed but it's the same. */
# ifdef __NR_pread
# error "__NR_pread and __NR_pread64 both defined???"
# endif
# define __NR_pread __NR_pread64
#endif
#ifdef __NR_pread
#include "endian.h"
# define __NR___syscall_pread __NR_pread
static __inline__ _syscall6(ssize_t, __syscall_pread, int, fd, void *, buf,
size_t, count, int, dummy, off_t, offset_hi, off_t, offset_lo)
static __always_inline
ssize_t _dl_pread(int fd, void *buf, size_t count, off_t offset)
{
return(__syscall_pread(fd,buf,count,0,__LONG_LONG_PAIR(offset >> 31,offset)));
}
#endif /* __NR_pread */

View File

@ -6,6 +6,8 @@
/* Define this if the system uses RELOCA. */
#define ELF_USES_RELOCA
#include <elf.h>
#ifndef __SH_FDPIC__
/*
* Initialization sequence for a GOT.
*/
@ -14,6 +16,24 @@
GOT_BASE[2] = (unsigned long) _dl_linux_resolve; \
GOT_BASE[1] = (unsigned long) (MODULE); \
}
#else
/*
* Initialization sequence for a GOT. Copy the resolver function
* descriptor and the pointer to the elf_resolve/link_map data
* structure. Initialize the got_value in the module while at that.
*/
#define INIT_GOT(GOT_BASE,MODULE) \
{ \
(MODULE)->loadaddr.got_value = (GOT_BASE); \
GOT_BASE[0] = ((unsigned long *)&_dl_linux_resolve)[0]; \
GOT_BASE[1] = ((unsigned long *)&_dl_linux_resolve)[1]; \
GOT_BASE[2] = (unsigned long) MODULE; \
}
/* Make sure the dynamic linker GOT is initialized.
FIXME: Is this the right solution? No other target seems to need it???? */
#define RERELOCATE_LDSO 1
#endif
/* Here we define the magic numbers that this dynamic loader should accept */
#define MAGIC1 EM_SH
@ -23,7 +43,183 @@
#define ELF_TARGET "sh"
struct elf_resolve;
#ifdef __SH_FDPIC__
struct funcdesc_value;
extern struct funcdesc_value volatile * _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry);
#else
extern unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry);
#endif
#ifdef __SH_FDPIC__
struct funcdesc_value
{
void *entry_point;
void *got_value;
} __attribute__((__aligned__(8)));
#define ARCH_NEEDS_BOOTSTRAP_RELOCS
#define ARCH_CANNOT_PROTECT_MEMORY
#define HAVE_DL_INLINES_H
#define STRINGIFY1(s) #s
#define STRINGIFY(s) STRINGIFY1 (s)
#undef SEND_EARLY_STDERR
#define SEND_EARLY_STDERR(S) \
do { \
const char *__s; \
int len; \
__asm__("mova 1f, r0\n" \
" bra 2f\n" \
" nop\n" \
" .align 2\n" \
"1: .string " STRINGIFY (S) "\n" \
" .align 2\n" \
"2:" \
: "=z" (__s)); \
for (len = 0; __s[len] != '\0'; len++) \
; \
_dl_write (2, __s, len); \
} while (0)
#define DL_LOADADDR_TYPE struct elf32_fdpic_loadaddr
#define DL_RELOC_ADDR(LOADADDR, ADDR) \
(__reloc_pointer ((void*)(ADDR), (LOADADDR).map))
#define DL_ADDR_TO_FUNC_PTR(ADDR, LOADADDR) \
((void(*)(void)) _dl_funcdesc_for ((void*)(ADDR), (LOADADDR).got_value))
#define _dl_stabilize_funcdesc(val) \
({ __asm__ ("" : "+m" (*(val))); (val); })
#define DL_CALL_FUNC_AT_ADDR(ADDR, LOADADDR, SIGNATURE, ...) \
({ struct funcdesc_value fd = { (void*)(ADDR), (LOADADDR).got_value }; \
void (*pf)(void) = (void*) _dl_stabilize_funcdesc (&fd); \
(* SIGNATURE pf)(__VA_ARGS__); })
/* Get an absolute address by relative means. */
#define DL_GET_ADDRESS(SYM) \
({ \
void *val; \
__asm__ ( \
"mova 1f, r0\n" \
" mov.l 1f, r1\n" \
" bra 2f\n" \
" add r1, r0\n" \
" .align 2\n" \
"1: .long " STRINGIFY(SYM) "@PCREL\n" \
"2:\n" \
: "=z" (val) : : "r1"); \
val; \
})
#define DL_CALL_FUNC_PCREL(FUNC, LOADADDR, ...) \
DL_CALL_FUNC_AT_ADDR (DL_GET_ADDRESS (FUNC), \
LOADADDR, \
(__typeof(FUNC) *), \
__VA_ARGS__)
#define DL_INIT_LOADADDR_BOOT(LOADADDR, BASEADDR) \
(__dl_init_loadaddr_map (&(LOADADDR), dl_boot_got_pointer, \
dl_boot_ldsomap ?: dl_boot_progmap))
#define DL_INIT_LOADADDR_PROG(LOADADDR, BASEADDR) \
(__dl_init_loadaddr_map (&(LOADADDR), 0, dl_boot_progmap))
/* The GOT address is not at a fixed offset in FDPIC.
The address is computed by __self_reloc, and passed into _dl_start
as a function argument, dl_boot_got_pointer. */
#define DL_BOOT_COMPUTE_GOT(got) ((got) = dl_boot_got_pointer)
/* The _DYNAMIC address is not at a fixed offset or referenced in
the GOT. The kernel passes it in R10, and it is passed to
_dl_start as an argument, dl_boot_ldso_dyn_pointer. */
#define DL_BOOT_COMPUTE_DYN(DPNT, GOT, LOAD_ADDR) \
((DPNT) = dl_boot_ldso_dyn_pointer)
#define DL_PARSE_DYNAMIC_INFO(DPNT, DYNAMIC_INFO, DEBUG_ADDR, LOAD_OFF) \
DL_CALL_FUNC_PCREL (_dl_parse_dynamic_info, \
LOAD_OFF, \
DPNT, DYNAMIC_INFO, DEBUG_ADDR, LOAD_OFF)
#define DL_INIT_LOADADDR_EXTRA_DECLS \
int dl_init_loadaddr_load_count;
#define DL_INIT_LOADADDR(LOADADDR, BASEADDR, PHDR, PHDRCNT) \
(dl_init_loadaddr_load_count = \
__dl_init_loadaddr (&(LOADADDR), (PHDR), (PHDRCNT)))
#define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \
(__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \
dl_init_loadaddr_load_count))
#define DL_LOADADDR_UNMAP(LOADADDR, LEN) \
(__dl_loadaddr_unmap ((LOADADDR), (NULL)))
#define DL_LIB_UNMAP(LIB, LEN) \
(__dl_loadaddr_unmap ((LIB)->loadaddr, (LIB)->funcdesc_ht))
#define DL_LOADADDR_BASE(LOADADDR) \
((LOADADDR).map->segs[0].addr)
/* This is called from dladdr(), such that we map a function
descriptor's address to the function's entry point before trying to
find in which library it's defined. */
#define DL_LOOKUP_ADDRESS(ADDRESS) (_dl_lookup_address (ADDRESS))
#define DL_ADDR_IN_LOADADDR(ADDR, TPNT, TFROM) \
(! (TFROM) && __dl_addr_in_loadaddr ((void*)(ADDR), (TPNT)->loadaddr))
/* We only support loading FDPIC independently-relocatable shared
libraries. It probably wouldn't be too hard to support loading
shared libraries that require relocation by the same amount, but we
don't know that they exist or would be useful, and the dynamic
loader code could leak the whole-library map unless we keeping a
bit more state for DL_LOADADDR_UNMAP and DL_LIB_UNMAP, so let's
keep things simple for now. */
#define DL_CHECK_LIB_TYPE(epnt, piclib, _dl_progname, libname) \
do \
{ \
if (((epnt)->e_flags & EF_SH_FDPIC) && ((epnt)->e_flags & EF_SH_PIC)) \
(piclib) = 2; \
else \
{ \
_dl_internal_error_number = LD_ERROR_NOTDYN; \
_dl_dprintf(2, "%s: '%s' is not an FDPIC shared library" \
"\n", (_dl_progname), (libname)); \
_dl_close(infile); \
return NULL; \
} \
} \
while (0)
/* We want to apply all relocations in the interpreter during
bootstrap. Because of this, we have to skip the interpreter
relocations in _dl_parse_relocation_information(), see
elfinterp.c. */
#define DL_SKIP_BOOTSTRAP_RELOC(SYMTAB, INDEX, STRTAB) 0
#ifdef __NR_pread
#define _DL_PREAD(FD, BUF, SIZE, OFFSET) \
(_dl_pread((FD), (BUF), (SIZE), (OFFSET)))
#else
# error "pread is required for FDPIC"
#endif
/* We want to return to dlsym() a function descriptor if the symbol
turns out to be a function. */
#define DL_FIND_HASH_VALUE(TPNT, TYPE_CLASS, SYM) \
(((TYPE_CLASS) & ELF_RTYPE_CLASS_DLSYM) \
&& ELF32_ST_TYPE((SYM)->st_info) == STT_FUNC \
? _dl_funcdesc_for (DL_RELOC_ADDR ((TPNT)->loadaddr, (SYM)->st_value), \
(TPNT)->loadaddr.got_value) \
: DL_RELOC_ADDR ((TPNT)->loadaddr, (SYM)->st_value))
#define DL_GET_READY_TO_RUN_EXTRA_PARMS \
, struct elf32_fdpic_loadmap *dl_boot_progmap
#define DL_GET_READY_TO_RUN_EXTRA_ARGS \
, dl_boot_progmap
#else /* !__SH_FDPIC__ */
#include <link.h>
#define DL_LOADADDR_TYPE ElfW(Addr)
#endif
static __always_inline unsigned int
_dl_urem(unsigned int n, unsigned int base)
@ -92,6 +288,7 @@ _dl_urem(unsigned int n, unsigned int base)
((((type) == R_SH_JMP_SLOT) * ELF_RTYPE_CLASS_PLT) \
| (((type) == R_SH_COPY) * ELF_RTYPE_CLASS_COPY))
#ifndef __SH_FDPIC__
/* Return the link-time address of _DYNAMIC. Conveniently, this is the
first element of the GOT. This must be inlined in a function which
uses global data. */
@ -102,7 +299,9 @@ elf_machine_dynamic (void)
__asm__ ("mov r12,%0" :"=r" (got));
return *got;
}
#endif
#ifndef __SH_FDPIC__
/* Return the run-time load address of the shared object. */
static __always_inline Elf32_Addr __attribute__ ((unused))
elf_machine_load_address (void)
@ -121,6 +320,22 @@ elf_machine_load_address (void)
: "=r" (addr) : : "r0", "r1", "r2");
return addr;
}
#else
/* FDPIC needs access to the loadmap for this. */
#include <bits/elf-fdpic.h>
#define elf_machine_load_address() \
elf_machine_load_address1 (dl_boot_ldsomap, dl_boot_progmap)
static __always_inline Elf32_Addr __attribute__ ((unused))
elf_machine_load_address1 (struct elf32_fdpic_loadmap *ldso_map,
struct elf32_fdpic_loadmap *exec_map)
{
struct elf32_fdpic_loadmap *map = (ldso_map ?: exec_map);
/* The ELF header is assumed to be the first entry in the load map. */
return map->segs[0].addr;
}
#endif
#define COPY_UNALIGNED_WORD(swp, twp, align) \
{ \
@ -147,9 +362,10 @@ elf_machine_load_address (void)
}
static __always_inline void
elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr,
elf_machine_relative (DL_LOADADDR_TYPE load_off, const Elf32_Addr rel_addr,
Elf32_Word relative_count)
{
#ifndef __SH_FDPIC__
Elf32_Addr value;
Elf32_Rela * rpnt = (void *)rel_addr;
@ -165,5 +381,7 @@ elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr,
COPY_UNALIGNED_WORD (&value, reloc_addr, (int) reloc_addr & 3);
rpnt++;
} while (--relative_count);
#endif
#undef COPY_UNALIGNED_WORD
}

View File

@ -43,17 +43,82 @@
extern int _dl_linux_resolve(void);
unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry)
#ifdef __SH_FDPIC__
/* Return 1 if ADDRESS lies inside any segment of the given FDPIC load
   map, 0 otherwise.  Used by _dl_linux_resolver to find which module
   a lazy-PLT stub address belongs to.  */
static int
is_in_module (void *address, struct elf32_fdpic_loadmap *map)
{
  int i;

  for (i = 0; i < map->nsegs; i++)
    if (address >= map->segs[i].addr
	&& address < map->segs[i].addr + map->segs[i].p_memsz)
      return 1;

  return 0;
}
#endif
#ifdef __SH_FDPIC__
struct funcdesc_value volatile *
#else
unsigned long
#endif
_dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry)
{
ELF_RELOC *this_reloc;
char *strtab;
Elf32_Sym *symtab;
int symtab_index;
char *rel_addr;
char *symname;
char *new_addr;
#ifdef __SH_FDPIC__
struct elf_resolve *new_tpnt;
struct funcdesc_value funcval;
struct funcdesc_value volatile *got_addr;
#else
char **got_addr;
unsigned long instr_addr;
char *symname;
#endif
#ifdef __SH_FDPIC__
/* In FDPIC the parameters are not what they appear to be, but they can be
used to derive the right information.
On entry,
TPNT is the elf_resolve pointer for the calling function, OR, if the
relocation has already been done by a competing thread, it might be
the elf_resolve pointer for the function we are about to call.
RELOC_ENTRY is the address of the lazy PLT stub. The actual value will
be at address reloc_entry-4.
In order to calculate the true values of the parameters, we need to
detect, and possibly correct, the race condition. */
struct elf_resolve *module = tpnt;
void *lazyPLT = (void *)reloc_entry;
if (!is_in_module (lazyPLT, module->loadaddr.map))
{
/* The race condition has occurred! */
/* Traverse the module list and find the one containing lazyPLT. */
while (module->prev)
module = module->prev;
for (; module; module = module->next)
if (is_in_module (lazyPLT, module->loadaddr.map))
break;
if (!module)
{
/* No module was found. This shouldn't ever happen. */
_dl_dprintf (2, "%s: internal error in lazy relocation\n", _dl_progname);
_dl_exit(1);
}
}
/* We now know the proper values for the parameters. */
tpnt = module;
reloc_entry = ((int*)lazyPLT)[-1];
#endif
rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL];
@ -64,32 +129,62 @@ unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry)
strtab = (char *)tpnt->dynamic_info[DT_STRTAB];
symname = strtab + symtab[symtab_index].st_name;
#ifdef __SH_FDPIC__
/* Address of GOT entry fix up */
got_addr = (struct funcdesc_value *)
DL_RELOC_ADDR (tpnt->loadaddr, this_reloc->r_offset);
#else
/* Address of jump instruction to fix up */
instr_addr = (unsigned long) (this_reloc->r_offset + tpnt->loadaddr);
got_addr = (char **) instr_addr;
#endif
/* Get the address of the GOT entry */
new_addr = _dl_find_hash(symname, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT);
new_addr = _dl_lookup_hash(symname, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT
#if __SH_FDPIC__
, &new_tpnt
#endif
);
if (unlikely(!new_addr)) {
_dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname);
_dl_exit(1);
}
#ifdef __SH_FDPIC__
funcval.entry_point = new_addr;
funcval.got_value = new_tpnt->loadaddr.got_value;
#endif
#if defined (__SUPPORT_LD_DEBUG__)
if ((unsigned long) got_addr < 0x20000000) {
if (_dl_debug_bindings) {
_dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname);
if (_dl_debug_detail) _dl_dprintf(_dl_debug_file,
#ifdef __SH_FDPIC__
"\n\tpatched (%x,%x) ==> (%x,%x) @ %x\n",
got_addr->entry_point, got_addr->got_value,
funcval.entry_point, funcval.got_value,
got_addr);
#else
"\n\tpatched %x ==> %x @ %x\n", *got_addr, new_addr, got_addr);
#endif
}
}
if (!_dl_debug_nofixups)
*got_addr = new_addr;
#endif
#ifdef __SH_FDPIC__
/* To deal with a race condition, the GOT value must be written first. */
got_addr->got_value = funcval.got_value;
got_addr->entry_point = funcval.entry_point;
#else
*got_addr = new_addr;
#endif
#ifdef __SH_FDPIC__
return got_addr;
#else
return (unsigned long) new_addr;
#endif
}
@ -158,16 +253,30 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
#if defined (__SUPPORT_LD_DEBUG__)
unsigned long old_val;
#endif
#ifdef __SH_FDPIC__
struct funcdesc_value funcval;
struct elf_resolve *symbol_tpnt;
#endif
reloc_addr = (unsigned long *)(intptr_t) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
reloc_addr = (unsigned long *) DL_RELOC_ADDR(tpnt->loadaddr, rpnt->r_offset);
reloc_type = ELF32_R_TYPE(rpnt->r_info);
symtab_index = ELF32_R_SYM(rpnt->r_info);
symbol_addr = 0;
symname = strtab + symtab[symtab_index].st_name;
#ifdef __SH_FDPIC__
if (ELF_ST_BIND (symtab[symtab_index].st_info) == STB_LOCAL) {
symbol_addr = (unsigned long) DL_RELOC_ADDR(tpnt->loadaddr, symtab[symtab_index].st_value);
symbol_tpnt = tpnt;
} else
#endif
if (symtab_index) {
symbol_addr = (unsigned long) _dl_find_hash(symname, scope, tpnt,
elf_machine_type_class(reloc_type));
symbol_addr = (unsigned long) _dl_lookup_hash(symname, scope, tpnt,
elf_machine_type_class(reloc_type)
#ifdef __SH_FDPIC__
, &symbol_tpnt
#endif
);
/*
* We want to allow undefined references to weak symbols - this might
@ -185,6 +294,10 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
return 1;
}
}
#ifdef __SH_FDPIC__
else
symbol_tpnt = tpnt;
#endif
#if defined (__SUPPORT_LD_DEBUG__)
old_val = *reloc_addr;
@ -213,8 +326,34 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
(unsigned long) reloc_addr;
break;
case R_SH_RELATIVE:
*reloc_addr = (unsigned long) tpnt->loadaddr + rpnt->r_addend;
*reloc_addr = (unsigned long) DL_RELOC_ADDR (tpnt->loadaddr, rpnt->r_addend);
break;
#ifdef __SH_FDPIC__
case R_SH_FUNCDESC_VALUE:
funcval.entry_point = (void*)symbol_addr;
/* The addend of FUNCDESC_VALUE
relocations referencing global
symbols must be ignored, because it
may hold the address of a lazy PLT
entry. */
if (ELF_ST_BIND(symtab[symtab_index].st_info) == STB_LOCAL)
funcval.entry_point += *reloc_addr;
if (symbol_addr)
funcval.got_value
= symbol_tpnt->loadaddr.got_value;
else
funcval.got_value = 0;
*(struct funcdesc_value *)reloc_addr = funcval;
break;
case R_SH_FUNCDESC:
if (symbol_addr)
*reloc_addr = (unsigned long)_dl_funcdesc_for(
(char *)symbol_addr + *reloc_addr,
symbol_tpnt->loadaddr.got_value);
else
*reloc_addr = 0;
break;
#endif
default:
return -1;
@ -233,7 +372,12 @@ _dl_do_lazy_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope,
ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)
{
int reloc_type;
#ifdef __SH_FDPIC__
struct funcdesc_value volatile *reloc_addr;
struct funcdesc_value funcval;
#else
unsigned long *reloc_addr;
#endif
#if defined (__SUPPORT_LD_DEBUG__)
unsigned long old_val;
#endif
@ -241,24 +385,43 @@ _dl_do_lazy_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope,
(void)symtab;
(void)strtab;
reloc_addr = (unsigned long *)(intptr_t) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
reloc_addr = DL_RELOC_ADDR (tpnt->loadaddr, rpnt->r_offset);
reloc_type = ELF32_R_TYPE(rpnt->r_info);
#if defined (__SUPPORT_LD_DEBUG__)
#ifdef __SH_FDPIC__
old_val = (unsigned long)reloc_addr->entry_point;
#else
old_val = *reloc_addr;
#endif
#endif
switch (reloc_type) {
case R_SH_NONE:
break;
case R_SH_JMP_SLOT:
*reloc_addr += (unsigned long) tpnt->loadaddr;
#ifdef __SH_FDPIC__
case R_SH_FUNCDESC_VALUE:
funcval = *reloc_addr;
funcval.entry_point = DL_RELOC_ADDR(tpnt->loadaddr, funcval.entry_point);
funcval.got_value = tpnt->loadaddr.got_value;
*reloc_addr = funcval;
break;
#else
case R_SH_JMP_SLOT:
*reloc_addr = (unsigned long) DL_RELOC_ADDR (tpnt->loadaddr, *reloc_addr);
break;
#endif
default:
return -1;
}
#if defined (__SUPPORT_LD_DEBUG__)
if (_dl_debug_reloc && _dl_debug_detail)
_dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", old_val, *reloc_addr, reloc_addr);
_dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", old_val,
#ifdef __SH_FDPIC__
reloc_addr->entry_point,
#else
*reloc_addr,
#endif
reloc_addr);
#endif
return 0;
@ -275,3 +438,7 @@ int _dl_parse_relocation_information(struct dyn_elf *rpnt,
{
return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc);
}
#if defined __SH_FDPIC__ && !defined IS_IN_libdl
# include "../../libc/sysdeps/linux/sh/crtreloc.c"
#endif

View File

@ -15,14 +15,14 @@ _dl_linux_resolve:
mov.l r6, @-r15
mov.l r7, @-r15
mov.l r12, @-r15
movt r3 ! Save T flag
mov.l r3, @-r15
movt r4 ! Save T flag
mov.l r4, @-r15
#ifdef HAVE_FPU
sts.l fpscr, @-r15
mov #8,r3
swap.w r3, r3
lds r3, fpscr
mov #8,r4
swap.w r4, r4
lds r4, fpscr
fmov.s fr11, @-r15
fmov.s fr10, @-r15
fmov.s fr9, @-r15
@ -33,6 +33,31 @@ _dl_linux_resolve:
fmov.s fr4, @-r15
#endif
sts.l pr, @-r15
#ifdef __SH_FDPIC__
/* On entry,
R0 the address of the resolver itself (this function)
R3 the GOT address (FDPIC value) for the resolver's GOT
R1 the address of the lazy PLT entry being resolved
R12 the GOT address for the caller's GOT
or sometimes for the called function's GOT
The uncertainty in R12 is due to a race condition when overwriting
the function descriptor. This code should not rely on R12.
Since we do not confidently know the value of the arguments to
_dl_linux_resolver, we pass the information we do have, and let it
work it out for itself ... it's easier to do this in C. */
mov.l @(8,r12), r4 ! load map
mov r1, r5 ! Lazy PLT stub
mov.l .L_dl_linux_resolver, r0
mov.l @(r0, r3), r6
mov.l @(0,r6), r7
jsr @r7
mov.l @(4,r6), r12
#else
/* Note - The PLT entries have been "optimised" not to use r2. r2 is used by
GCC to return the address of large structures, so it should not be
corrupted here. This does mean, however, that those PLTs do not conform
@ -55,13 +80,15 @@ _dl_linux_resolve:
mov r2, r0 ! link map address in r2 (SH PIC ABI)
1:
mov r0, r4 ! link map address in r0 (GNUs PLT)
mova .LG, r0
mov.l .LG, r5
add r5, r0
mov.l 3f, r5
mov.l .L_dl_linux_resolver, r5
mov.l @(r0, r5),r5
jsr @r5
mov r1, r5 ! Reloc offset
#endif
lds.l @r15+, pr ! Get register content back
@ -85,14 +112,23 @@ _dl_linux_resolve:
mov.l @r15+, r5
mov.l @r15+, r4
mov.l @r15+, r3
#ifdef __SH_FDPIC__
mov.l @(4,r0), r12
mov.l @(0,r0), r0
#endif
jmp @r0 ! Jump to function address
mov.l @r15+, r2 ! see Note2 above
.balign 4
3:
#ifdef __SH_FDPIC__
.L_dl_linux_resolver:
.long _dl_linux_resolver@GOTFUNCDESC
#else
.L_dl_linux_resolver:
.long _dl_linux_resolver@GOT
.LG:
.long _GLOBAL_OFFSET_TABLE_
#endif
.size _dl_linux_resolve, . - _dl_linux_resolve

View File

@ -30,6 +30,7 @@
*/
#include <sys/mman.h>
#include <ldso.h>
#include <stdio.h>
#include <string.h> /* Needed for 'strstr' prototype' */

View File

@ -29,7 +29,11 @@ __dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info, size_t size, void
int ret = 0;
for (l = _dl_loaded_modules; l != NULL; l = l->next) {
#ifdef __FDPIC__
info.dlpi_addr = (struct elf32_fdpic_loadaddr) {0, 0};
#else
info.dlpi_addr = l->loadaddr;
#endif
info.dlpi_name = l->libname;
info.dlpi_phdr = l->ppnt;
info.dlpi_phnum = l->n_phent;
@ -60,7 +64,11 @@ dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info,
/* This entry describes this statically-linked program itself. */
struct dl_phdr_info info;
int ret;
#ifdef __FDPIC__
info.dlpi_addr = (struct elf32_fdpic_loadaddr) { 0, 0 };
#else
info.dlpi_addr = 0;
#endif
info.dlpi_name = "";
info.dlpi_phdr = _dl_phdr;
info.dlpi_phnum = _dl_phnum;

View File

@ -6,8 +6,13 @@
# Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
#
ifeq ($(UCLIBC_FORMAT_FDPIC_ELF),y)
FDPIC_CSRC=crtreloc.c
endif
CSRC := \
mmap.c pipe.c __init_brk.c brk.c sbrk.c syscall.c pread_write.c
mmap.c pipe.c __init_brk.c brk.c sbrk.c syscall.c pread_write.c \
$(FDPIC_CSRC)
SSRC := setjmp.S __longjmp.S vfork.S clone.S ___fpscr_values.S

View File

@ -0,0 +1,115 @@
/* Copyright 2003, 2004 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
In addition to the permissions in the GNU Lesser General Public
License, the Free Software Foundation gives you unlimited
permission to link the compiled version of this file with other
programs, and to distribute those programs without any restriction
coming from the use of this file. (The GNU Lesser General Public
License restrictions do apply in other respects; for example, they
cover modification of the file, and distribution when not linked
into another program.)
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA. */
#ifndef _BITS_ELF_FDPIC_H
#define _BITS_ELF_FDPIC_H
/* These data structures are described in the FDPIC ABI extension.
The kernel passes a process a memory map, such that for every LOAD
segment there is an elf32_fdpic_loadseg entry. A pointer to an
elf32_fdpic_loadmap is passed in R8 at start-up, and a pointer to
an additional such map is passed in R9 for the interpreter, when
there is one. */
#include <elf.h>
/* This data structure represents a PT_LOAD segment. */
struct elf32_fdpic_loadseg
{
/* Core address to which the segment is mapped. */
Elf32_Addr addr;
/* VMA recorded in the program header. */
Elf32_Addr p_vaddr;
/* Size of this segment in memory. */
Elf32_Word p_memsz;
};
/* The map of all PT_LOAD segments, one elf32_fdpic_loadseg per
   segment, as handed to the process at start-up (see the file header
   comment for the registers used). */
struct elf32_fdpic_loadmap {
/* Protocol version number, must be zero. */
Elf32_Half version;
/* Number of segments in this map. */
Elf32_Half nsegs;
/* The actual memory map.  C99 flexible array member; the allocation
   is expected to hold NSEGS entries. */
struct elf32_fdpic_loadseg segs[/*nsegs*/];
};
/* A module's load address in the FDPIC sense: its kernel-provided
   load map plus the value of its GOT/FDPIC register.
   NOTE(review): got_value semantics inferred from uses elsewhere in
   this patch (e.g. funcval.got_value = ...loadaddr.got_value) --
   confirm against the SH FDPIC ABI. */
struct elf32_fdpic_loadaddr {
struct elf32_fdpic_loadmap *map;
void *got_value;
};
/* Map a pointer's VMA to its corresponding address according to the
load map. */
/* Map the VMA P to its corresponding run-time address using the
   ordered segment list in MAP.  Returns (void*)-1 -- not NULL --
   when P falls inside no segment; NOTE(review): confirm callers
   handle that failure value. */
static __always_inline void *
__reloc_pointer (void *p,
const struct elf32_fdpic_loadmap *map)
{
int c;
#if 0
/* Disabled sanity check: only loadmap version 0 is understood. */
if (map->version != 0)
/* Crash. */
((void(*)())0)();
#endif
/* No special provision is made for NULL.  We don't want NULL
addresses to go through relocation, so they shouldn't be in
.rofixup sections, and, if they're present in dynamic
relocations, they shall be mapped to the NULL address without
undergoing relocations. */
for (c = 0;
/* Take advantage of the fact that the loadmap is ordered by
virtual addresses.  In general there will only be 2 entries,
so it's not profitable to do a binary search. */
c < map->nsegs && p >= (void*)map->segs[c].p_vaddr;
c++)
{
/* This should be computed as part of the pointer comparison
above, but we want to use the carry in the comparison, so we
can't convert it to an integer type beforehand.
(GNU extension: subtraction on void* behaves as on char*.) */
unsigned long offset = p - (void*)map->segs[c].p_vaddr;
/* We only check for one-past-the-end for the last segment,
assumed to be the data segment, because other cases are
ambiguous in the absence of padding between segments, and
rofixup already serves as padding between text and data.
Unfortunately, unless we special-case the last segment, we
fail to relocate the _end symbol. */
if (offset < map->segs[c].p_memsz
|| (offset == map->segs[c].p_memsz && c + 1 == map->nsegs))
return (char*)map->segs[c].addr + offset;
}
/* We might want to crash instead. */
return (void*)-1;
}
# define __RELOC_POINTER(ptr, loadaddr) \
(__reloc_pointer ((void*)(ptr), \
(loadaddr).map))
#endif /* _BITS_ELF_FDPIC_H */

View File

@ -26,7 +26,9 @@
#include <bits/sysnum.h>
#ifdef __HAVE_SHARED__
#ifdef __SH_FDPIC__
#define PLTJMP(_x) _x@PLT-.
#elif defined __HAVE_SHARED__
#define PLTJMP(_x) _x@PLT
#else
#define PLTJMP(_x) _x
@ -94,12 +96,23 @@ clone:
3:
/* thread starts */
mov.l @r15, r1
#ifdef __SH_FDPIC__
/* r1 contains a function descriptor. */
mov.l @(4,r1), r12
mov.l @r1, r1
#endif
jsr @r1
mov.l @(4,r15), r4
/* we are done, passing the return value through r0 */
mov.l .L1, r1
#ifdef __HAVE_SHARED__
#ifdef __SH_FDPIC__
mov r0, r4
mov.l .L1, r0
add r0, r1
jmp @r1
nop
#elif defined __HAVE_SHARED__
mov.l r12, @-r15
sts.l pr, @-r15
mov r0, r4

View File

@ -28,6 +28,7 @@
This is how the dynamic linker arranges to have DT_FINI
functions called for shared libraries that have been loaded
before this code runs.
For FDPIC, this will be a function descriptor.
sp The stack contains the arguments and environment:
0(sp) argc
@ -37,6 +38,12 @@
(4*(argc+1))(sp) envp[0]
...
NULL
FDPIC only:
r8 Contains the address of the load map.
The type is "const struct elf32_fdpic_loadmap *".
See sh/bits/elf-fdpic.h.
*/
#include <features.h>
@ -49,6 +56,89 @@ _start:
/* Clear the frame pointer since this is the outermost frame. */
mov #0, r14
#ifdef __ARCH_HAS_NO_MMU__
/* uClinux doesn't seem to provide a valid loader finalization
function pointer. */
mov #0, r4
#endif
#ifdef __SH_FDPIC__
mov r4, r10 /* move to callee save */
/* GOT = __self_reloc (load_map, rofixup, rofixupend) */
mov r8, r4
mova L_rofixup, r0
mov.l L_rofixup, r5
add r0, r5
mova L_rofixupend, r0
mov.l L_rofixupend, r6
add r0, r6
mova L_self_reloc, r0
mov.l L_self_reloc, r1
add r0, r1
jsr @r1
nop
mov r0, r12
/* r12 now contains the GOT/FDPIC address. */
#ifdef __SH2A_DOUBLE__
/* The SH2A kernel doesn't initialize the FPSCR to double precision. */
mov r12, r13
#ifdef __FMOVD_ENABLED__
movi20s #0x180000, r4
#else
movi20s #0x80000, r4
#endif
mov.l L_set_fpscr, r0
mov.l @(r0,r12), r1
mov.l @(0,r1), r0
jsr @r0
mov.l @(4,r1), r12
mov r13, r12
#endif
/* Pop argc off the stack and save a pointer to argv */
mov.l @r15+,r5
mov r15, r6
/* In FDPIC, the GOT register is caller save. */
mov.l r12, @-r15
/* Push the stack_end, rtld_fini and fini func onto the stack */
mov.l r6,@-r15
mov.l r10,@-r15
mov.l L_fini,r0
add r12, r0
mov.l r0,@-r15
/* Set up the main/init funcs that go in registers */
mov.l L_main, r4
add r12, r4
mov.l L_init, r7
add r12, r7
/* __uClibc_main (main, argc, argv, init, fini, rtld_fini, stack_end) */
/* Let the libc call main and exit with its return code. */
mov.l L_uClibc_main,r0
mov.l @(r0,r12), r0
mov.l @(0,r0),r1
jsr @r1
mov.l @(4,r0), r12
/* We should not get here. */
add #12, r15 /* Pop parameters from stack. */
mov.l @r15+, r12
mov.l L_abort,r0
mov.l @(r0,r12), r0
mov.l @(0,r0),r1
jsr @r1
mov.l @(4,r0), r12
#elif defined __PIC__
/* Pop argc off the stack and save a pointer to argv */
mov.l @r15+,r5
mov r15, r6
@ -57,7 +147,6 @@ _start:
mov.l r6,@-r15
mov.l r4,@-r15
#ifdef __PIC__
mova L_got, r0
mov.l L_got, r12
add r0, r12
@ -85,6 +174,14 @@ _start:
jsr @r1
nop
#else
/* Pop argc off the stack and save a pointer to argv */
mov.l @r15+,r5
mov r15, r6
/* Push the stack_end, rtld_fini and fini func onto the stack */
mov.l r6,@-r15
mov.l r4,@-r15
mov.l L_fini,r0
mov.l r0,@-r15
@ -107,7 +204,32 @@ _start:
.size _start,.-_start
.align 2
#ifdef __PIC__
#ifdef __SH_FDPIC__
/* These are used before the FDPIC register is set, so must be relative.
The functions must be implemented without using r12. */
L_self_reloc:
.long __self_reloc@PCREL
L_rofixup:
.long __ROFIXUP_LIST__@PCREL
L_rofixupend:
.long __ROFIXUP_END__@PCREL
/* These must use FDPIC relocations. */
#ifdef __SH2A_DOUBLE__
L_set_fpscr:
.long __set_fpscr@GOTFUNCDESC
#endif
L_main:
.long main@GOTOFFFUNCDESC
L_init:
.long _init@GOTOFFFUNCDESC
L_fini:
.long _fini@GOTOFFFUNCDESC
L_uClibc_main:
.long __uClibc_main@GOTFUNCDESC
L_abort:
.long abort@GOTFUNCDESC
#elif defined __PIC__
L_got:
.long _GLOBAL_OFFSET_TABLE_
L_main:

View File

@ -0,0 +1,141 @@
/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
written by Alexandre Oliva <aoliva@redhat.com>
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
In addition to the permissions in the GNU Lesser General Public
License, the Free Software Foundation gives you unlimited
permission to link the compiled version of this file with other
programs, and to distribute those programs without any restriction
coming from the use of this file. (The GNU Lesser General Public
License restrictions do apply in other respects; for example, they
cover modification of the file, and distribution when not linked
into another program.)
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA. */
#include <sys/types.h>
#include <link.h>
#ifdef __LITTLE_ENDIAN__
#define BYTE(n) (n)
#else
#define BYTE(n) (3-(n))
#endif
/* This file is to be compiled into crt object files, to enable
executables to easily self-relocate. */
/* Reassemble a 32-bit value from a possibly unaligned location,
   one byte at a time, using the BYTE() index map to honour the
   target byte order. */
static __always_inline int
unaligned_read (int *src)
{
  const unsigned char *b = (const unsigned char *) src;
  int lo = b[BYTE(0)] | (b[BYTE(1)] << 8);
  int hi = (b[BYTE(2)] << 16) | (b[BYTE(3)] << 24);
  return lo | hi;
}
/* Store VAL into a possibly unaligned location one byte at a time,
   using the BYTE() index map to honour the target byte order. */
static __always_inline void
unaligned_write (int *dest, int val)
{
  char *out = (char *) dest;
  int i;
  for (i = 0; i < 4; i++)
    out[BYTE(i)] = (val >> (8 * i)) & 0xff;
}
/* Compute the runtime address of each pointer in the range [p,e), and
   then relocate the pointer stored at that address in place.
   Returns the final (== e) cursor. */
static __always_inline void ***
reloc_range_indirect (void ***p, void ***e,
const struct elf32_fdpic_loadmap *map)
{
while (p < e)
{
/* Relocate the address of the fixup slot itself.  NOTE(review):
   __reloc_pointer reports failure as (void*)-1, so this test only
   skips NULL slots -- confirm that is intended. */
void *ptr = __reloc_pointer (*p, map);
if (ptr)
{
/* Read the (possibly unaligned) pointer stored in the slot,
   relocate it, and write it back. */
void *pt = (void *)unaligned_read (ptr);
pt = __reloc_pointer (pt, map);
unaligned_write (ptr, (int)pt);
}
p++;
}
return p;
}
/* Call reloc_range_indirect for the given range except for the last
   entry, whose contents are only relocated.  It's expected to hold
   the GOT value.  Returns the relocated last-entry value (the GOT
   address), or (void*)-1 when the range holds no entries. */
void* attribute_hidden
__self_reloc (const struct elf32_fdpic_loadmap *map,
void ***p, void ***e)
{
p = reloc_range_indirect (p, e-1, map);
if (p >= e)
return (void*)-1;
return __reloc_pointer (*p, map);
}
#if 0
/* These are other functions that might be useful, but that we don't
need.  (Dead code: excluded from the build by this #if 0.) */
/* Remap pointers in [p,e). */
static __always_inline void**
reloc_range (void **p, void **e,
const struct elf32_fdpic_loadmap *map)
{
while (p < e)
{
*p = __reloc_pointer (*p, map);
p++;
}
return p;
}
/* Remap p, adjust e by the same offset, then map the pointers in the
range determined by them. */
void attribute_hidden
__reloc_range (const struct elf32_fdpic_loadmap *map,
void **p, void **e)
{
void **old = p;
p = __reloc_pointer (p, map);
e += p - old;
reloc_range (p, e, map);
}
/* Remap p, adjust e by the same offset, then map pointers referenced
by the (unadjusted) pointers in the range.  Return the relocated
value of the last pointer in the range. */
void* attribute_hidden
__reloc_range_indirect (const struct elf32_fdpic_loadmap *map,
void ***p, void ***e)
{
void ***old = p;
p = __reloc_pointer (p, map);
e += p - old;
return reloc_range_indirect (p, e, map);
}
#endif

View File

@ -77,7 +77,17 @@ __sigsetjmp_intern:
mov.l r9, @-r4
mov.l r8, @-r4
#ifdef __HAVE_SHARED__
#ifdef __SH_FDPIC__
/* Make a tail call to __sigjmp_save; it takes the same args. */
mov.l .L1, r0
mov.l @(r0,r12), r0
mov.l @(0,r0), r2
mov.l @(4,r0), r12
jmp @r2
mov r1, r0
.align 2
.L1: .long __sigjmp_save@GOTFUNCDESC
#elif defined __HAVE_SHARED__
mov.l .LG, r2
mova .LG, r0
add r0, r2

View File

@ -0,0 +1,176 @@
/* `ptrace' debugger support interface. Linux version.
Copyright (C) 1996-1999,2000,2006,2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#ifndef _SYS_PTRACE_H
#define _SYS_PTRACE_H 1
#include <features.h>
__BEGIN_DECLS
/* Type of the REQUEST argument to `ptrace.' */
enum __ptrace_request
{
/* Indicate that the process making this request should be traced.
All signals received by this process can be intercepted by its
parent, and its parent can use the other `ptrace' requests. */
PTRACE_TRACEME = 0,
#define PT_TRACE_ME PTRACE_TRACEME
/* Return the word in the process's text space at address ADDR. */
PTRACE_PEEKTEXT = 1,
#define PT_READ_I PTRACE_PEEKTEXT
/* Return the word in the process's data space at address ADDR. */
PTRACE_PEEKDATA = 2,
#define PT_READ_D PTRACE_PEEKDATA
/* Return the word in the process's user area at offset ADDR. */
PTRACE_PEEKUSER = 3,
#define PT_READ_U PTRACE_PEEKUSER
/* Write the word DATA into the process's text space at address ADDR. */
PTRACE_POKETEXT = 4,
#define PT_WRITE_I PTRACE_POKETEXT
/* Write the word DATA into the process's data space at address ADDR. */
PTRACE_POKEDATA = 5,
#define PT_WRITE_D PTRACE_POKEDATA
/* Write the word DATA into the process's user area at offset ADDR. */
PTRACE_POKEUSER = 6,
#define PT_WRITE_U PTRACE_POKEUSER
/* Continue the process. */
PTRACE_CONT = 7,
#define PT_CONTINUE PTRACE_CONT
/* Kill the process. */
PTRACE_KILL = 8,
#define PT_KILL PTRACE_KILL
/* Single step the process.
This is not supported on all machines. */
PTRACE_SINGLESTEP = 9,
#define PT_STEP PTRACE_SINGLESTEP
/* Get all general purpose registers used by a process.
This is not supported on all machines. */
PTRACE_GETREGS = 12,
#define PT_GETREGS PTRACE_GETREGS
/* Set all general purpose registers used by a process.
This is not supported on all machines. */
PTRACE_SETREGS = 13,
#define PT_SETREGS PTRACE_SETREGS
/* Get all floating point registers used by a process.
This is not supported on all machines. */
PTRACE_GETFPREGS = 14,
#define PT_GETFPREGS PTRACE_GETFPREGS
/* Set all floating point registers used by a process.
This is not supported on all machines. */
PTRACE_SETFPREGS = 15,
#define PT_SETFPREGS PTRACE_SETFPREGS
/* Attach to a process that is already running. */
PTRACE_ATTACH = 16,
#define PT_ATTACH PTRACE_ATTACH
/* Detach from a process attached to with PTRACE_ATTACH. */
PTRACE_DETACH = 17,
#define PT_DETACH PTRACE_DETACH
/* Get all extended floating point registers used by a process.
This is not supported on all machines. */
PTRACE_GETFPXREGS = 18,
#define PT_GETFPXREGS PTRACE_GETFPXREGS
/* Set all extended floating point registers used by a process.
This is not supported on all machines. */
PTRACE_SETFPXREGS = 19,
#define PT_SETFPXREGS PTRACE_SETFPXREGS
/* Continue and stop at the next (return from) syscall. */
PTRACE_SYSCALL = 24,
#define PT_SYSCALL PTRACE_SYSCALL
/* Obtain the load map of the main program or the interpreter of the
ptraced process, depending on whether the addr argument is
(void*)0 or (void*)1, respectively.  See the PTRACE_GETFDPIC_EXEC
and PTRACE_GETFDPIC_INTERP address selectors defined below. */
PTRACE_GETFDPIC = 31,
#define PT_GETFDPIC PTRACE_GETFDPIC
/* Set ptrace filter options (see enum __ptrace_setoptions below). */
PTRACE_SETOPTIONS = 0x4200,
#define PT_SETOPTIONS PTRACE_SETOPTIONS
/* Get last ptrace message. */
PTRACE_GETEVENTMSG = 0x4201,
#define PT_GETEVENTMSG PTRACE_GETEVENTMSG
/* Get siginfo for process. */
PTRACE_GETSIGINFO = 0x4202,
#define PT_GETSIGINFO PTRACE_GETSIGINFO
/* Set new siginfo for process. */
PTRACE_SETSIGINFO = 0x4203
#define PT_SETSIGINFO PTRACE_SETSIGINFO
};
#define PTRACE_GETFDPIC_EXEC ((void*)0) /* [addr] request the executable loadmap */
#define PTRACE_GETFDPIC_INTERP ((void*)1) /* [addr] request the interpreter loadmap */
/* Options set using PTRACE_SETOPTIONS.  Each flag enables reporting
   of the correspondingly named event (see enum __ptrace_eventcodes
   below). */
enum __ptrace_setoptions {
PTRACE_O_TRACESYSGOOD = 0x00000001,
PTRACE_O_TRACEFORK = 0x00000002,
PTRACE_O_TRACEVFORK = 0x00000004,
PTRACE_O_TRACECLONE = 0x00000008,
PTRACE_O_TRACEEXEC = 0x00000010,
PTRACE_O_TRACEVFORKDONE = 0x00000020,
PTRACE_O_TRACEEXIT = 0x00000040,
/* OR of all valid option bits above. */
PTRACE_O_MASK = 0x0000007f
};
/* Wait extended result codes for the above trace options.
   NOTE(review): presumably retrieved via PTRACE_GETEVENTMSG after an
   event stop -- confirm against ptrace(2). */
enum __ptrace_eventcodes {
PTRACE_EVENT_FORK = 1,
PTRACE_EVENT_VFORK = 2,
PTRACE_EVENT_CLONE = 3,
PTRACE_EVENT_EXEC = 4,
PTRACE_EVENT_VFORK_DONE = 5,
PTRACE_EVENT_EXIT = 6
};
/* Perform process tracing functions. REQUEST is one of the values
above, and determines the action to be taken.
For all requests except PTRACE_TRACEME, PID specifies the process to be
traced.
PID and the other arguments described above for the various requests should
appear (those that are used for the particular request) as:
pid_t PID, void *ADDR, int DATA, void *ADDR2
after REQUEST. */
extern long int ptrace (enum __ptrace_request __request, ...) __THROW;
__END_DECLS
#endif /* _SYS_PTRACE_H */

View File

@ -1,6 +1,19 @@
.align 4
__syscall_error:
/* Call errno_location, store '-r4' in errno and return -1 */
#ifdef __SH_FDPIC__
neg r4, r4
sts.l pr, @-r15
mov.l r4, @-r15
mov.l 1f, r0
mov.l @(r0,r12), r0
mov.l @(0,r0), r1
jsr @r1
mov.l @(4,r0), r12
mov.l @r15+, r4
lds.l @r15+, pr
mov.l r4, @r0
#else
mov.l r12, @-r15
sts.l pr, @-r15
#ifdef __HAVE_SHARED__
@ -20,6 +33,7 @@ __syscall_error:
mov.l r12, @r0
lds.l @r15+, pr
mov.l @r15+,r12
#endif
/* And just kick back a -1. */
rts
@ -27,7 +41,9 @@ __syscall_error:
.align 4
#ifdef __HAVE_SHARED__
#ifdef __SH_FDPIC__
1: .long __errno_location@GOTFUNCDESC
#elif defined __HAVE_SHARED__
1: .long __errno_location@GOT
.LG: .long _GLOBAL_OFFSET_TABLE_
#else

View File

@ -54,26 +54,35 @@ __asm__ ("\n\
_init:\n\
mov.l r12,@-r15\n\
mov.l r14,@-r15\n\
sts.l pr,@-r15\n\
mova .L22,r0\n\
sts.l pr,@-r15\n"
#ifndef __SH_FDPIC__
" mova .L22,r0\n\
mov.l .L22,r12\n\
add r0,r12\n\
mova .L24,r0\n\
add r0,r12\n"
#endif
" mova .L24,r0\n\
mov.l .L24,r1\n\
add r0,r1\n\
jsr @r1\n\
nop\n\
mova .L23,r0\n\
nop\n"
#ifdef __SH_FDPIC__
/* r12 is caller save in FDPIC, so it must be reloaded
if the next PLT call is to work. */
" mov.l @(8,r15), r12\n"
#endif
" mova .L23,r0\n\
mov.l .L23,r1\n\
add r0,r1\n\
jsr @r1\n\
mov r15,r14\n\
bra 1f\n\
nop\n\
.align 2\n\
.L22:\n\
.long _GLOBAL_OFFSET_TABLE_\n\
.L23:\n\
.align 2\n"
#ifndef __SH_FDPIC__
".L22:\n\
.long _GLOBAL_OFFSET_TABLE_\n"
#endif
".L23:\n\
.long __gmon_start__@PLT\n\
.L24:\n\
.long __pthread_initialize_minimal@PLT\n\
@ -112,19 +121,23 @@ __gmon_start__:\n\
_fini:\n\
mov.l r12,@-r15\n\
mov.l r14,@-r15\n\
sts.l pr,@-r15\n\
mova .L27,r0\n\
sts.l pr,@-r15\n"
#ifndef __SH_FDPIC__
" mova .L27,r0\n\
mov.l .L27,r12\n\
add r0,r12\n\
mov r15,r14\n\
add r0,r12\n"
#endif
" mov r15,r14\n\
ALIGN\n\
END_FINI\n\
bra 1f\n\
nop\n\
.align 2\n\
.L27:\n\
.long _GLOBAL_OFFSET_TABLE_\n\
1:\n\
.align 2\n"
#ifndef __SH_FDPIC__
".L27:\n\
.long _GLOBAL_OFFSET_TABLE_\n"
#endif
"1:\n\
/*@_fini_PROLOG_ENDS*/\n\
\n\
/*@_fini_EPILOG_BEGINS*/\n\

View File

@ -27,7 +27,10 @@
and the process ID of the new process to the old process. */
ENTRY (__vfork)
#ifdef SHARED
#ifdef __SH_FDPIC__
mov.l .Lpthread_func, r0
mov.l @(r12, r0), r0
#elif defined SHARED
mov.l .Lgot, r1
mova .Lgot, r0
add r0, r1
@ -53,7 +56,10 @@ ENTRY (__vfork)
nop
.L1: .word __NR_vfork
.align 2
#ifdef SHARED
#ifdef __SH_FDPIC__
/* Bug fix: the code above loads ".Lpthread_func" (mov.l .Lpthread_func, r0)
   but this label was spelled ".Lthread_func", leaving the referenced
   symbol undefined in the FDPIC branch.  Rename to match the use. */
.Lpthread_func:
.long __libc_pthread_functions@GOTOFFFUNCDESC
#elif defined SHARED
.Lgot:
.long _GLOBAL_OFFSET_TABLE_
.Lpthread_func: