uClibc (mirror of https://git.busybox.net/uClibc)

use uniform form of C99 keywords

Signed-off-by: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
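Editor's note — the motivation is not stated in the patch itself, so this is an assumption drawn from the change: plain `asm' and a bare `volatile' on an asm statement are GNU extensions that gcc rejects in strict ISO modes such as -std=c99, while the reserved-identifier spellings `__asm__' and `__volatile__' are accepted in every conformance mode. A minimal sketch (hypothetical example, not taken from the patch):

    /* With gcc -std=c99 the plain spelling is rejected:

           asm volatile ("" ::: "memory");    -- rejected in strict ISO mode

       The reserved spellings compile everywhere: */
    static inline void compiler_barrier(void)
    {
        __asm__ __volatile__ ("" ::: "memory");  /* clobbers memory, emits no code */
    }

The diff below applies exactly this uniform spelling across the tree.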
@@ -14,8 +14,8 @@
 #include <string.h>
 
 #define FPSCR_SR (1 << 20)
-#define STORE_FPSCR(x) __asm__ volatile("sts fpscr, %0" : "=r"(x))
-#define LOAD_FPSCR(x) __asm__ volatile("lds %0, fpscr" : : "r"(x))
+#define STORE_FPSCR(x) __asm__ __volatile__("sts fpscr, %0" : "=r"(x))
+#define LOAD_FPSCR(x) __asm__ __volatile__("lds %0, fpscr" : : "r"(x))
 
 static void fpu_optimised_copy_fwd(void *dest, const void *src, size_t len)
 {
@@ -51,24 +51,24 @@ static void fpu_optimised_copy_fwd(void *dest, const void *src, size_t len)
         LOAD_FPSCR(FPSCR_SR);
 
         while (len >= 32) {
-                __asm__ volatile ("fmov @%0+,dr0":"+r" (s1));
-                __asm__ volatile ("fmov @%0+,dr2":"+r" (s1));
-                __asm__ volatile ("fmov @%0+,dr4":"+r" (s1));
-                __asm__ volatile ("fmov @%0+,dr6":"+r" (s1));
+                __asm__ __volatile__ ("fmov @%0+,dr0":"+r" (s1));
+                __asm__ __volatile__ ("fmov @%0+,dr2":"+r" (s1));
+                __asm__ __volatile__ ("fmov @%0+,dr4":"+r" (s1));
+                __asm__ __volatile__ ("fmov @%0+,dr6":"+r" (s1));
                 __asm__
-                    volatile ("fmov dr0,@%0"::"r"
+                    __volatile__ ("fmov dr0,@%0"::"r"
                               (d1):"memory");
                 d1 += 2;
                 __asm__
-                    volatile ("fmov dr2,@%0"::"r"
+                    __volatile__ ("fmov dr2,@%0"::"r"
                               (d1):"memory");
                 d1 += 2;
                 __asm__
-                    volatile ("fmov dr4,@%0"::"r"
+                    __volatile__ ("fmov dr4,@%0"::"r"
                               (d1):"memory");
                 d1 += 2;
                 __asm__
-                    volatile ("fmov dr6,@%0"::"r"
+                    __volatile__ ("fmov dr6,@%0"::"r"
                               (d1):"memory");
                 d1 += 2;
                 len -= 32;
@@ -529,8 +529,8 @@ __inline_mathcodeNP (tanh, __x, \
 
 __inline_mathcodeNP (floor, __x, \
   register long double __value; \
-  __volatile unsigned short int __cw; \
-  __volatile unsigned short int __cwtmp; \
+  __volatile__ unsigned short int __cw; \
+  __volatile__ unsigned short int __cwtmp; \
   __asm__ __volatile__ ("fnstcw %0" : "=m" (__cw)); \
   __cwtmp = (__cw & 0xf3ff) | 0x0400; /* rounding down */ \
   __asm__ __volatile__ ("fldcw %0" : : "m" (__cwtmp)); \
@@ -540,8 +540,8 @@ __inline_mathcodeNP (floor, __x, \
 
 __inline_mathcodeNP (ceil, __x, \
   register long double __value; \
-  __volatile unsigned short int __cw; \
-  __volatile unsigned short int __cwtmp; \
+  __volatile__ unsigned short int __cw; \
+  __volatile__ unsigned short int __cwtmp; \
   __asm__ __volatile__ ("fnstcw %0" : "=m" (__cw)); \
   __cwtmp = (__cw & 0xf3ff) | 0x0800; /* rounding up */ \
   __asm__ __volatile__ ("fldcw %0" : : "m" (__cwtmp)); \
@@ -54,7 +54,7 @@
     register long _r15 __asm__ ("r15") = name; \
     long _retval; \
     LOAD_REGS_##nr \
-    __asm __volatile (BREAK_INSN (__IA64_BREAK_SYSCALL) \
+    __asm__ __volatile__ (BREAK_INSN (__IA64_BREAK_SYSCALL) \
                      : "=r" (_r8), "=r" (_r10), "=r" (_r15) \
                        ASM_OUTARGS_##nr \
                      : "2" (_r15) ASM_ARGS_##nr \
@@ -169,7 +169,7 @@ L(syse1):
 { \
     register long __v0 __asm__("$2") ncs_init; \
     register long __a3 __asm__("$7"); \
-    __asm__ volatile ( \
+    __asm__ __volatile__ ( \
     ".set\tnoreorder\n\t" \
     cs_init \
     "syscall\n\t" \
@@ -192,7 +192,7 @@ L(syse1):
     register long __v0 __asm__("$2") ncs_init; \
     register long __a0 __asm__("$4") = (long) arg1; \
     register long __a3 __asm__("$7"); \
-    __asm__ volatile ( \
+    __asm__ __volatile__ ( \
     ".set\tnoreorder\n\t" \
     cs_init \
     "syscall\n\t" \
@@ -216,7 +216,7 @@ L(syse1):
     register long __a0 __asm__("$4") = (long) arg1; \
     register long __a1 __asm__("$5") = (long) arg2; \
     register long __a3 __asm__("$7"); \
-    __asm__ volatile ( \
+    __asm__ __volatile__ ( \
     ".set\tnoreorder\n\t" \
     cs_init \
     "syscall\n\t" \
@@ -241,7 +241,7 @@ L(syse1):
     register long __a1 __asm__("$5") = (long) arg2; \
     register long __a2 __asm__("$6") = (long) arg3; \
     register long __a3 __asm__("$7"); \
-    __asm__ volatile ( \
+    __asm__ __volatile__ ( \
     ".set\tnoreorder\n\t" \
     cs_init \
     "syscall\n\t" \
@@ -266,7 +266,7 @@ L(syse1):
     register long __a1 __asm__("$5") = (long) arg2; \
     register long __a2 __asm__("$6") = (long) arg3; \
     register long __a3 __asm__("$7") = (long) arg4; \
-    __asm__ volatile ( \
+    __asm__ __volatile__ ( \
     ".set\tnoreorder\n\t" \
     cs_init \
     "syscall\n\t" \
@@ -298,7 +298,7 @@ L(syse1):
     register long __a1 __asm__("$5") = (long) arg2; \
     register long __a2 __asm__("$6") = (long) arg3; \
     register long __a3 __asm__("$7") = (long) arg4; \
-    __asm__ volatile ( \
+    __asm__ __volatile__ ( \
     ".set\tnoreorder\n\t" \
     "subu\t$29, 32\n\t" \
     "sw\t%6, 16($29)\n\t" \
@@ -328,7 +328,7 @@ L(syse1):
     register long __a1 __asm__("$5") = (long) arg2; \
     register long __a2 __asm__("$6") = (long) arg3; \
     register long __a3 __asm__("$7") = (long) arg4; \
-    __asm__ volatile ( \
+    __asm__ __volatile__ ( \
     ".set\tnoreorder\n\t" \
     "subu\t$29, 32\n\t" \
     "sw\t%6, 16($29)\n\t" \
@@ -359,7 +359,7 @@ L(syse1):
     register long __a1 __asm__("$5") = (long) arg2; \
     register long __a2 __asm__("$6") = (long) arg3; \
     register long __a3 __asm__("$7") = (long) arg4; \
-    __asm__ volatile ( \
+    __asm__ __volatile__ ( \
     ".set\tnoreorder\n\t" \
     "subu\t$29, 32\n\t" \
     "sw\t%6, 16($29)\n\t" \
@@ -341,7 +341,7 @@
  * So if the build is using -mcpu=[power4,power5,power5+,970] we can
  * safely use lwsync.
  */
-# define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
+# define atomic_read_barrier()	__asm__ ("lwsync" ::: "memory")
 /*
  * "light weight" sync can also be used for the release barrier.
  */
@@ -80,7 +80,7 @@ typedef uintmax_t uatomic_max_t;
 
 #define __arch_compare_and_exchange_n(mem, newval, oldval, bwl, version) \
   ({ signed long __result; \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
@@ -139,7 +139,7 @@ typedef uintmax_t uatomic_max_t;
 */
 
 #define __arch_operate_old_new_n(mem, value, old, new, bwl, oper) \
-  (void) ({ __asm __volatile ("\
+  (void) ({ __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
@@ -185,7 +185,7 @@ typedef uintmax_t uatomic_max_t;
 
 #define __arch_operate_new_n(mem, value, bwl, oper) \
   ({ int32_t __value = (value), __new; \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
@@ -119,7 +119,7 @@
     register long int r3 __asm__ ("%r3") = (name); \
     SUBSTITUTE_ARGS_##nr(args); \
 \
-    __asm__ volatile (SYSCALL_INST_STR##nr SYSCALL_INST_PAD \
+    __asm__ __volatile__ (SYSCALL_INST_STR##nr SYSCALL_INST_PAD \
                      : "=z" (resultvar) \
                      : "r" (r3) ASMFMT_##nr \
                      : "memory"); \
@@ -69,7 +69,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
     unsigned int __idx = (((long) addr >> 2) ^ ((long) addr >> 12)) \
                          & 63; \
     do \
-      __asm __volatile ("ldstub %1, %0" \
+      __asm__ __volatile__ ("ldstub %1, %0" \
                         : "=r" (__old_lock), \
                           "=m" (__sparc32_atomic_locks[__idx]) \
                         : "m" (__sparc32_atomic_locks[__idx]) \
@@ -83,7 +83,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
   { \
     __sparc32_atomic_locks[(((long) addr >> 2) \
                             ^ ((long) addr >> 12)) & 63] = 0; \
-    __asm __volatile ("" ::: "memory"); \
+    __asm__ __volatile__ ("" ::: "memory"); \
   } \
 while (0)
 
@@ -92,7 +92,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
   { \
     unsigned int __old_lock; \
     do \
-      __asm __volatile ("ldstub %1, %0" \
+      __asm__ __volatile__ ("ldstub %1, %0" \
                         : "=r" (__old_lock), "=m" (*(addr)) \
                         : "m" (*(addr)) \
                         : "memory"); \
@@ -104,7 +104,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
 do \
   { \
     *(char *) (addr) = 0; \
-    __asm __volatile ("" ::: "memory"); \
+    __asm__ __volatile__ ("" ::: "memory"); \
   } \
 while (0)
 
@@ -112,14 +112,14 @@ volatile unsigned char __sparc32_atomic_locks[64]
 #ifndef SHARED
 # define __v9_compare_and_exchange_val_32_acq(mem, newval, oldval) \
 ({ \
-  register __typeof (*(mem)) __acev_tmp __asm ("%g6"); \
-  register __typeof (mem) __acev_mem __asm ("%g1") = (mem); \
-  register __typeof (*(mem)) __acev_oldval __asm ("%g5"); \
+  register __typeof (*(mem)) __acev_tmp __asm__ ("%g6"); \
+  register __typeof (mem) __acev_mem __asm__ ("%g1") = (mem); \
+  register __typeof (*(mem)) __acev_oldval __asm__ ("%g5"); \
   __acev_tmp = (newval); \
  __acev_oldval = (oldval); \
   /* .word 0xcde05005 is cas [%g1], %g5, %g6. Can't use cas here though, \
     because as will then mark the object file as V8+ arch. */ \
-  __asm __volatile (".word 0xcde05005" \
+  __asm__ __volatile__ (".word 0xcde05005" \
                    : "+r" (__acev_tmp), "=m" (*__acev_mem) \
                    : "r" (__acev_oldval), "m" (*__acev_mem), \
                      "r" (__acev_mem) : "memory"); \
@@ -187,7 +187,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
     *__acev_memp = __acev_newval; \
   else \
     __sparc32_atomic_do_unlock24 (__acev_memp); \
-  __asm __volatile ("" ::: "memory"); \
+  __asm__ __volatile__ ("" ::: "memory"); \
   __acev_ret; })
 
 #define __v7_exchange_24_rel(mem, newval) \
@@ -198,7 +198,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
   __sparc32_atomic_do_lock24 (__acev_memp); \
   __acev_ret = *__acev_memp & 0xffffff; \
   *__acev_memp = __acev_newval; \
-  __asm __volatile ("" ::: "memory"); \
+  __asm__ __volatile__ ("" ::: "memory"); \
   __acev_ret; })
 
 #ifdef SHARED
@@ -78,7 +78,7 @@
     register long __o3 __asm__ ("o3") = (long)(arg4); \
     register long __o4 __asm__ ("o4") = (long)(arg5); \
     register long __g1 __asm__ ("g1") = __NR_clone; \
-    __asm __volatile (__CLONE_SYSCALL_STRING : \
+    __asm__ __volatile__ (__CLONE_SYSCALL_STRING : \
                      "=r" (__g1), "=r" (__o0), "=r" (__o1) : \
                      "0" (__g1), "1" (__o0), "2" (__o1), \
                      "r" (__o2), "r" (__o3), "r" (__o4) : \
@@ -66,7 +66,7 @@ long long func##l(long double x) \
 long double func##l(long double x) \
 { \
     long double st_top; \
-    __asm ( \
+    __asm__ ( \
     " fldt %1\n" \
     " fstpl %1\n" \
     " jmp " __stringify(__GI_##func) "\n" \
@@ -79,7 +79,7 @@ long double func##l(long double x) \
 int func##l(long double x) \
 { \
     int ret; \
-    __asm ( \
+    __asm__ ( \
     " fldt %1\n" \
     " fstpl %1\n" \
     " jmp " __stringify(__GI_##func) "\n" \
@@ -92,7 +92,7 @@ int func##l(long double x) \
 long func##l(long double x) \
 { \
     long ret; \
-    __asm ( \
+    __asm__ ( \
     " fldt %1\n" \
     " fstpl %1\n" \
     " jmp " __stringify(__GI_##func) "\n" \
@@ -105,7 +105,7 @@ long func##l(long double x) \
 long long func##l(long double x) \
 { \
     long long ret; \
-    __asm ( \
+    __asm__ ( \
     " fldt %1\n" \
     " fstpl %1\n" \
     " jmp " __stringify(__GI_##func) "\n" \
@@ -211,17 +211,17 @@ extern int __kernel_rem_pio2 (double*,double*,int,int,int,const int*) attribu
 #define math_opt_barrier(x) ({ \
     __typeof(x) __x = (x); \
     /* "t": load x into top-of-stack fpreg */ \
-    __asm ("" : "=t" (__x) : "0" (__x)); \
+    __asm__ ("" : "=t" (__x) : "0" (__x)); \
     __x; \
 })
 #define math_force_eval(x) do { \
     __typeof(x) __x = (x); \
     if (sizeof(__x) <= sizeof(double)) \
         /* "m": store x into a memory location */ \
-        __asm __volatile ("" : : "m" (__x)); \
+        __asm__ __volatile__ ("" : : "m" (__x)); \
     else /* long double */ \
         /* "f": load x into (any) fpreg */ \
-        __asm __volatile ("" : : "f" (__x)); \
+        __asm__ __volatile__ ("" : : "f" (__x)); \
 } while (0)
 #endif
 
@@ -230,29 +230,29 @@ extern int __kernel_rem_pio2 (double*,double*,int,int,int,const int*) attribu
     __typeof(x) __x = (x); \
     if (sizeof(__x) <= sizeof(double)) \
         /* "x": load into XMM SSE register */ \
-        __asm ("" : "=x" (__x) : "0" (__x)); \
+        __asm__ ("" : "=x" (__x) : "0" (__x)); \
     else /* long double */ \
         /* "t": load x into top-of-stack fpreg */ \
-        __asm ("" : "=t" (__x) : "0" (__x)); \
+        __asm__ ("" : "=t" (__x) : "0" (__x)); \
     __x; \
 })
 #define math_force_eval(x) do { \
     __typeof(x) __x = (x); \
     if (sizeof(__x) <= sizeof(double)) \
         /* "x": load into XMM SSE register */ \
-        __asm __volatile ("" : : "x" (__x)); \
+        __asm__ __volatile__ ("" : : "x" (__x)); \
     else /* long double */ \
         /* "f": load x into (any) fpreg */ \
-        __asm __volatile ("" : : "f" (__x)); \
+        __asm__ __volatile__ ("" : : "f" (__x)); \
 } while (0)
 #endif
 
 /* Default implementations force store to a memory location */
 #ifndef math_opt_barrier
-#define math_opt_barrier(x) ({ __typeof(x) __x = (x); __asm ("" : "+m" (__x)); __x; })
+#define math_opt_barrier(x) ({ __typeof(x) __x = (x); __asm__ ("" : "+m" (__x)); __x; })
 #endif
 #ifndef math_force_eval
-#define math_force_eval(x) do { __typeof(x) __x = (x); __asm __volatile ("" : : "m" (__x)); } while (0)
+#define math_force_eval(x) do { __typeof(x) __x = (x); __asm__ __volatile__ ("" : : "m" (__x)); } while (0)
 #endif
@@ -275,7 +275,7 @@ __pthread_initialize_minimal_internal (void)
      following code ahead of the __libc_setup_tls call. This function
      will initialize the thread register which is subsequently
      used. */
-  __asm __volatile ("");
+  __asm__ __volatile__ ("");
 #endif
 
   /* Minimal initialization of the thread descriptor. */
@@ -25,9 +25,9 @@
 /* Macros to load from and store into segment registers. We can use
    the 32-bit instructions. */
 #define TLS_GET_GS() \
-  ({ int __seg; __asm ("movl %%gs, %0" : "=q" (__seg)); __seg; })
+  ({ int __seg; __asm__ ("movl %%gs, %0" : "=q" (__seg)); __seg; })
 #define TLS_SET_GS(val) \
-  __asm ("movl %0, %%gs" :: "q" (val))
+  __asm__ ("movl %0, %%gs" :: "q" (val))
 
 
 /* Get the full set of definitions. */
@@ -41,8 +41,8 @@
 #define __exit_thread_inline(val) \
   while (1) { \
     if (__builtin_constant_p (val) && (val) == 0) \
-      __asm__ volatile ("xorl %%ebx, %%ebx; int $0x80" :: "a" (__NR_exit)); \
+      __asm__ __volatile__ ("xorl %%ebx, %%ebx; int $0x80" :: "a" (__NR_exit)); \
    else \
-      __asm__ volatile ("movl %1, %%ebx; int $0x80" \
+      __asm__ __volatile__ ("movl %1, %%ebx; int $0x80" \
                        :: "a" (__NR_exit), "r" (val)); \
   }
@@ -232,7 +232,7 @@ union user_desc_init
       _segdescr.vals[3] = 0x51; \
 \
       /* Install the TLS. */ \
-      __asm__ volatile (TLS_LOAD_EBX \
+      __asm__ __volatile__ (TLS_LOAD_EBX \
                        "int $0x80\n\t" \
                        TLS_LOAD_EBX \
                        : "=a" (_result), "=m" (_segdescr.desc.entry_number) \
@@ -262,7 +262,7 @@ union user_desc_init
 
 /* Return the thread descriptor for the current thread.
 
-   The contained asm must *not* be marked volatile since otherwise
+   The contained asm must *not* be marked __volatile__ since otherwise
    assignments like
      pthread_descr self = thread_self();
    do not get optimized away. */
@@ -282,11 +282,11 @@ union user_desc_init
 # define THREAD_GETMEM(descr, member) \
   ({ __typeof (descr->member) __value; \
      if (sizeof (__value) == 1) \
-       __asm__ volatile ("movb %%gs:%P2,%b0" \
+       __asm__ __volatile__ ("movb %%gs:%P2,%b0" \
                         : "=q" (__value) \
                         : "0" (0), "i" (offsetof (struct pthread, member))); \
     else if (sizeof (__value) == 4) \
-       __asm__ volatile ("movl %%gs:%P1,%0" \
+       __asm__ __volatile__ ("movl %%gs:%P1,%0" \
                         : "=r" (__value) \
                         : "i" (offsetof (struct pthread, member))); \
     else \
@@ -296,7 +296,7 @@ union user_desc_init
           4 or 8. */ \
        abort (); \
 \
-       __asm__ volatile ("movl %%gs:%P1,%%eax\n\t" \
+       __asm__ __volatile__ ("movl %%gs:%P1,%%eax\n\t" \
                         "movl %%gs:%P2,%%edx" \
                         : "=A" (__value) \
                         : "i" (offsetof (struct pthread, member)), \
@@ -309,12 +309,12 @@ union user_desc_init
 # define THREAD_GETMEM_NC(descr, member, idx) \
   ({ __typeof (descr->member[0]) __value; \
     if (sizeof (__value) == 1) \
-       __asm__ volatile ("movb %%gs:%P2(%3),%b0" \
+       __asm__ __volatile__ ("movb %%gs:%P2(%3),%b0" \
                         : "=q" (__value) \
                         : "0" (0), "i" (offsetof (struct pthread, member[0])), \
                          "r" (idx)); \
     else if (sizeof (__value) == 4) \
-       __asm__ volatile ("movl %%gs:%P1(,%2,4),%0" \
+       __asm__ __volatile__ ("movl %%gs:%P1(,%2,4),%0" \
                         : "=r" (__value) \
                         : "i" (offsetof (struct pthread, member[0])), \
                          "r" (idx)); \
@@ -325,7 +325,7 @@ union user_desc_init
           4 or 8. */ \
        abort (); \
 \
-       __asm__ volatile ("movl %%gs:%P1(,%2,8),%%eax\n\t" \
+       __asm__ __volatile__ ("movl %%gs:%P1(,%2,8),%%eax\n\t" \
                         "movl %%gs:4+%P1(,%2,8),%%edx" \
                         : "=&A" (__value) \
                         : "i" (offsetof (struct pthread, member[0])), \
@@ -337,11 +337,11 @@ union user_desc_init
 /* Same as THREAD_SETMEM, but the member offset can be non-constant. */
 # define THREAD_SETMEM(descr, member, value) \
   ({ if (sizeof (descr->member) == 1) \
-       __asm__ volatile ("movb %b0,%%gs:%P1" : \
+       __asm__ __volatile__ ("movb %b0,%%gs:%P1" : \
                         : "iq" (value), \
                          "i" (offsetof (struct pthread, member))); \
     else if (sizeof (descr->member) == 4) \
-       __asm__ volatile ("movl %0,%%gs:%P1" : \
+       __asm__ __volatile__ ("movl %0,%%gs:%P1" : \
                         : "ir" (value), \
                          "i" (offsetof (struct pthread, member))); \
     else \
@@ -351,7 +351,7 @@ union user_desc_init
           4 or 8. */ \
        abort (); \
 \
-       __asm__ volatile ("movl %%eax,%%gs:%P1\n\t" \
+       __asm__ __volatile__ ("movl %%eax,%%gs:%P1\n\t" \
                         "movl %%edx,%%gs:%P2" : \
                         : "A" (value), \
                           "i" (offsetof (struct pthread, member)), \
@@ -362,12 +362,12 @@ union user_desc_init
 /* Set member of the thread descriptor directly. */
 # define THREAD_SETMEM_NC(descr, member, idx, value) \
   ({ if (sizeof (descr->member[0]) == 1) \
-       __asm__ volatile ("movb %b0,%%gs:%P1(%2)" : \
+       __asm__ __volatile__ ("movb %b0,%%gs:%P1(%2)" : \
                         : "iq" (value), \
                          "i" (offsetof (struct pthread, member)), \
                          "r" (idx)); \
     else if (sizeof (descr->member[0]) == 4) \
-       __asm__ volatile ("movl %0,%%gs:%P1(,%2,4)" : \
+       __asm__ __volatile__ ("movl %0,%%gs:%P1(,%2,4)" : \
                         : "ir" (value), \
                          "i" (offsetof (struct pthread, member)), \
                          "r" (idx)); \
@@ -378,7 +378,7 @@ union user_desc_init
           4 or 8. */ \
        abort (); \
 \
-       __asm__ volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t" \
+       __asm__ __volatile__ ("movl %%eax,%%gs:%P1(,%2,8)\n\t" \
                         "movl %%edx,%%gs:4+%P1(,%2,8)" : \
                         : "A" (value), \
                           "i" (offsetof (struct pthread, member)), \
@@ -391,7 +391,7 @@ union user_desc_init
   ({ __typeof (descr->member) __ret; \
     __typeof (oldval) __old = (oldval); \
     if (sizeof (descr->member) == 4) \
-       __asm__ volatile (LOCK_PREFIX "cmpxchgl %2, %%gs:%P3" \
+       __asm__ __volatile__ (LOCK_PREFIX "cmpxchgl %2, %%gs:%P3" \
                         : "=a" (__ret) \
                         : "0" (__old), "r" (newval), \
                          "i" (offsetof (struct pthread, member))); \
@@ -404,7 +404,7 @@ union user_desc_init
 /* Atomic logical and. */
 #define THREAD_ATOMIC_AND(descr, member, val) \
   (void) ({ if (sizeof ((descr)->member) == 4) \
-             __asm__ volatile (LOCK_PREFIX "andl %1, %%gs:%P0" \
+             __asm__ __volatile__ (LOCK_PREFIX "andl %1, %%gs:%P0" \
                               :: "i" (offsetof (struct pthread, member)), \
                                 "ir" (val)); \
           else \
@@ -415,7 +415,7 @@ union user_desc_init
 /* Atomic set bit. */
 #define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
   (void) ({ if (sizeof ((descr)->member) == 4) \
-             __asm__ volatile (LOCK_PREFIX "orl %1, %%gs:%P0" \
+             __asm__ __volatile__ (LOCK_PREFIX "orl %1, %%gs:%P0" \
                               :: "i" (offsetof (struct pthread, member)), \
                                 "ir" (1 << (bit))); \
           else \
@@ -427,7 +427,7 @@ union user_desc_init
 #define CALL_THREAD_FCT(descr) \
   ({ void *__res; \
     int __ignore1, __ignore2; \
-     __asm__ volatile ("pushl %%eax\n\t" \
+     __asm__ __volatile__ ("pushl %%eax\n\t" \
                       "pushl %%eax\n\t" \
                       "pushl %%eax\n\t" \
                       "pushl %%gs:%P4\n\t" \
@@ -462,7 +462,7 @@ union user_desc_init
 #define THREAD_GSCOPE_RESET_FLAG() \
   do \
     { int __res; \
-      __asm__ volatile ("xchgl %0, %%gs:%P1" \
+      __asm__ __volatile__ ("xchgl %0, %%gs:%P1" \
                       : "=r" (__res) \
                       : "i" (offsetof (struct pthread, header.gscope_flag)), \
                         "0" (THREAD_GSCOPE_FLAG_UNUSED)); \
@@ -43,13 +43,13 @@
 #define SECTION(x) __asm__ (".section " x )
 
 /* Embed an #include to pull in the alignment and .end directives. */
-asm ("\n#include \"defs.h\"");
+__asm__ ("\n#include \"defs.h\"");
 
 /* The initial common code ends here. */
-asm ("\n/*@HEADER_ENDS*/");
+__asm__ ("\n/*@HEADER_ENDS*/");
 
 /* To determine whether we need .end and .align: */
-asm ("\n/*@TESTS_BEGIN*/");
+__asm__ ("\n/*@TESTS_BEGIN*/");
 extern void dummy (void (*foo) (void));
 void
 dummy (void (*foo) (void))
@@ -57,10 +57,10 @@ dummy (void (*foo) (void))
   if (foo)
     (*foo) ();
 }
-asm ("\n/*@TESTS_END*/");
+__asm__ ("\n/*@TESTS_END*/");
 
 /* The beginning of _init: */
-asm ("\n/*@_init_PROLOG_BEGINS*/");
+__asm__ ("\n/*@_init_PROLOG_BEGINS*/");
 
 static void
 call_initialize_minimal (void)
@@ -79,18 +79,18 @@ _init (void)
   /* The very first thing we must do is to set up the registers. */
   call_initialize_minimal ();
 
-  asm ("ALIGN");
-  asm("END_INIT");
+  __asm__ ("ALIGN");
+  __asm__("END_INIT");
   /* Now the epilog. */
-  asm ("\n/*@_init_PROLOG_ENDS*/");
-  asm ("\n/*@_init_EPILOG_BEGINS*/");
+  __asm__ ("\n/*@_init_PROLOG_ENDS*/");
+  __asm__ ("\n/*@_init_EPILOG_BEGINS*/");
   SECTION(".init");
 }
-asm ("END_INIT");
+__asm__ ("END_INIT");
 
 /* End of the _init epilog, beginning of the _fini prolog. */
-asm ("\n/*@_init_EPILOG_ENDS*/");
-asm ("\n/*@_fini_PROLOG_BEGINS*/");
+__asm__ ("\n/*@_init_EPILOG_ENDS*/");
+__asm__ ("\n/*@_fini_PROLOG_BEGINS*/");
 
 SECTION (".fini");
 extern void __attribute__ ((section (".fini"))) _fini (void);
@@ -99,9 +99,9 @@ _fini (void)
 {
 
   /* End of the _fini prolog. */
-  asm ("ALIGN");
-  asm ("END_FINI");
-  asm ("\n/*@_fini_PROLOG_ENDS*/");
+  __asm__ ("ALIGN");
+  __asm__ ("END_FINI");
+  __asm__ ("\n/*@_fini_PROLOG_ENDS*/");
 
   {
     /* Let GCC know that _fini is not a leaf function by having a dummy
@@ -112,14 +112,14 @@ _fini (void)
   }
 
   /* Beginning of the _fini epilog. */
-  asm ("\n/*@_fini_EPILOG_BEGINS*/");
+  __asm__ ("\n/*@_fini_EPILOG_BEGINS*/");
   SECTION (".fini");
 }
-asm ("END_FINI");
+__asm__ ("END_FINI");
 
 /* End of the _fini epilog. Any further generated assembly (e.g. .ident)
    is shared between both crt files. */
-asm ("\n/*@_fini_EPILOG_ENDS*/");
-asm ("\n/*@TRAILER_BEGINS*/");
+__asm__ ("\n/*@_fini_EPILOG_ENDS*/");
+__asm__ ("\n/*@TRAILER_BEGINS*/");
 
 /* End of file. */
@@ -50,7 +50,7 @@ pthread_cancel_init (void)
   if (__builtin_expect (libgcc_s_handle != NULL, 1))
     {
       /* Force gcc to reload all values. */
-      __asm__ volatile ("" ::: "memory");
+      __asm__ __volatile__ ("" ::: "memory");
       return;
     }
 
@@ -24,7 +24,7 @@ pthread_spin_lock (pthread_spinlock_t *lock)
   unsigned int val;
 
   do
-    __asm__ volatile ("tas.b @%1; movt %0"
+    __asm__ __volatile__ ("tas.b @%1; movt %0"
                      : "=&r" (val)
                      : "r" (lock)
                      : "memory");
@@ -41,9 +41,9 @@
 #define __exit_thread_inline(val) \
   while (1) { \
     if (__builtin_constant_p (val) && (val) == 0) \
-      __asm__ volatile ("mov #0,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
+      __asm__ __volatile__ ("mov #0,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
                        :: "i" (__NR_exit)); \
    else \
-      __asm__ volatile ("mov %1,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
+      __asm__ __volatile__ ("mov %1,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
                        :: "i" (__NR_exit), "r" (val)); \
   }
@@ -94,7 +94,7 @@ typedef struct
 /* Install new dtv for current thread. */
 # define INSTALL_NEW_DTV(dtv) \
   ({ tcbhead_t *__tcbp; \
-     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+     __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp)); \
     __tcbp->dtv = (dtv);})
 
 /* Return dtv of given thread descriptor. */
@@ -105,12 +105,12 @@ typedef struct
    special attention since 'errno' is not yet available and if the
   operation can cause a failure 'errno' must not be touched. */
 # define TLS_INIT_TP(tcbp, secondcall) \
-  ({ __asm __volatile ("ldc %0,gbr" : : "r" (tcbp)); 0; })
+  ({ __asm__ __volatile__ ("ldc %0,gbr" : : "r" (tcbp)); 0; })
 
 /* Return the address of the dtv for the current thread. */
 # define THREAD_DTV() \
   ({ tcbhead_t *__tcbp; \
-     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+     __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp)); \
     __tcbp->dtv;})
 
 /* Return the thread descriptor for the current thread.
@@ -120,7 +120,7 @@ typedef struct
   do not get optimized away. */
 # define THREAD_SELF \
   ({ struct pthread *__self; \
-     __asm ("stc gbr,%0" : "=r" (__self)); \
+     __asm__ ("stc gbr,%0" : "=r" (__self)); \
     __self - 1;})
 
 /* Magic for libthread_db to know how to do THREAD_SELF. */
@@ -143,15 +143,15 @@ typedef struct
 
 #define THREAD_GET_POINTER_GUARD() \
   ({ tcbhead_t *__tcbp; \
-     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+     __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp)); \
     __tcbp->pointer_guard;})
 #define THREAD_SET_POINTER_GUARD(value) \
   ({ tcbhead_t *__tcbp; \
-     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+     __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp)); \
     __tcbp->pointer_guard = (value);})
 #define THREAD_COPY_POINTER_GUARD(descr) \
   ({ tcbhead_t *__tcbp; \
-     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+     __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp)); \
    ((tcbhead_t *) (descr + 1))->pointer_guard = __tcbp->pointer_guard;})
 
 /* Get and set the global scope generation counter in struct pthread. */
@@ -22,7 +22,7 @@
 int
 pthread_spin_lock (pthread_spinlock_t *lock)
 {
-  __asm __volatile
+  __asm__ __volatile
    ("1: ldstub [%0], %%g2\n"
    " orcc %%g2, 0x0, %%g0\n"
    " bne,a 2f\n"
@@ -24,6 +24,6 @@ int
 pthread_spin_trylock (pthread_spinlock_t *lock)
 {
   int res;
-  __asm __volatile ("ldstub [%1], %0" : "=r" (res) : "r" (lock) : "memory");
+  __asm__ __volatile__ ("ldstub [%1], %0" : "=r" (res) : "r" (lock) : "memory");
   return res == 0 ? 0 : EBUSY;
 }
@@ -22,7 +22,7 @@
 int
 pthread_spin_lock (pthread_spinlock_t *lock)
 {
-  __asm __volatile
+  __asm__ __volatile
    ("1: ldstub [%0], %%g2\n"
    " brnz,pn %%g2, 2f\n"
    " membar #StoreLoad | #StoreStore\n"
@@ -22,7 +22,7 @@
 int
 pthread_spin_lock (pthread_spinlock_t *lock)
 {
-  __asm __volatile
+  __asm__ __volatile
    ("1: ldstub [%0], %%g5\n"
    " brnz,pn %%g5, 2f\n"
    " membar #StoreLoad | #StoreStore\n"
@@ -24,7 +24,7 @@ int
 pthread_spin_trylock (pthread_spinlock_t *lock)
 {
   int res;
-  __asm __volatile
+  __asm__ __volatile
    ("ldstub [%1], %0\n"
    "membar #StoreLoad | #StoreStore"
    : "=r" (res)
@@ -24,7 +24,7 @@
 int
 pthread_spin_unlock (pthread_spinlock_t *lock)
 {
-  __asm __volatile ("membar #StoreStore | #LoadStore");
+  __asm__ __volatile__ ("membar #StoreStore | #LoadStore");
   *lock = 0;
   return 0;
 }
@@ -49,7 +49,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
         Do this atomically.
       */
       newval = __fork_generation | 1;
-      __asm __volatile (
+      __asm__ __volatile__ (
                "1: ldl_l %0, %2\n"
                " and %0, 2, %1\n"
                " bne %1, 2f\n"
@@ -72,11 +72,11 @@ void __arm_link_error (void);
 /* Thumb-2 has ldrex/strex. However it does not have barrier instructions,
    so we still need to use the kernel helper. */
 #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
-  ({ register __typeof (oldval) a_oldval asm ("r0"); \
-     register __typeof (oldval) a_newval asm ("r1") = (newval); \
-     register __typeof (mem) a_ptr asm ("r2") = (mem); \
-     register __typeof (oldval) a_tmp asm ("r3"); \
-     register __typeof (oldval) a_oldval2 asm ("r4") = (oldval); \
+  ({ register __typeof (oldval) a_oldval __asm__ ("r0"); \
+     register __typeof (oldval) a_newval __asm__ ("r1") = (newval); \
+     register __typeof (mem) a_ptr __asm__ ("r2") = (mem); \
+     register __typeof (oldval) a_tmp __asm__ ("r3"); \
+     register __typeof (oldval) a_oldval2 __asm__ ("r4") = (oldval); \
     __asm__ __volatile__ \
     ("0:\tldr\t%[tmp],[%[ptr]]\n\t" \
     "cmp\t%[tmp], %[old2]\n\t" \
@@ -95,11 +95,11 @@ void __arm_link_error (void);
     a_tmp; })
 #else
 #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
-  ({ register __typeof (oldval) a_oldval asm ("r0"); \
-     register __typeof (oldval) a_newval asm ("r1") = (newval); \
-     register __typeof (mem) a_ptr asm ("r2") = (mem); \
-     register __typeof (oldval) a_tmp asm ("r3"); \
-     register __typeof (oldval) a_oldval2 asm ("r4") = (oldval); \
+  ({ register __typeof (oldval) a_oldval __asm__ ("r0"); \
+     register __typeof (oldval) a_newval __asm__ ("r1") = (newval); \
+     register __typeof (mem) a_ptr __asm__ ("r2") = (mem); \
+     register __typeof (oldval) a_tmp __asm__ ("r3"); \
+     register __typeof (oldval) a_oldval2 __asm__ ("r4") = (oldval); \
     __asm__ __volatile__ \
     ("0:\tldr\t%[tmp],[%[ptr]]\n\t" \
     "cmp\t%[tmp], %[old2]\n\t" \
@@ -40,7 +40,7 @@ pthread_cancel_init (void)
   if (__builtin_expect (libgcc_s_handle != NULL, 1))
     {
       /* Force gcc to reload all values. */
-      asm volatile ("" ::: "memory");
+      __asm__ __volatile__ ("" ::: "memory");
       return;
     }
 
@@ -85,7 +85,7 @@ __unwind_freeres (void)
    ARM unwinder relies on register state at entrance. So we write this in
    assembly. */
 
-asm (
+__asm__ (
 " .globl _Unwind_Resume\n"
 " .type _Unwind_Resume, %function\n"
 "_Unwind_Resume:\n"
@@ -48,7 +48,7 @@ init (void)
    ARM unwinder relies on register state at entrance. So we write this in
    assembly. */
 
-asm (
+__asm__ (
 " .globl _Unwind_Resume\n"
 " .type _Unwind_Resume, %function\n"
 "_Unwind_Resume:\n"
@@ -210,7 +210,7 @@ LLL_STUB_UNWIND_INFO_END
   ({ \
     int __status; \
     register __typeof (val) _val __asm__ ("edx") = (val); \
-    __asm__ __volatile (LLL_EBX_LOAD \
+    __asm__ __volatile__ (LLL_EBX_LOAD \
                       LLL_ENTER_KERNEL \
                       LLL_EBX_LOAD \
                      : "=a" (__status) \
@@ -226,7 +226,7 @@ LLL_STUB_UNWIND_INFO_END
   do { \
     int __ignore; \
     register __typeof (nr) _nr __asm__ ("edx") = (nr); \
-    __asm__ __volatile (LLL_EBX_LOAD \
+    __asm__ __volatile__ (LLL_EBX_LOAD \
                       LLL_ENTER_KERNEL \
                       LLL_EBX_LOAD \
                      : "=a" (__ignore) \
@@ -254,7 +254,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_trylock(futex) \
   ({ int ret; \
-     __asm__ __volatile (__lll_trylock_asm \
+     __asm__ __volatile__ (__lll_trylock_asm \
                      : "=a" (ret), "=m" (futex) \
                      : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
                        "0" (LLL_LOCK_INITIALIZER), \
@@ -264,7 +264,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_trylock(futex, id) \
   ({ int ret; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1" \
                      : "=a" (ret), "=m" (futex) \
                      : "r" (id), "m" (futex), \
                        "0" (LLL_LOCK_INITIALIZER) \
@@ -274,7 +274,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_cond_trylock(futex) \
   ({ int ret; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1" \
                      : "=a" (ret), "=m" (futex) \
                      : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
                        "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
@@ -294,7 +294,7 @@ LLL_STUB_UNWIND_INFO_END
   (void) \
     ({ int ignore1, ignore2; \
      if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
-       __asm__ __volatile (__lll_lock_asm_start \
+       __asm__ __volatile__ (__lll_lock_asm_start \
                          "jnz _L_lock_%=\n\t" \
                          ".subsection 1\n\t" \
                          ".type _L_lock_%=,@function\n" \
@@ -313,7 +313,7 @@ LLL_STUB_UNWIND_INFO_END
      else \
       { \
         int ignore3; \
-        __asm__ __volatile (__lll_lock_asm_start \
+        __asm__ __volatile__ (__lll_lock_asm_start \
                           "jnz _L_lock_%=\n\t" \
                           ".subsection 1\n\t" \
                           ".type _L_lock_%=,@function\n" \
@@ -337,7 +337,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_lock(futex, id, private) \
   ({ int __result, ignore1, ignore2; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
                      "jnz _L_robust_lock_%=\n\t" \
                      ".subsection 1\n\t" \
                      ".type _L_robust_lock_%=,@function\n" \
@@ -362,7 +362,7 @@ LLL_STUB_UNWIND_INFO_END
 #define lll_cond_lock(futex, private) \
   (void) \
     ({ int ignore1, ignore2, ignore3; \
-      __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+      __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
                        "jnz _L_cond_lock_%=\n\t" \
                        ".subsection 1\n\t" \
                        ".type _L_cond_lock_%=,@function\n" \
@@ -384,7 +384,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_cond_lock(futex, id, private) \
   ({ int __result, ignore1, ignore2; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
                      "jnz _L_robust_cond_lock_%=\n\t" \
                      ".subsection 1\n\t" \
                      ".type _L_robust_cond_lock_%=,@function\n" \
@@ -407,7 +407,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_timedlock(futex, timeout, private) \
   ({ int __result, ignore1, ignore2, ignore3; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
                      "jnz _L_timedlock_%=\n\t" \
                      ".subsection 1\n\t" \
                      ".type _L_timedlock_%=,@function\n" \
@@ -430,7 +430,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_timedlock(futex, timeout, id, private) \
   ({ int __result, ignore1, ignore2, ignore3; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
                      "jnz _L_robust_timedlock_%=\n\t" \
                      ".subsection 1\n\t" \
                      ".type _L_robust_timedlock_%=,@function\n" \
@@ -463,7 +463,7 @@ LLL_STUB_UNWIND_INFO_END
   (void) \
     ({ int ignore; \
      if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
-       __asm__ __volatile (__lll_unlock_asm \
+       __asm__ __volatile__ (__lll_unlock_asm \
                          "jne _L_unlock_%=\n\t" \
                          ".subsection 1\n\t" \
                          ".type _L_unlock_%=,@function\n" \
@@ -481,7 +481,7 @@ LLL_STUB_UNWIND_INFO_END
      else \
       { \
        int ignore2; \
-        __asm__ __volatile (__lll_unlock_asm \
+        __asm__ __volatile__ (__lll_unlock_asm \
                           "jne _L_unlock_%=\n\t" \
                           ".subsection 1\n\t" \
                           ".type _L_unlock_%=,@function\n" \
@@ -504,7 +504,7 @@ LLL_STUB_UNWIND_INFO_END
 #define lll_robust_unlock(futex, private) \
   (void) \
     ({ int ignore, ignore2; \
-      __asm__ __volatile (LOCK_INSTR "andl %3, %0\n\t" \
+      __asm__ __volatile__ (LOCK_INSTR "andl %3, %0\n\t" \
                        "jne _L_robust_unlock_%=\n\t" \
                        ".subsection 1\n\t" \
                        ".type _L_robust_unlock_%=,@function\n" \
@@ -528,7 +528,7 @@ LLL_STUB_UNWIND_INFO_END
   (void) \
     ({ int __ignore; \
      register int _nr __asm__ ("edx") = 1; \
-      __asm__ __volatile (LOCK_INSTR "orl %5, (%2)\n\t" \
+      __asm__ __volatile__ (LOCK_INSTR "orl %5, (%2)\n\t" \
                        LLL_EBX_LOAD \
                        LLL_ENTER_KERNEL \
                        LLL_EBX_LOAD \
@@ -553,7 +553,7 @@ LLL_STUB_UNWIND_INFO_END
     int __ignore; \
     register __typeof (tid) _tid __asm__ ("edx") = (tid); \
     if (_tid != 0) \
-      __asm__ __volatile (LLL_EBX_LOAD \
+      __asm__ __volatile__ (LLL_EBX_LOAD \
                        "1:\tmovl %1, %%eax\n\t" \
                        LLL_ENTER_KERNEL \
                        "cmpl $0, (%%ebx)\n\t" \
@@ -164,7 +164,7 @@
 /* Set *futex to ID if it is 0, atomically. Returns the old value */
 #define __lll_robust_trylock(futex, id) \
   ({ int __val; \
-     __asm __volatile ("1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
+     __asm__ __volatile__ ("1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
                      " cmpwi 0,%0,0\n" \
                      " bne 2f\n" \
                      " stwcx. %3,0,%2\n" \
@@ -52,7 +52,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
         Do this atomically.
       */
       newval = __fork_generation | 1;
-      __asm __volatile ("1: lwarx %0,0,%3\n"
+      __asm__ __volatile__ ("1: lwarx %0,0,%3\n"
                       " andi. %1,%0,2\n"
                       " bne 2f\n"
                       " stwcx. %4,0,%3\n"
@@ -23,7 +23,7 @@
 int
 pthread_spin_unlock (pthread_spinlock_t *lock)
 {
-  __asm __volatile (__lll_rel_instr ::: "memory");
+  __asm__ __volatile__ (__lll_rel_instr ::: "memory");
   *lock = 0;
   return 0;
 }
@@ -29,9 +29,9 @@ __new_sem_post (sem_t *sem)
 {
   struct new_sem *isem = (struct new_sem *) sem;
 
-  __asm __volatile (__lll_rel_instr ::: "memory");
+  __asm__ __volatile__ (__lll_rel_instr ::: "memory");
   atomic_increment (&isem->value);
-  __asm __volatile (__lll_acq_instr ::: "memory");
+  __asm__ __volatile__ (__lll_acq_instr ::: "memory");
   if (isem->nwaiters > 0)
     {
       int err = lll_futex_wake (&isem->value, 1,
@@ -99,7 +99,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_trylock(futex) \
   ({ unsigned char __result; \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
@@ -121,7 +121,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_trylock(futex, id) \
   ({ unsigned char __result; \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
@@ -143,7 +143,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_cond_trylock(futex) \
   ({ unsigned char __result; \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
@@ -165,7 +165,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_lock(futex, private) \
   (void) ({ int __result, *__futex = &(futex); \
-           __asm __volatile ("\
+           __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
@@ -190,7 +190,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_lock(futex, id, private) \
   ({ int __result, *__futex = &(futex); \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
@@ -211,7 +211,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
   always wakeup waiters. */
 #define lll_cond_lock(futex, private) \
   (void) ({ int __result, *__futex = &(futex); \
-           __asm __volatile ("\
+           __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
@@ -229,7 +229,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_cond_lock(futex, id, private) \
   ({ int __result, *__futex = &(futex); \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
@@ -248,7 +248,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_timedlock(futex, timeout, private) \
   ({ int __result, *__futex = &(futex); \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
@@ -267,7 +267,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_timedlock(futex, timeout, id, private) \
   ({ int __result, *__futex = &(futex); \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
@@ -287,7 +287,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_unlock(futex, private) \
   (void) ({ int __result, *__futex = &(futex); \
-           __asm __volatile ("\
+           __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
@@ -310,7 +310,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_unlock(futex, private) \
   (void) ({ int __result, *__futex = &(futex); \
-           __asm __volatile ("\
+           __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
@@ -326,7 +326,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_dead(futex, private) \
   (void) ({ int __ignore, *__futex = &(futex); \
-           __asm __volatile ("\
+           __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
@@ -354,13 +354,13 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 #define lll_futex_timed_wait(futex, val, timeout, private) \
   ({ \
     int __status; \
-    register unsigned long __r3 __asm ("r3") = SYS_futex; \
-    register unsigned long __r4 __asm ("r4") = (unsigned long) (futex); \
-    register unsigned long __r5 __asm ("r5") \
+    register unsigned long __r3 __asm__ ("r3") = SYS_futex; \
+    register unsigned long __r4 __asm__ ("r4") = (unsigned long) (futex); \
+    register unsigned long __r5 __asm__ ("r5") \
      = __lll_private_flag (FUTEX_WAIT, private); \
-    register unsigned long __r6 __asm ("r6") = (unsigned long) (val); \
-    register unsigned long __r7 __asm ("r7") = (timeout); \
-    __asm __volatile (SYSCALL_WITH_INST_PAD \
+    register unsigned long __r6 __asm__ ("r6") = (unsigned long) (val); \
+    register unsigned long __r7 __asm__ ("r7") = (timeout); \
+    __asm__ __volatile__ (SYSCALL_WITH_INST_PAD \
                     : "=z" (__status) \
                     : "r" (__r3), "r" (__r4), "r" (__r5), \
                       "r" (__r6), "r" (__r7) \
@@ -372,13 +372,13 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 #define lll_futex_wake(futex, nr, private) \
   do { \
     int __ignore; \
-    register unsigned long __r3 __asm ("r3") = SYS_futex; \
-    register unsigned long __r4 __asm ("r4") = (unsigned long) (futex); \
-    register unsigned long __r5 __asm ("r5") \
+    register unsigned long __r3 __asm__ ("r3") = SYS_futex; \
+    register unsigned long __r4 __asm__ ("r4") = (unsigned long) (futex); \
+    register unsigned long __r5 __asm__ ("r5") \
     = __lll_private_flag (FUTEX_WAKE, private); \
-    register unsigned long __r6 __asm ("r6") = (unsigned long) (nr); \
-    register unsigned long __r7 __asm ("r7") = 0; \
-    __asm __volatile (SYSCALL_WITH_INST_PAD \
+    register unsigned long __r6 __asm__ ("r6") = (unsigned long) (nr); \
+    register unsigned long __r7 __asm__ ("r7") = 0; \
+    __asm__ __volatile__ (SYSCALL_WITH_INST_PAD \
                     : "=z" (__ignore) \
                     : "r" (__r3), "r" (__r4), "r" (__r5), \
                       "r" (__r6), "r" (__r7) \
@@ -212,7 +212,7 @@ LLL_STUB_UNWIND_INFO_END
     register const struct timespec *__to __asm__ ("r10") = timeout; \
     int __status; \
     register __typeof (val) _val __asm__ ("edx") = (val); \
-    __asm__ __volatile ("syscall" \
+    __asm__ __volatile__ ("syscall" \
                     : "=a" (__status) \
                     : "0" (SYS_futex), "D" (futex), \
                       "S" (__lll_private_flag (FUTEX_WAIT, private)), \
@@ -226,7 +226,7 @@ LLL_STUB_UNWIND_INFO_END
   do { \
     int __ignore; \
     register __typeof (nr) _nr __asm__ ("edx") = (nr); \
-    __asm__ __volatile ("syscall" \
+    __asm__ __volatile__ ("syscall" \
                     : "=a" (__ignore) \
                     : "0" (SYS_futex), "D" (futex), \
                       "S" (__lll_private_flag (FUTEX_WAKE, private)), \
@@ -253,7 +253,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_trylock(futex) \
   ({ int ret; \
-     __asm__ __volatile (__lll_trylock_asm \
+     __asm__ __volatile__ (__lll_trylock_asm \
                     : "=a" (ret), "=m" (futex) \
                     : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
                       "0" (LLL_LOCK_INITIALIZER) \
@@ -262,7 +262,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_trylock(futex, id) \
   ({ int ret; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1" \
                     : "=a" (ret), "=m" (futex) \
                     : "r" (id), "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
                     : "memory"); \
@@ -270,7 +270,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_cond_trylock(futex) \
   ({ int ret; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1" \
                     : "=a" (ret), "=m" (futex) \
                     : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
                       "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
@@ -294,7 +294,7 @@ LLL_STUB_UNWIND_INFO_END
   (void) \
     ({ int ignore1, ignore2, ignore3; \
      if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
-       __asm__ __volatile (__lll_lock_asm_start \
+       __asm__ __volatile__ (__lll_lock_asm_start \
                         ".subsection 1\n\t" \
                         ".type _L_lock_%=, @function\n" \
                         "_L_lock_%=:\n" \
@@ -312,7 +312,7 @@ LLL_STUB_UNWIND_INFO_END
                         : "0" (1), "m" (futex), "3" (0) \
                         : "cx", "r11", "cc", "memory"); \
      else \
-       __asm__ __volatile (__lll_lock_asm_start \
+       __asm__ __volatile__ (__lll_lock_asm_start \
                         ".subsection 1\n\t" \
                         ".type _L_lock_%=, @function\n" \
                         "_L_lock_%=:\n" \
@@ -333,7 +333,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_lock(futex, id, private) \
   ({ int result, ignore1, ignore2; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
                     "jnz 1f\n\t" \
                     ".subsection 1\n\t" \
                     ".type _L_robust_lock_%=, @function\n" \
@@ -356,7 +356,7 @@ LLL_STUB_UNWIND_INFO_END
 #define lll_cond_lock(futex, private) \
   (void) \
     ({ int ignore1, ignore2, ignore3; \
-      __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+      __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
                        "jnz 1f\n\t" \
                        ".subsection 1\n\t" \
                        ".type _L_cond_lock_%=, @function\n" \
@@ -378,7 +378,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_cond_lock(futex, id, private) \
   ({ int result, ignore1, ignore2; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
                     "jnz 1f\n\t" \
                     ".subsection 1\n\t" \
                     ".type _L_robust_cond_lock_%=, @function\n" \
@@ -401,7 +401,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_timedlock(futex, timeout, private) \
   ({ int result, ignore1, ignore2, ignore3; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
                     "jnz 1f\n\t" \
                     ".subsection 1\n\t" \
                     ".type _L_timedlock_%=, @function\n" \
@@ -425,7 +425,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_timedlock(futex, timeout, id, private) \
   ({ int result, ignore1, ignore2, ignore3; \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
                     "jnz 1f\n\t" \
                     ".subsection 1\n\t" \
                     ".type _L_robust_timedlock_%=, @function\n" \
@@ -464,7 +464,7 @@ LLL_STUB_UNWIND_INFO_END
   (void) \
     ({ int ignore; \
      if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
-       __asm__ __volatile (__lll_unlock_asm_start \
+       __asm__ __volatile__ (__lll_unlock_asm_start \
                         ".subsection 1\n\t" \
                         ".type _L_unlock_%=, @function\n" \
                         "_L_unlock_%=:\n" \
@@ -481,7 +481,7 @@ LLL_STUB_UNWIND_INFO_END
                         : "m" (futex) \
                         : "ax", "cx", "r11", "cc", "memory"); \
      else \
-       __asm__ __volatile (__lll_unlock_asm_start \
+       __asm__ __volatile__ (__lll_unlock_asm_start \
                         ".subsection 1\n\t" \
                         ".type _L_unlock_%=, @function\n" \
                         "_L_unlock_%=:\n" \
@@ -503,7 +503,7 @@ LLL_STUB_UNWIND_INFO_END
   do \
     { \
      int ignore; \
-      __asm__ __volatile (LOCK_INSTR "andl %2, %0\n\t" \
+      __asm__ __volatile__ (LOCK_INSTR "andl %2, %0\n\t" \
                        "jne 1f\n\t" \
                        ".subsection 1\n\t" \
                        ".type _L_robust_unlock_%=, @function\n" \
@@ -528,7 +528,7 @@ LLL_STUB_UNWIND_INFO_END
   do \
    { \
     int ignore; \
-      __asm__ __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \
+      __asm__ __volatile__ (LOCK_INSTR "orl %3, (%2)\n\t" \
                        "syscall" \
                        : "=m" (futex), "=a" (ignore) \
                        : "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \
@@ -544,7 +544,7 @@ LLL_STUB_UNWIND_INFO_END
     register int __nr_move __asm__ ("r10") = nr_move; \
     register void *__mutex __asm__ ("r8") = mutex; \
     register int __val __asm__ ("r9") = val; \
-    __asm__ __volatile ("syscall" \
+    __asm__ __volatile__ ("syscall" \
                     : "=a" (__res) \
                     : "0" (__NR_futex), "D" ((void *) ftx), \
                       "S" (__lll_private_flag (FUTEX_CMP_REQUEUE, \
@@ -568,7 +568,7 @@ LLL_STUB_UNWIND_INFO_END
     int __ignore; \
     register __typeof (tid) _tid __asm__ ("edx") = (tid); \
     if (_tid != 0) \
-      __asm__ __volatile ("xorq %%r10, %%r10\n\t" \
+      __asm__ __volatile__ ("xorq %%r10, %%r10\n\t" \
                        "1:\tmovq %2, %%rax\n\t" \
                        "syscall\n\t" \
                        "cmpl $0, (%%rdi)\n\t" \
@@ -2,7 +2,7 @@
 
 #define RESET_VGETCPU_CACHE() \
   do { \
-    asm volatile ("movl %0, %%fs:%P1\n\t" \
+    __asm__ __volatile__ ("movl %0, %%fs:%P1\n\t" \
                 "movl %0, %%fs:%P2" \
                 : \
                 : "ir" (0), "i" (offsetof (struct pthread, \
@@ -40,4 +40,4 @@
 
 /* While there is no such syscall. */
 #define __exit_thread_inline(val) \
-  __asm__ volatile ("syscall" :: "a" (__NR_exit), "D" (val))
+  __asm__ __volatile__ ("syscall" :: "a" (__NR_exit), "D" (val))
@@ -170,7 +170,7 @@ typedef struct
      _head->self = _thrdescr; \
 \
      /* It is a simple syscall to set the %fs value for the thread. */ \
-     __asm__ volatile ("syscall" \
+     __asm__ __volatile__ ("syscall" \
                      : "=a" (_result) \
                      : "0" ((unsigned long int) __NR_arch_prctl), \
                        "D" ((unsigned long int) ARCH_SET_FS), \
@@ -189,7 +189,7 @@ typedef struct
 
 /* Return the thread descriptor for the current thread.
 
-   The contained asm must *not* be marked volatile since otherwise
+   The contained asm must *not* be marked __volatile__ since otherwise
    assignments like
      pthread_descr self = thread_self();
    do not get optimized away. */
@@ -207,11 +207,11 @@ typedef struct
 # define THREAD_GETMEM(descr, member) \
   ({ __typeof (descr->member) __value; \
     if (sizeof (__value) == 1) \
-       __asm__ volatile ("movb %%fs:%P2,%b0" \
+       __asm__ __volatile__ ("movb %%fs:%P2,%b0" \
                        : "=q" (__value) \
                        : "0" (0), "i" (offsetof (struct pthread, member))); \
    else if (sizeof (__value) == 4) \
-       __asm__ volatile ("movl %%fs:%P1,%0" \
+       __asm__ __volatile__ ("movl %%fs:%P1,%0" \
                        : "=r" (__value) \
                        : "i" (offsetof (struct pthread, member))); \
    else \
@@ -221,7 +221,7 @@ typedef struct
          4 or 8. */ \
       abort (); \
 \
-       __asm__ volatile ("movq %%fs:%P1,%q0" \
+       __asm__ __volatile__ ("movq %%fs:%P1,%q0" \
                         : "=r" (__value) \
                         : "i" (offsetof (struct pthread, member))); \
     } \
@@ -232,12 +232,12 @@ typedef struct
 # define THREAD_GETMEM_NC(descr, member, idx) \
   ({ __typeof (descr->member[0]) __value; \
    if (sizeof (__value) == 1) \
-       __asm__ volatile ("movb %%fs:%P2(%q3),%b0" \
+       __asm__ __volatile__ ("movb %%fs:%P2(%q3),%b0" \
                        : "=q" (__value) \
                        : "0" (0), "i" (offsetof (struct pthread, member[0])), \
                         "r" (idx)); \
    else if (sizeof (__value) == 4) \
-       __asm__ volatile ("movl %%fs:%P1(,%q2,4),%0" \
+       __asm__ __volatile__ ("movl %%fs:%P1(,%q2,4),%0" \
                        : "=r" (__value) \
                        : "i" (offsetof (struct pthread, member[0])), "r" (idx));\
    else \
@@ -247,7 +247,7 @@ typedef struct
         4 or 8. */ \
       abort (); \
 \
-       __asm__ volatile ("movq %%fs:%P1(,%q2,8),%q0" \
+       __asm__ __volatile__ ("movq %%fs:%P1(,%q2,8),%q0" \
                         : "=r" (__value) \
                         : "i" (offsetof (struct pthread, member[0])), \
                           "r" (idx)); \
@@ -267,11 +267,11 @@ typedef struct
 /* Same as THREAD_SETMEM, but the member offset can be non-constant. */
 # define THREAD_SETMEM(descr, member, value) \
   ({ if (sizeof (descr->member) == 1) \
-       __asm__ volatile ("movb %b0,%%fs:%P1" : \
+       __asm__ __volatile__ ("movb %b0,%%fs:%P1" : \
                        : "iq" (value), \
                         "i" (offsetof (struct pthread, member))); \
    else if (sizeof (descr->member) == 4) \
-       __asm__ volatile ("movl %0,%%fs:%P1" : \
+       __asm__ __volatile__ ("movl %0,%%fs:%P1" : \
                        : IMM_MODE (value), \
                         "i" (offsetof (struct pthread, member))); \
    else \
@@ -281,7 +281,7 @@ typedef struct
         4 or 8. */ \
       abort (); \
 \
-       __asm__ volatile ("movq %q0,%%fs:%P1" : \
+       __asm__ __volatile__ ("movq %q0,%%fs:%P1" : \
                         : IMM_MODE ((unsigned long int) value), \
                           "i" (offsetof (struct pthread, member))); \
    }})
@@ -290,12 +290,12 @@ typedef struct
 /* Set member of the thread descriptor directly. */
 # define THREAD_SETMEM_NC(descr, member, idx, value) \
   ({ if (sizeof (descr->member[0]) == 1) \
-       __asm__ volatile ("movb %b0,%%fs:%P1(%q2)" : \
+       __asm__ __volatile__ ("movb %b0,%%fs:%P1(%q2)" : \
                        : "iq" (value), \
                         "i" (offsetof (struct pthread, member[0])), \
                         "r" (idx)); \
    else if (sizeof (descr->member[0]) == 4) \
-       __asm__ volatile ("movl %0,%%fs:%P1(,%q2,4)" : \
+       __asm__ __volatile__ ("movl %0,%%fs:%P1(,%q2,4)" : \
                        : IMM_MODE (value), \
                         "i" (offsetof (struct pthread, member[0])), \
                         "r" (idx)); \
@@ -306,7 +306,7 @@ typedef struct
         4 or 8. */ \
       abort (); \
 \
-       __asm__ volatile ("movq %q0,%%fs:%P1(,%q2,8)" : \
+       __asm__ __volatile__ ("movq %q0,%%fs:%P1(,%q2,8)" : \
                         : IMM_MODE ((unsigned long int) value), \
                           "i" (offsetof (struct pthread, member[0])), \
                           "r" (idx)); \
@@ -318,7 +318,7 @@ typedef struct
   ({ __typeof (descr->member) __ret; \
    __typeof (oldval) __old = (oldval); \
    if (sizeof (descr->member) == 4) \
-       __asm__ volatile (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3" \
+       __asm__ __volatile__ (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3" \
                        : "=a" (__ret) \
                        : "0" (__old), "r" (newval), \
                         "i" (offsetof (struct pthread, member))); \
@@ -331,7 +331,7 @@ typedef struct
 /* Atomic logical and. */
 # define THREAD_ATOMIC_AND(descr, member, val) \
   (void) ({ if (sizeof ((descr)->member) == 4) \
-             __asm__ volatile (LOCK_PREFIX "andl %1, %%fs:%P0" \
+             __asm__ __volatile__ (LOCK_PREFIX "andl %1, %%fs:%P0" \
                              :: "i" (offsetof (struct pthread, member)), \
                                "ir" (val)); \
          else \
@@ -342,7 +342,7 @@ typedef struct
 /* Atomic set bit. */
 # define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
   (void) ({ if (sizeof ((descr)->member) == 4) \
-             __asm__ volatile (LOCK_PREFIX "orl %1, %%fs:%P0" \
+             __asm__ __volatile__ (LOCK_PREFIX "orl %1, %%fs:%P0" \
                              :: "i" (offsetof (struct pthread, member)), \
                                "ir" (1 << (bit))); \
          else \
@@ -352,7 +352,7 @@ typedef struct
 
 # define CALL_THREAD_FCT(descr) \
   ({ void *__res; \
-     __asm__ volatile ("movq %%fs:%P2, %%rdi\n\t" \
+     __asm__ __volatile__ ("movq %%fs:%P2, %%rdi\n\t" \
                     "callq *%%fs:%P1" \
                     : "=a" (__res) \
                     : "i" (offsetof (struct pthread, start_routine)), \
@@ -385,7 +385,7 @@ typedef struct
 # define THREAD_GSCOPE_RESET_FLAG() \
   do \
    { int __res; \
-      __asm__ volatile ("xchgl %0, %%fs:%P1" \
+      __asm__ __volatile__ ("xchgl %0, %%fs:%P1" \
                      : "=r" (__res) \
                      : "i" (offsetof (struct pthread, header.gscope_flag)), \
                        "0" (THREAD_GSCOPE_FLAG_UNUSED)); \
@@ -43,12 +43,12 @@ handler (int sig)
 static void __attribute__ ((noinline))
 clobber_lots_of_regs (void)
 {
-#define X1(n) long r##n = 10##n; __asm __volatile ("" : "+r" (r##n));
+#define X1(n) long r##n = 10##n; __asm__ __volatile__ ("" : "+r" (r##n));
 #define X2(n) X1(n##0) X1(n##1) X1(n##2) X1(n##3) X1(n##4)
 #define X3(n) X2(n##0) X2(n##1) X2(n##2) X2(n##3) X2(n##4)
   X3(0) X3(1) X3(2) X3(3) X3(4)
 #undef X1
-#define X1(n) __asm __volatile ("" : : "r" (r##n));
+#define X1(n) __asm__ __volatile__ ("" : : "r" (r##n));
   X3(0) X3(1) X3(2) X3(3) X3(4)
 #undef X1
 #undef X2
@@ -13,7 +13,7 @@ CONCAT (tlsmod17a, N) (void)
 {
   int *p = &CONCAT (v, N);
   /* GCC assumes &var is never NULL, add optimization barrier. */
-  __asm __volatile ("" : "+r" (p));
+  __asm__ __volatile__ ("" : "+r" (p));
   if (p == NULL || *p != 4)
     {
      printf ("fail %d %p\n", N, p);
@@ -11,7 +11,7 @@ test (void)
 {
   int *p = &var;
   /* GCC assumes &var is never NULL, add optimization barrier. */
-  __asm __volatile ("" : "+r" (p));
+  __asm__ __volatile__ ("" : "+r" (p));
   if (p == NULL || *p != 4)
     {
      printf ("fail %d %p\n", N, p);